From 04219b8a92a5709f2282511628972aeef91168f7 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> Date: Fri, 3 Dec 2021 19:59:02 +0800 Subject: [PATCH 01/53] Add native support to Azure Blob Signed-off-by: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> --- api/v1beta1/bucket_types.go | 3 +- controllers/bucket_controller.go | 196 ++++++++++++++++++++++- go.mod | 10 ++ go.sum | 140 +++++++--------- main.go | 5 + pkg/azure/cloudprovider/cloudprovider.go | 136 ++++++++++++++++ 6 files changed, 404 insertions(+), 86 deletions(-) create mode 100644 pkg/azure/cloudprovider/cloudprovider.go diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 41b732d1c..cadf92761 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -32,7 +32,7 @@ const ( // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). - // +kubebuilder:validation:Enum=generic;aws;gcp + // +kubebuilder:validation:Enum=generic;aws;gcp;azure // +kubebuilder:default:=generic // +optional Provider string `json:"provider,omitempty"` @@ -86,6 +86,7 @@ const ( GenericBucketProvider string = "generic" AmazonBucketProvider string = "aws" GoogleBucketProvider string = "gcp" + AzureBlobProvider string = "azure" ) // BucketStatus defines the observed state of a bucket diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 86911102e..658aaf501 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -20,15 +20,22 @@ import ( "context" "crypto/sha1" "fmt" + "io/ioutil" + "net/url" "os" "path/filepath" "strings" "time" + "github.com/Azure/azure-pipeline-go/pipeline" + storagemgmt "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2021-04-01/storage" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest/to" "github.com/go-logr/logr" 
"github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" @@ -47,9 +54,10 @@ import ( "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + "github.com/fluxcd/source-controller/pkg/azure/cloudprovider" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/fluxcd/source-controller/pkg/sourceignore" ) @@ -66,6 +74,7 @@ type BucketReconciler struct { EventRecorder kuberecorder.EventRecorder ExternalEventRecorder *events.Recorder MetricsRecorder *metrics.Recorder + AzureCloudConfig string } type BucketReconcilerOptions struct { @@ -191,6 +200,11 @@ func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket if err != nil { return sourceBucket, err } + } else if bucket.Spec.Provider == sourcev1.AzureBlobProvider { + sourceBucket, err = r.reconcileWithAzureBlob(ctx, bucket, tempDir) + if err != nil { + return sourceBucket, err + } } else { sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) if err != nil { @@ -401,6 +415,123 @@ func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket source return sourcev1.Bucket{}, nil } +func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { + azBlobPipeline, err := r.authAzure(ctx, bucket) + if err != nil { + err = fmt.Errorf("auth error: %w", err) + return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + } + + ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + defer cancel() + + ep := strings.TrimRight(bucket.Spec.Endpoint, "/") + 
blobContainerURLString := strings.Join([]string{ep, bucket.Spec.BucketName}, "/") + blobContainerURL, err := url.Parse(blobContainerURLString) + if err != nil { + return sourcev1.Bucket{}, fmt.Errorf("invalid blob container endpoint '%s': %w", blobContainerURLString, err) + } + + blobContainer := azblob.NewContainerURL(*blobContainerURL, *azBlobPipeline) + if err != nil { + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + + downloadBlobToLocalFile := func(ctx context.Context, blobPipeline pipeline.Pipeline, url url.URL, blobName, localPath string) error { + containerURL := azblob.NewContainerURL(url, blobPipeline) + blobURL := containerURL.NewBlobURL(blobName) + + blobProps, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + if err != nil { + return err + } + + content := make([]byte, blobProps.ContentLength()) + err = azblob.DownloadBlobToBuffer(ctx, blobURL, 0, 0, content, azblob.DownloadFromBlobOptions{}) + if err != nil { + return err + } + + dir := filepath.Dir(localPath) + err = os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create local directory '%s': %w", blobURL, err) + } + + err = ioutil.WriteFile(localPath, content, 0600) + return err + } + + // Look for file with ignore rules first + path := filepath.Join(tempDir, sourceignore.IgnoreFile) + + if err := downloadBlobToLocalFile(ctxTimeout, *azBlobPipeline, blobContainer.URL(), sourceignore.IgnoreFile, path); err != nil { + if resp, ok := err.(azblob.StorageError); ok && resp.ServiceCode() != "BlobNotFound" { + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + } + + ps, err := sourceignore.ReadIgnoreFile(path, nil) + if err != nil { + return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + } + // In-spec patterns take precedence + if bucket.Spec.Ignore != nil { + ps = append(ps, 
sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + } + + matcher := sourceignore.NewMatcher(ps) + + // download blob content + marker := azblob.Marker{Val: nil} + for true { + resp, err := blobContainer.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{ + Prefix: "", + }) + if err != nil { + return sourcev1.Bucket{}, fmt.Errorf("failed to list blob in blob container '%s': %w", blobContainerURLString, err) + } + + if len(resp.Segment.BlobItems) == 0 { + break + } + + var eg errgroup.Group + for i := range resp.Segment.BlobItems { + object := resp.Segment.BlobItems[i] + eg.Go(func() error { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + return nil + } + + if matcher.Match(strings.Split(object.Name, "/"), false) { + return nil + } + + localPath := filepath.Join(tempDir, object.Name) + if err := downloadBlobToLocalFile(ctxTimeout, *azBlobPipeline, blobContainer.URL(), object.Name, localPath); err != nil { + return fmt.Errorf("failed to download blob items from blob container '%s': %w", blobContainerURLString, err) + } + + return nil + }) + } + + err = eg.Wait() + if err != nil { + return sourcev1.Bucket{}, fmt.Errorf("failed to download blob items from blob container '%s': %w", blobContainerURLString, err) + } + + if resp.Marker == nil { + break + } + + marker.Val = resp.Marker + } + + return sourcev1.Bucket{}, nil +} + // authGCP creates a new Google Cloud Platform storage client // to interact with the storage service. func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { @@ -475,6 +606,69 @@ func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket return minio.New(bucket.Spec.Endpoint, &opt) } +// authAzure creates a new Azure blob client to interact with Azure Storage Account. 
+func (r *BucketReconciler) authAzure(ctx context.Context, bucket sourcev1.Bucket) (*pipeline.Pipeline, error) { + endpointURL, err := url.Parse(bucket.Spec.Endpoint) + if err != nil { + return nil, fmt.Errorf("invalid endpoint: '%s': %w", bucket.Spec.Endpoint, err) + } + + var key string + var resourceId string + if bucket.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: bucket.GetNamespace(), + Name: bucket.Spec.SecretRef.Name, + } + + var secret corev1.Secret + if err := r.Get(ctx, secretName, &secret); err != nil { + return nil, fmt.Errorf("credentials secret error: %w", err) + } + + if k, ok := secret.Data["accesskey"]; ok { + resourceId = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + key = string(k) + } + } + + if key == "" && resourceId != "" { + cloudProvider, err := cloudprovider.NewCloudProvider(r.AzureCloudConfig) + if err != nil { + return nil, fmt.Errorf("unable to load azure cloud config '%s': %w", r.AzureCloudConfig, err) + } + + tokens := strings.Split(resourceId, "/") + subscriptionId := tokens[2] + rg := tokens[4] + client := storagemgmt.NewAccountsClient(subscriptionId) + client.Authorizer = cloudProvider.Authorizer + accountName := strings.Split(endpointURL.Hostname(), ".")[0] + + keys, err := client.ListKeys(ctx, rg, accountName, "") + if err != nil { + return nil, fmt.Errorf("unable to list keys for storage account '%s': %w", resourceId, err) + } + + key = to.String((*keys.Keys)[0].Value) + } + + if key == "" { + return nil, fmt.Errorf("no blob credentials found") + } + + accountName := strings.Split(endpointURL.Host, ".")[0] + credential, err := azblob.NewSharedKeyCredential(accountName, key) + if err != nil { + return nil, fmt.Errorf("invalid credentials: %w", err) + } + + blobPipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) + return &blobPipeline, nil +} + // checksum calculates the SHA1 checksum of the given root directory. 
// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the // list with relative file paths and their checksums. diff --git a/go.mod b/go.mod index 775b6ba79..ad1e98bec 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,14 @@ replace github.com/fluxcd/source-controller/api => ./api require ( cloud.google.com/go v0.93.3 // indirect cloud.google.com/go/storage v1.16.0 + github.com/Azure/aad-pod-identity v1.8.5 + github.com/Azure/azure-pipeline-go v0.2.3 + github.com/Azure/azure-sdk-for-go v60.0.0+incompatible + github.com/Azure/azure-storage-blob-go v0.14.0 + github.com/Azure/go-autorest/autorest v0.11.18 + github.com/Azure/go-autorest/autorest/adal v0.9.13 + github.com/Azure/go-autorest/autorest/to v0.4.0 + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Masterminds/semver/v3 v3.1.1 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/bshuster-repo/logrus-logstash-hook v1.0.2 // indirect @@ -50,11 +58,13 @@ require ( golang.org/x/text v0.3.7 // indirect google.golang.org/api v0.54.0 google.golang.org/genproto v0.0.0-20210830153122-0bac4d21c8ea // indirect + gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible helm.sh/helm/v3 v3.6.3 k8s.io/api v0.22.2 k8s.io/apimachinery v0.22.2 k8s.io/client-go v0.22.2 + k8s.io/klog/v2 v2.9.0 sigs.k8s.io/controller-runtime v0.10.2 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index 75f12b595..752935abd 100644 --- a/go.sum +++ b/go.sum @@ -45,37 +45,64 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.16.0 h1:1UwAux2OZP4310YXg5ohqBEpV16Y93uZG4+qOX7K2Kg= cloud.google.com/go/storage v1.16.0/go.mod h1:ieKBmUyzcftN5tbxwnXClMKH00CfcQ+xL6NN0r5QfmE= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod 
h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= +contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/aad-pod-identity v1.8.5 h1:OUjgpRfgpf1uKgC8WIVXP2CozjcFAlWxv+jFn2WD484= +github.com/Azure/aad-pod-identity v1.8.5/go.mod h1:mu0fepT2bK9Te2VNlKPy9qEFSgdFZXM3pblQ/9cMWhg= +github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v57.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v60.0.0+incompatible h1:vVRJhSSTwhIHQTzTjqoZCItFJeBwfdNSqHcgGV10FHQ= +github.com/Azure/azure-sdk-for-go v60.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible 
h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.1.0/go.mod h1:Gf7/i2FUpyb/sGBLIFxTBzrNzBo7aPXXE3ZVeDRwdpM= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= 
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= +github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod 
h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -107,7 +134,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -116,18 +142,15 @@ 
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -138,14 +161,12 @@ github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edY github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bshuster-repo/logrus-logstash-hook v1.0.2 h1:JYRWo+QGnQdedgshosug9hxpPYTB9oJ1ZZD3fY31alU= github.com/bshuster-repo/logrus-logstash-hook v1.0.2/go.mod h1:HgYntJprnHSPaF9VPPPLP1L5S1vMWxRfa1J+vzDrDTw= -github.com/bugsnag/bugsnag-go v2.1.2+incompatible h1:E7dor84qzwUO8KdCM68CZwq9QOSR7HXlLx3Wj5vui2s= github.com/bugsnag/bugsnag-go v2.1.2+incompatible/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/panicwrap v1.3.4 h1:A6sXFtDGsgU/4BLf5JT0o5uYg3EeKgGx3Sfs+/uk3pU= github.com/bugsnag/panicwrap v1.3.4/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -188,6 +209,7 @@ github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkE github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-iptables v0.3.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -202,7 +224,6 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -215,6 +236,7 @@ github.com/deislabs/oras v0.11.1/go.mod h1:39lCtf8Q6WDC7ul9cnyWXONNzKvabEKk+AX+L github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/docker/cli v20.10.9+incompatible 
h1:OJ7YkwQA+k2Oi51lmCojpjiygKpi76P7bg91b2eJxYU= github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.7.0-rc.0+incompatible h1:Nw9tozLpkMnG3IA1zLzsCuwKizII6havt4iIXWWzU2s= @@ -230,7 +252,6 @@ github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHz github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -240,7 +261,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -260,23 
+280,19 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQxaNQ= github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= github.com/fluxcd/pkg/apis/meta v0.10.0/go.mod h1:CW9X9ijMTpNe7BwnokiUOrLl/h13miwVr/3abEQLbKE= github.com/fluxcd/pkg/apis/meta v0.10.1 h1:zISenRlqNG7WK8TP3HxZTvv+1Z7JZOUIQvZrOr6pQ2w= github.com/fluxcd/pkg/apis/meta v0.10.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= -github.com/fluxcd/pkg/gittestserver v0.4.2 h1:XqoiemTnnUNldnOw8N7OTdalu2iZp1FTRhp9uUauDJQ= github.com/fluxcd/pkg/gittestserver v0.4.2/go.mod h1:hUPx21fe/6oox336Wih/XF1fnmzLmptNMOvATbTZXNY= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= github.com/fluxcd/pkg/gitutil v0.1.0/go.mod h1:Ybz50Ck5gkcnvF0TagaMwtlRy3X3wXuiri1HVsK5id4= -github.com/fluxcd/pkg/helmtestserver v0.2.0 
h1:cE7YHDmrWI0hr9QpaaeQ0vQ16Z0IiqZKiINDpqdY610= github.com/fluxcd/pkg/helmtestserver v0.2.0/go.mod h1:Yie8n7xuu5Nvf1Q7302LKsubJhWpwzCaK0rLJvmF7aI= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= @@ -284,7 +300,6 @@ github.com/fluxcd/pkg/runtime v0.12.0 h1:BPZZ8bBkimpqGAPXqOf3LTaw+tcw6HgbWyCuzbb github.com/fluxcd/pkg/runtime v0.12.0/go.mod h1:EyaTR2TOYcjL5U//C4yH3bt2tvTgIOSXpVRbWxUn/C4= github.com/fluxcd/pkg/ssh v0.1.0 h1:cym2bqiT4IINOdLV0J6GYxer16Ii/7b2+RlK3CG+CnA= github.com/fluxcd/pkg/ssh v0.1.0/go.mod h1:KUuVhaB6AX3IHTGCd3Ti/nesn5t1Nz4zCThFkkjHctM= -github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= @@ -292,6 +307,7 @@ github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7Z github.com/fluxcd/pkg/version v0.1.0/go.mod h1:V7Z/w8dxLQzv0FHqa5ox5TeyOd2zOd49EeuWFgnwyj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= @@ -300,11 +316,9 @@ 
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc= github.com/garyburd/redigo v1.6.3/go.mod h1:rTb6epsqigu3kYKBnaF028A7Tf/Aw5s0cqA47doKKqw= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= @@ -315,7 +329,6 @@ github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4u github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= @@ -385,27 +398,20 @@ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2K 
github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8= github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= -github.com/gobuffalo/logger v1.0.1 h1:ZEgyRGgAm4ZAhAO45YXMs5Fp+bzGLESFewzAVBMKuTg= github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= -github.com/gobuffalo/packd v0.3.0 h1:eMwymTkA1uXsqxS0Tpoop3Lc0u3kTfiMBE6nKtQU4g4= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= -github.com/gobuffalo/packr/v2 v2.7.1 h1:n3CIW5T17T8v4GGK5sWXLVWJhCz7b5aNLSxW6gYim4o= github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godror/godror v0.13.3/go.mod 
h1:2ouUT4kdhUBk7TAkHWD4SN0CdI0pgEQbo8FVHhbSKWg= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.1.0+incompatible h1:sIa2eCvUTwgjbqXrPLfNwUf9S3i3mpH1O1atV+iL/Wk= github.com/gofrs/uuid v4.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -473,11 +479,9 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -498,8 +502,9 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso= @@ -508,10 +513,8 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= @@ -527,6 +530,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod 
h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= @@ -537,7 +541,6 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -560,7 +563,6 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= github.com/huandu/xstrings v1.3.1/go.mod 
h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -577,9 +579,7 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= @@ -594,11 +594,9 @@ github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMW github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= @@ -617,19 +615,14 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.2.0/go.mod 
h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libgit2/git2go/v31 v31.6.1 h1:FnKHHDDBgltSsu9RpKuL4rSR8dQ1JTf9dfvFhZ1y7Aw= github.com/libgit2/git2go/v31 v31.6.1/go.mod h1:c/rkJcBcUFx6wHaT++UwNpKvIsmPNqCeQ/vzO4DrEec= @@ -649,10 +642,11 @@ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= -github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -663,7 +657,6 @@ github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+tw github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 
v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= @@ -683,7 +676,6 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -692,11 +684,9 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= -github.com/moby/term 
v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -723,7 +713,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -739,7 +728,6 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -747,7 +735,6 @@ github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -768,12 +755,10 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.3 h1:7JgpsBaN0uMkyju4tbYHu0mnM55hNKVYLsXmwr15NQI= github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -782,7 
+767,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -796,6 +780,7 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -812,6 +797,7 @@ github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= @@ -822,6 +808,7 @@ github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzz github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -838,25 +825,20 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.4.0 h1:LUa41nrWTQNGhzdsZ5lTnkwbNjj6rXTdazA1cSdjkOY= 
github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= -github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= 
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -866,20 +848,16 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d h1:QKK1cJOPfb6nDDB8fC1l41/IcezASje2lsA13diVqfM= github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra 
v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -904,7 +882,6 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -944,13 +921,9 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940 h1:p7OofyZ509h8DmPLh8Hn+EIIZm/xYhdZHJ9GnXHdr6U= github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= -github.com/yvasiyarov/gorelic v0.0.7 h1:4DTF1WOM2ZZS/xMOkTFBOcb6XiHu/PKn3rVo6dbewQE= github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= -github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9 h1:AsFN8kXcCVkUFHyuzp1FtYbzp1nCO/H6+1uPSGEyPzM= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod 
h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -997,7 +970,6 @@ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1016,6 +988,7 @@ golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1056,7 +1029,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1095,6 +1067,7 @@ golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1168,6 +1141,7 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1187,6 +1161,7 @@ golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1210,6 +1185,7 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1332,12 +1308,10 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= @@ -1440,6 +1414,7 @@ google.golang.org/genproto v0.0.0-20210830153122-0bac4d21c8ea h1:5eMUso2GVOxypVH google.golang.org/genproto v0.0.0-20210830153122-0bac4d21c8ea/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1489,13 +1464,11 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1505,7 +1478,6 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1524,10 +1496,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= helm.sh/helm/v3 v3.6.0/go.mod h1:mIIus8EOqj+obtycw3sidsR4ORr2aFDmXMSI3k+oeVY= helm.sh/helm/v3 v3.6.3 h1:0nKDyXJr23nI3JrcP7HH7NcR+CYRvro/52Dvr1KhGO0= @@ -1542,6 +1512,7 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= @@ -1551,16 +1522,17 @@ k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQR k8s.io/apimachinery 
v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= -k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= @@ -1568,6 +1540,7 @@ k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHD k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= k8s.io/component-base v0.22.2/go.mod 
h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= @@ -1581,15 +1554,14 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kubectl v0.21.0 h1:WZXlnG/yjcE4LWO2g6ULjFxtzK6H1TKzsfaBFuVIhNg= k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/letsencrypt v0.0.3 h1:H7xDfhkaFFSYEJlKeq38RwX2jYcnTeHuDQyT+mMNMwM= rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/main.go b/main.go index 67f00a920..8085e9860 100644 --- a/main.go +++ b/main.go @@ -77,6 +77,7 @@ func main() { storagePath string storageAddr string storageAdvAddr string + azureCloudConfig string concurrent int requeueDependency time.Duration watchAllNamespaces bool @@ -110,6 +111,9 @@ func 
main() { "The max allowed size in bytes of a file in a Helm chart.") flag.DurationVar(&requeueDependency, "requeue-dependency", 30*time.Second, "The interval at which failing dependencies are reevaluated.") + flag.StringVar(&azureCloudConfig, "azure-cloud-config", + envOrDefault("AZURE_CLOUD_CONFIG", "/etc/kubernetes/azure.json"), + "Azure cloud config file.") clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) @@ -217,6 +221,7 @@ func main() { EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, MetricsRecorder: metricsRecorder, + AzureCloudConfig: azureCloudConfig, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { diff --git a/pkg/azure/cloudprovider/cloudprovider.go b/pkg/azure/cloudprovider/cloudprovider.go new file mode 100644 index 000000000..27316b035 --- /dev/null +++ b/pkg/azure/cloudprovider/cloudprovider.go @@ -0,0 +1,136 @@ +/* + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE +*/ +// based on https://github.com/Azure/aad-pod-identity/blob/0fbc00f8b572ee780199ddb4489a94f1f01d3815/pkg/cloudprovider/cloudprovider.go + +package cloudprovider + +import ( + "fmt" + "os" + "strings" + + "github.com/Azure/aad-pod-identity/pkg/config" + "github.com/Azure/aad-pod-identity/pkg/utils" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "gopkg.in/yaml.v2" + "k8s.io/klog/v2" +) + +// Client is a cloud provider client +type Client struct { + Config config.AzureConfig + configFile string + Authorizer autorest.Authorizer +} + +// NewCloudProvider returns a azure cloud provider client +func NewCloudProvider(configFile string) (*Client, error) { + client := &Client{ + configFile: configFile, + } + if err := client.Init(); err != nil { + return nil, fmt.Errorf("failed to initialize cloud provider client, error: %+v", err) + } + return client, nil +} + +// Init initializes the cloud provider client based +// on a config path or environment variables +func (c *Client) Init() error { + c.Config = config.AzureConfig{} + if c.configFile != "" { + klog.V(6).Info("populating AzureConfig from azure.json") + bytes, err := os.ReadFile(c.configFile) + if err != nil { + return fmt.Errorf("failed to config file %s, error: %+v", c.configFile, err) + } + if err = yaml.Unmarshal(bytes, &c.Config); err != nil { + return fmt.Errorf("failed to unmarshal JSON, error: %+v", err) + } + } else { + klog.V(6).Info("populating AzureConfig from secret/environment variables") + c.Config.Cloud = os.Getenv("CLOUD") + c.Config.TenantID = os.Getenv("TENANT_ID") + c.Config.ClientID = os.Getenv("CLIENT_ID") + 
c.Config.ClientSecret = os.Getenv("CLIENT_SECRET") + c.Config.SubscriptionID = os.Getenv("SUBSCRIPTION_ID") + c.Config.ResourceGroupName = os.Getenv("RESOURCE_GROUP") + c.Config.VMType = os.Getenv("VM_TYPE") + c.Config.UseManagedIdentityExtension = strings.EqualFold(os.Getenv("USE_MSI"), "True") + c.Config.UserAssignedIdentityID = os.Getenv("USER_ASSIGNED_MSI_CLIENT_ID") + } + + azureEnv, err := azure.EnvironmentFromName(c.Config.Cloud) + if err != nil { + return fmt.Errorf("failed to get cloud environment, error: %+v", err) + } + + err = adal.AddToUserAgent("flux-source-controller") + if err != nil { + return fmt.Errorf("failed to add flux-source-controller to user agent, error: %+v", err) + } + + oauthConfig, err := adal.NewOAuthConfig(azureEnv.ActiveDirectoryEndpoint, c.Config.TenantID) + if err != nil { + return fmt.Errorf("failed to create OAuth config, error: %+v", err) + } + + var spt *adal.ServicePrincipalToken + if c.Config.UseManagedIdentityExtension { + // MSI endpoint is required for both types of MSI - system assigned and user assigned. + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return fmt.Errorf("failed to get MSI endpoint, error: %+v", err) + } + // UserAssignedIdentityID is empty, so we are going to use system assigned MSI + if c.Config.UserAssignedIdentityID == "" { + klog.Infof("MIC using system assigned identity for authentication.") + spt, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, azureEnv.ResourceManagerEndpoint) + if err != nil { + return fmt.Errorf("failed to get token from system-assigned identity, error: %+v", err) + } + } else { // User assigned identity usage. 
+ klog.Infof("MIC using user assigned identity: %s for authentication.", utils.RedactClientID(c.Config.UserAssignedIdentityID)) + spt, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, azureEnv.ResourceManagerEndpoint, c.Config.UserAssignedIdentityID) + if err != nil { + return fmt.Errorf("failed to get token from user-assigned identity, error: %+v", err) + } + } + } else { // This is the default scenario - use service principal to get the token. + spt, err = adal.NewServicePrincipalToken( + *oauthConfig, + c.Config.ClientID, + c.Config.ClientSecret, + azureEnv.ResourceManagerEndpoint, + ) + if err != nil { + return fmt.Errorf("failed to get service principal token, error: %+v", err) + } + } + + c.Authorizer = autorest.NewBearerAuthorizer(spt) + return nil +} From 774cf9a2fb71728593ff3c72d0b7ece5b49d2b85 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> Date: Sat, 4 Dec 2021 00:10:35 +0800 Subject: [PATCH 02/53] Remove calling deprecated MSI function Signed-off-by: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> --- pkg/azure/cloudprovider/cloudprovider.go | 22 +++++----------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/pkg/azure/cloudprovider/cloudprovider.go b/pkg/azure/cloudprovider/cloudprovider.go index 27316b035..b6d44ae51 100644 --- a/pkg/azure/cloudprovider/cloudprovider.go +++ b/pkg/azure/cloudprovider/cloudprovider.go @@ -100,24 +100,12 @@ func (c *Client) Init() error { var spt *adal.ServicePrincipalToken if c.Config.UseManagedIdentityExtension { - // MSI endpoint is required for both types of MSI - system assigned and user assigned. 
- msiEndpoint, err := adal.GetMSIVMEndpoint() + klog.Infof("using user assigned identity: %s for authentication.", utils.RedactClientID(c.Config.UserAssignedIdentityID)) + spt, err = adal.NewServicePrincipalTokenFromManagedIdentity(azureEnv.ResourceManagerEndpoint, &adal.ManagedIdentityOptions{ + ClientID: c.Config.UserAssignedIdentityID, + }) if err != nil { - return fmt.Errorf("failed to get MSI endpoint, error: %+v", err) - } - // UserAssignedIdentityID is empty, so we are going to use system assigned MSI - if c.Config.UserAssignedIdentityID == "" { - klog.Infof("MIC using system assigned identity for authentication.") - spt, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, azureEnv.ResourceManagerEndpoint) - if err != nil { - return fmt.Errorf("failed to get token from system-assigned identity, error: %+v", err) - } - } else { // User assigned identity usage. - klog.Infof("MIC using user assigned identity: %s for authentication.", utils.RedactClientID(c.Config.UserAssignedIdentityID)) - spt, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, azureEnv.ResourceManagerEndpoint, c.Config.UserAssignedIdentityID) - if err != nil { - return fmt.Errorf("failed to get token from user-assigned identity, error: %+v", err) - } + return fmt.Errorf("failed to get token from user-assigned identity, error: %+v", err) } } else { // This is the default scenario - use service principal to get the token. 
spt, err = adal.NewServicePrincipalToken( From e89ef087cfd1a70fc75d08c69816540c7ac072fb Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> Date: Sat, 4 Dec 2021 09:46:59 +0800 Subject: [PATCH 03/53] Update CRD Signed-off-by: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> --- .../source.toolkit.fluxcd.io_buckets.yaml | 96 +++++++++--- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 141 +++++++++++++----- .../source.toolkit.fluxcd.io_helmcharts.yaml | 106 ++++++++++--- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 96 +++++++++--- 4 files changed, 336 insertions(+), 103 deletions(-) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index f0e37ff66..2df0f80fd 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.4.1 creationTimestamp: null name: buckets.source.toolkit.fluxcd.io spec: @@ -35,28 +35,42 @@ spec: description: Bucket is the Schema for the buckets API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: BucketSpec defines the desired state of an S3 compatible bucket + description: BucketSpec defines the desired state of an S3 compatible + bucket properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -70,7 +84,10 @@ spec: description: The bucket endpoint address. type: string ignore: - description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. type: string insecure: description: Insecure allows connecting to a non-TLS S3 HTTP endpoint. @@ -85,12 +102,14 @@ spec: - generic - aws - gcp + - azure type: string region: description: The bucket region. type: string secretRef: - description: The name of the secret containing authentication credentials for the Bucket. + description: The name of the secret containing authentication credentials + for the Bucket. properties: name: description: Name of the referent @@ -99,7 +118,8 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. 
type: boolean timeout: default: 20s @@ -114,20 +134,24 @@ spec: description: BucketStatus defines the observed state of a bucket properties: artifact: - description: Artifact represents the output of the last successful Bucket sync. + description: Artifact represents the output of the last successful + Bucket sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -139,23 +163,45 @@ spec: conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -168,7 +214,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -181,14 +231,16 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer url: - description: URL is the download link for the artifact output of the last Bucket sync. + description: URL is the download link for the artifact output of the + last Bucket sync. type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index fee0fb612..fe00a2c24 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.4.1 creationTimestamp: null name: gitrepositories.source.toolkit.fluxcd.io spec: @@ -37,10 +37,14 @@ spec: description: GitRepository is the Schema for the gitrepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -48,17 +52,26 @@ spec: description: GitRepositorySpec defines the desired state of a Git repository. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. 
+ description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -67,21 +80,27 @@ spec: type: object gitImplementation: default: go-git - description: Determines which git client library to use. Defaults to go-git, valid values are ('go-git', 'libgit2'). + description: Determines which git client library to use. Defaults + to go-git, valid values are ('go-git', 'libgit2'). enum: - go-git - libgit2 type: string ignore: - description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. + description: Ignore overrides the set of excluded patterns in the + .sourceignore format (which is the same as .gitignore). If not provided, + a default will be used, consult the documentation for your version + to find out what those are. type: string include: description: Extra git repositories to map into the repository items: - description: GitRepositoryInclude defines a source with a from and to path. + description: GitRepositoryInclude defines a source with a from and + to path. properties: fromPath: - description: The path to copy contents from, defaults to the root directory. 
+ description: The path to copy contents from, defaults to the + root directory. type: string repository: description: Reference to a GitRepository to include. @@ -93,7 +112,8 @@ spec: - name type: object toPath: - description: The path to copy contents to, defaults to the name of the source ref. + description: The path to copy contents to, defaults to the name + of the source ref. type: string required: - repository @@ -103,26 +123,34 @@ spec: description: The interval at which to check for repository updates. type: string recurseSubmodules: - description: When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the 'go-git' GitImplementation. + description: When enabled, after the clone is created, initializes + all submodules within, using their default settings. This option + is available only when using the 'go-git' GitImplementation. type: boolean ref: - description: The Git reference to checkout and monitor for changes, defaults to master branch. + description: The Git reference to checkout and monitor for changes, + defaults to master branch. properties: branch: description: The Git branch to checkout, defaults to master. type: string commit: - description: The Git commit SHA to checkout, if specified Tag filters will be ignored. + description: The Git commit SHA to checkout, if specified Tag + filters will be ignored. type: string semver: - description: The Git tag semver expression, takes precedence over Tag. + description: The Git tag semver expression, takes precedence over + Tag. type: string tag: description: The Git tag to checkout, takes precedence over Branch. type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain identity, identity.pub and known_hosts fields. 
+ description: The secret name containing the Git credentials. For HTTPS + repositories the secret must contain username and password fields. + For SSH repositories the secret must contain identity, identity.pub + and known_hosts fields. properties: name: description: Name of the referent @@ -131,26 +159,31 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean timeout: default: 20s - description: The timeout for remote Git operations like cloning, defaults to 20s. + description: The timeout for remote Git operations like cloning, defaults + to 20s. type: string url: description: The repository URL, can be a HTTP/S or SSH address. pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points to. + description: Verify OpenPGP signature for the Git commit HEAD points + to. properties: mode: - description: Mode describes what git object should be verified, currently ('head'). + description: Mode describes what git object should be verified, + currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all trusted Git authors. + description: The secret name containing the public keys of all + trusted Git authors. properties: name: description: Name of the referent @@ -169,20 +202,24 @@ spec: description: GitRepositoryStatus defines the observed state of a Git repository. properties: artifact: - description: Artifact represents the output of the last successful repository sync. + description: Artifact represents the output of the last successful + repository sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. 
+ description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -194,23 +231,45 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. 
Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -223,7 +282,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -236,7 +299,8 @@ spec: type: object type: array includedArtifacts: - description: IncludedArtifacts represents the included artifacts from the last successful repository sync. + description: IncludedArtifacts represents the included artifacts from + the last successful repository sync. items: description: Artifact represents the output of a source synchronisation. 
properties: @@ -244,14 +308,17 @@ spec: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -262,14 +329,16 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. format: int64 type: integer url: - description: URL is the download link for the artifact output of the last repository sync. + description: URL is the download link for the artifact output of the + last repository sync. 
type: string type: object type: object diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index a5380d360..9ddcb0ad1 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.4.1 creationTimestamp: null name: helmcharts.source.toolkit.fluxcd.io spec: @@ -46,10 +46,14 @@ spec: description: HelmChart is the Schema for the helmcharts API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -57,17 +61,26 @@ spec: description: HelmChartSpec defines the desired state of a Helm chart. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -75,14 +88,18 @@ spec: - namespaceSelectors type: object chart: - description: The name or path the Helm chart is available at in the SourceRef. 
+ description: The name or path the Helm chart is available at in the + SourceRef. type: string interval: description: The interval at which to check the Source for updates. type: string reconcileStrategy: default: ChartVersion - description: Determines what enables the creation of a new artifact. Valid values are ('ChartVersion', 'Revision'). See the documentation of the values for an explanation on their behavior. Defaults to ChartVersion when omitted. + description: Determines what enables the creation of a new artifact. + Valid values are ('ChartVersion', 'Revision'). See the documentation + of the values for an explanation on their behavior. Defaults to + ChartVersion when omitted. enum: - ChartVersion - Revision @@ -94,7 +111,8 @@ spec: description: APIVersion of the referent. type: string kind: - description: Kind of the referent, valid values are ('HelmRepository', 'GitRepository', 'Bucket'). + description: Kind of the referent, valid values are ('HelmRepository', + 'GitRepository', 'Bucket'). enum: - HelmRepository - GitRepository @@ -108,19 +126,28 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean valuesFile: - description: Alternative values file to use as the default chart values, expected to be a relative path in the SourceRef. Deprecated in favor of ValuesFiles, for backwards compatibility the file defined here is merged before the ValuesFiles items. Ignored when omitted. + description: Alternative values file to use as the default chart values, + expected to be a relative path in the SourceRef. Deprecated in favor + of ValuesFiles, for backwards compatibility the file defined here + is merged before the ValuesFiles items. Ignored when omitted. 
type: string valuesFiles: - description: Alternative list of values files to use as the chart values (values.yaml is not included by default), expected to be a relative path in the SourceRef. Values files are merged in the order of this list with the last file overriding the first. Ignored when omitted. + description: Alternative list of values files to use as the chart + values (values.yaml is not included by default), expected to be + a relative path in the SourceRef. Values files are merged in the + order of this list with the last file overriding the first. Ignored + when omitted. items: type: string type: array version: default: '*' - description: The chart version semver expression, ignored for charts from GitRepository and Bucket sources. Defaults to latest when omitted. + description: The chart version semver expression, ignored for charts + from GitRepository and Bucket sources. Defaults to latest when omitted. type: string required: - chart @@ -131,20 +158,24 @@ spec: description: HelmChartStatus defines the observed state of the HelmChart. properties: artifact: - description: Artifact represents the output of the last successful chart sync. + description: Artifact represents the output of the last successful + chart sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. 
It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -156,23 +187,45 @@ spec: conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. 
If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -185,7 +238,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. 
--- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -198,7 +255,8 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. 
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index 52d496e3d..d12a22bc8 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.4.1 creationTimestamp: null name: helmrepositories.source.toolkit.fluxcd.io spec: @@ -37,10 +37,14 @@ spec: description: HelmRepository is the Schema for the helmrepositories API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object @@ -48,17 +52,26 @@ spec: description: HelmRepositorySpec defines the reference to a Helm repository. properties: accessFrom: - description: AccessFrom defines an Access Control List for allowing cross-namespace references to this object. + description: AccessFrom defines an Access Control List for allowing + cross-namespace references to this object. properties: namespaceSelectors: - description: NamespaceSelectors is the list of namespace selectors to which this ACL applies. Items in this list are evaluated using a logical OR operation. + description: NamespaceSelectors is the list of namespace selectors + to which this ACL applies. Items in this list are evaluated + using a logical OR operation. items: - description: NamespaceSelector selects the namespaces to which this ACL applies. An empty map of MatchLabels matches all namespaces in a cluster. + description: NamespaceSelector selects the namespaces to which + this ACL applies. An empty map of MatchLabels matches all + namespaces in a cluster. properties: matchLabels: additionalProperties: type: string - description: MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + description: MatchLabels is a map of {key,value} pairs. + A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. type: object type: object type: array @@ -69,10 +82,18 @@ spec: description: The interval at which to check the upstream for updates. 
type: string passCredentials: - description: PassCredentials allows the credentials from the SecretRef to be passed on to a host that does not match the host as defined in URL. This may be required if the host of the advertised chart URLs in the index differ from the defined URL. Enabling this should be done with caution, as it can potentially result in credentials getting stolen in a MITM-attack. + description: PassCredentials allows the credentials from the SecretRef + to be passed on to a host that does not match the host as defined + in URL. This may be required if the host of the advertised chart + URLs in the index differ from the defined URL. Enabling this should + be done with caution, as it can potentially result in credentials + getting stolen in a MITM-attack. type: boolean secretRef: - description: The name of the secret containing authentication credentials for the Helm repository. For HTTP/S basic auth the secret must contain username and password fields. For TLS the secret must contain a certFile and keyFile, and/or caCert fields. + description: The name of the secret containing authentication credentials + for the Helm repository. For HTTP/S basic auth the secret must contain + username and password fields. For TLS the secret must contain a + certFile and keyFile, and/or caCert fields. properties: name: description: Name of the referent @@ -81,14 +102,16 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: This flag tells the controller to suspend the reconciliation + of this source. type: boolean timeout: default: 60s description: The timeout of index downloading, defaults to 60s. type: string url: - description: The Helm repository URL, a valid URL contains at least a protocol and host. + description: The Helm repository URL, a valid URL contains at least + a protocol and host. 
type: string required: - interval @@ -98,20 +121,24 @@ spec: description: HelmRepositoryStatus defines the observed state of the HelmRepository. properties: artifact: - description: Artifact represents the output of the last successful repository sync. + description: Artifact represents the output of the last successful + repository sync. properties: checksum: description: Checksum is the SHA256 checksum of the artifact. type: string lastUpdateTime: - description: LastUpdateTime is the timestamp corresponding to the last update of this artifact. + description: LastUpdateTime is the timestamp corresponding to + the last update of this artifact. format: date-time type: string path: description: Path is the relative file path of this artifact. type: string revision: - description: Revision is a human readable identifier traceable in the origin source system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm chart version, etc. + description: Revision is a human readable identifier traceable + in the origin source system. It can be a Git commit SHA, Git + tag, a Helm index timestamp, a Helm chart version, etc. type: string url: description: URL is the HTTP address of this artifact. @@ -123,23 +150,45 @@ spec: conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + \ // +patchStrategy=merge // +listType=map // +listMapKey=type + \ Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating details about the transition. This may be an empty string. + description: message is a human readable message indicating + details about the transition. This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ @@ -152,7 +201,11 @@ spec: - Unknown type: string type: - description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -165,7 +218,8 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. From 8e86b574563f5415b50a705fba81bca30c4a2c3f Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 11:17:22 +0200 Subject: [PATCH 04/53] Introduce more explicit Condition types This commit introduces new Condition types to the v1beta1 API, facilitating easier observation of (potentially) problematic state for end-users. - `ArtifactUnavailableCondition`: indicates there is no artifact available for the resource. This Condition should be set by the reconciler as soon as it observes the absence of an artifact for a source. - `CheckoutFailedCondition`: indicates a transient or persistent checkout failure. This Condition should be set by the reconciler as soon as it observes a Git checkout failure, including any prerequisites like the unavailability of the referenced Secret used for authentication. It should be deleted as soon as a successful checkout has been observed again. - `SourceVerifiedCondition`: indicates the integrity of the source has been verified. The Condition should be set to True or False by the reconciler based on the result of the integrity check. If there is no verification mode and/or secret configured, the Condition should be removed. - `IncludeUnavailableCondition`: indicates one of the referenced includes is not available. 
This Condition should for example be set by the reconciler when the include does not exist, or does not have an artifact. If the includes become available, it should be deleted. - `ArtifactOutdatedCondition`: indicates the current artifact of the source is outdated. This Condition should for example be set by the reconciler when it notices there is a newer revision for an artifact, or the previously included artifacts differ from the current available ones. The Condition should be removed after writing a new artifact to the storage. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/gitrepository_types.go | 57 ++++++++++++------- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 12 ++-- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index bab79ca58..7a00fb641 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -34,6 +34,30 @@ const ( LibGit2Implementation = "libgit2" ) +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + CheckoutFailedCondition string = "CheckoutFailed" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. 
+ SourceVerifiedCondition string = "SourceVerified" + + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" +) + // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. @@ -42,10 +66,8 @@ type GitRepositorySpec struct { URL string `json:"url"` // The secret name containing the Git credentials. - // For HTTPS repositories the secret must contain username and password - // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -63,16 +85,16 @@ type GitRepositorySpec struct { // +optional Reference *GitRepositoryRef `json:"ref,omitempty"` - // Verify OpenPGP signature for the Git commit HEAD points to. + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. // +optional Verification *GitRepositoryVerification `json:"verify,omitempty"` - // Ignore overrides the set of excluded patterns in the .sourceignore format - // (which is the same as .gitignore). If not provided, a default will be used, - // consult the documentation for your version to find out what those are. 
+ // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). + // If not provided, a default will be used, consult the documentation for your version to find out what those are. // +optional Ignore *string `json:"ignore,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this source. // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` @@ -84,13 +106,13 @@ type GitRepositorySpec struct { // +optional GitImplementation string `json:"gitImplementation,omitempty"` - // When enabled, after the clone is created, initializes all submodules within, - // using their default settings. + // When enabled, after the clone is created, initializes all submodules within, using their default settings. // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` - // Extra git repositories to map into the repository + // Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for + // this resource. Include []GitRepositoryInclude `json:"include,omitempty"` // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. @@ -144,11 +166,11 @@ type GitRepositoryRef struct { // GitRepositoryVerification defines the OpenPGP signature verification process. type GitRepositoryVerification struct { - // Mode describes what git object should be verified, currently ('head'). + // Mode describes what Git object should be verified, currently ('head'). // +kubebuilder:validation:Enum=head Mode string `json:"mode"` - // The secret name containing the public keys of all trusted Git authors. + // SecretRef containing the public keys of all trusted Git authors. 
SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` } @@ -162,8 +184,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository - // sync. + // URL is the download link for the artifact output of the last repository sync. // +optional URL string `json:"url,omitempty"` @@ -179,12 +200,10 @@ type GitRepositoryStatus struct { } const ( - // GitOperationSucceedReason represents the fact that the git clone, pull - // and checkout operations succeeded. + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. GitOperationSucceedReason string = "GitOperationSucceed" - // GitOperationFailedReason represents the fact that the git clone, pull or - // checkout operations failed. + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed. GitOperationFailedReason string = "GitOperationFailed" ) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index fee0fb612..cfc25685d 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -76,7 +76,7 @@ spec: description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for this resource. items: description: GitRepositoryInclude defines a source with a from and to path. 
properties: @@ -122,7 +122,7 @@ spec: type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain identity, identity.pub and known_hosts fields. + description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: description: Name of the referent @@ -131,7 +131,7 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: Suspend tells the controller to suspend the reconciliation of this source. This flag tells the controller to suspend the reconciliation of this source. type: boolean timeout: default: 20s @@ -142,15 +142,15 @@ spec: pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points to. + description: Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. properties: mode: - description: Mode describes what git object should be verified, currently ('head'). + description: Mode describes what Git object should be verified, currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all trusted Git authors. + description: SecretRef containing the public keys of all trusted Git authors. 
properties: name: description: Name of the referent From c50a1707d3a7923cac90305f9242daa1a8b856bc Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:33:18 +0200 Subject: [PATCH 05/53] Implement new runtime interfaces, prepare testenv This commit ensures all API objects implement the interfaces used by the runtime package to work with conditions, etc., and prepares the test suite to work with the `pkg/runtime/testenv` wrapper. Changes are made in a backwards compatible way (that being: the existing code can still be build and works as expected), but without proper dependency boundaries. The result of this is that the API package temporary depends on the runtime package, which is resolved when all reconcilers have been refactored and the API package does no longer contain condition modifying functions. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/go.mod | 6 +- api/go.sum | 94 +++++++- api/v1beta1/bucket_types.go | 33 ++- api/v1beta1/gitrepository_types.go | 33 ++- api/v1beta1/helmchart_types.go | 35 ++- api/v1beta1/helmrepository_types.go | 33 ++- .../source.toolkit.fluxcd.io_buckets.yaml | 4 +- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 8 +- .../source.toolkit.fluxcd.io_helmcharts.yaml | 2 +- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 4 +- controllers/gitrepository_controller.go | 8 +- controllers/helmchart_controller_test.go | 116 +++++----- controllers/helmrepository_controller_test.go | 8 +- controllers/legacy_suite_test.go | 184 +++++++++++++++ controllers/storage.go | 8 +- controllers/suite_test.go | 218 +++++++----------- docs/api/source.md | 51 ++-- go.mod | 5 +- go.sum | 27 +-- 19 files changed, 578 insertions(+), 299 deletions(-) create mode 100644 controllers/legacy_suite_test.go diff --git a/api/go.mod b/api/go.mod index dbd78e413..2c6891e10 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,7 +4,11 @@ go 1.16 require ( github.com/fluxcd/pkg/apis/acl v0.0.1 - github.com/fluxcd/pkg/apis/meta v0.10.1 
+ github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 + // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as + // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying + // functions). + github.com/fluxcd/pkg/runtime v0.13.0-rc.3 k8s.io/apimachinery v0.22.2 sigs.k8s.io/controller-runtime v0.10.2 ) diff --git a/api/go.sum b/api/go.sum index 80a589759..33335ffb0 100644 --- a/api/go.sum +++ b/api/go.sum @@ -22,13 +22,17 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -52,6 +56,7 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -59,7 +64,9 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -67,17 +74,22 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -86,6 +98,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -98,13 +111,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch 
v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQxaNQ= github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= -github.com/fluxcd/pkg/apis/meta v0.10.1 h1:zISenRlqNG7WK8TP3HxZTvv+1Z7JZOUIQvZrOr6pQ2w= -github.com/fluxcd/pkg/apis/meta v0.10.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3 h1:VxtmEL/m3/9wJBhhhWQ48fz8m93B7UiyVi5cXYbiy3E= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3/go.mod h1:5ioX9wb63+RUvHBdjRsFG4uYn6Ll/Yoa7Ema6XKIIuQ= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -125,6 +141,7 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 
h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -133,6 +150,7 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -145,6 +163,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ 
-170,6 +189,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -192,28 +212,35 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod 
h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -222,6 +249,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -230,6 +258,7 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -238,6 +267,7 @@ github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUB github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -263,10 +293,14 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -278,6 +312,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -296,17 +331,20 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -315,6 +353,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -324,20 +363,25 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -360,9 +404,12 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -379,14 +426,19 @@ 
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -410,13 +462,17 @@ go.opentelemetry.io/otel/sdk/export/metric 
v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -459,6 +515,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -477,6 +534,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -493,12 +551,14 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -526,6 +586,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -549,6 +610,7 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -561,6 +623,7 @@ golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6 golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -571,10 +634,13 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -617,12 +683,15 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -639,6 +708,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -662,6 +732,7 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -687,6 +758,7 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -694,6 +766,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -726,15 +799,23 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.2/go.mod 
h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= +k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -744,17 +825,24 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod 
h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 6d3b68b10..b2b2d26e7 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -126,7 +127,7 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return bucket } @@ -136,14 +137,14 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) return bucket } @@ -158,22 +159,32 @@ func BucketReadyMessage(bucket Bucket) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. 
+func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *Bucket) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *Bucket) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *Bucket) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 7a00fb641..bd305eddf 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -215,7 +216,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -226,7 +227,7 @@ func 
GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -234,7 +235,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -249,22 +250,32 @@ func GitRepositoryReadyMessage(repository GitRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *GitRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *GitRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index a6aa189e4..3228be1c2 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -152,7 +153,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return chart } @@ -162,7 +163,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) return chart } @@ -170,7 +171,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. 
func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) return chart } @@ -185,22 +186,26 @@ func HelmChartReadyMessage(chart HelmChart) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { - return in.Status.Artifact +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// GetStatusConditions returns a pointer to the Status.Conditions slice -func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { - return &in.Status.Conditions +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions } -// GetInterval returns the interval at which the source is updated. -func (in *HelmChart) GetInterval() metav1.Duration { +// GetInterval returns the interval at which the source is reconciled. +func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + // GetValuesFiles returns a merged list of ValuesFiles. func (in *HelmChart) GetValuesFiles() []string { valuesFiles := in.Spec.ValuesFiles @@ -212,6 +217,12 @@ func (in *HelmChart) GetValuesFiles() []string { return valuesFiles } +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 400eb83f1..63f59b161 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -113,7 +114,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -123,7 +124,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -131,7 +132,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. 
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -146,22 +147,32 @@ func HelmRepositoryReadyMessage(repository HelmRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *HelmRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *HelmRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 5f20d8d8f..e92b17220 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -93,7 +93,7 @@ spec: description: The name of the secret containing authentication credentials for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -181,7 +181,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index cfc25685d..625a6efd8 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -87,7 +87,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -125,7 +125,7 @@ spec: description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. 
type: string required: - name @@ -153,7 +153,7 @@ spec: description: SecretRef containing the public keys of all trusted Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -262,7 +262,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index a5380d360..8f2bccfe2 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -198,7 +198,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index 52d496e3d..28628a005 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -75,7 +75,7 @@ spec: description: The name of the secret containing authentication credentials for the Helm repository. For HTTP/S basic auth the secret must contain username and password fields. 
For TLS the secret must contain a certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -165,7 +165,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 9dd92290f..b58188a96 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -122,7 +122,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // check dependencies if len(repository.Spec.Include) > 0 { if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()) + repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()) if err := r.updateStatus(ctx, req, repository.Status); err != nil { log.Error(err, "unable to update status for dependency not ready") return ctrl.Result{Requeue: true}, err @@ -278,7 +278,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour var gr sourcev1.GitRepository err := r.Get(context.Background(), dName, &gr) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } includedArtifacts = append(includedArtifacts, gr.GetArtifact()) } @@ -323,11 +323,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, 
repository sour for i, incl := range repository.Spec.Include { toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b15..f762fcd08 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -129,9 +129,9 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -146,7 +146,7 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) When("Setting valid valuesFiles attribute", func() { @@ -161,12 +161,12 @@ var _ 
= Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -184,12 +184,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -207,12 +207,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + 
ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -228,12 +228,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -250,12 +250,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -271,12 +271,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -682,7 +682,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Committing a new version in the chart metadata") @@ -727,9 +727,9 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := 
loader.Load(storage.LocalPath(*now.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values).ToNot(BeNil()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) @@ -744,7 +744,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) @@ -762,12 +762,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -785,12 +785,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, 
interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -808,12 +808,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -834,16 +834,16 @@ var _ = Describe("HelmChartReconciler", func() { // Use status condition to be sure. 
for _, condn := range got.Status.Conditions { if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } } return false }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -860,12 +860,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -970,7 +970,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) }) }) @@ -1213,9 +1213,9 @@ var _ = 
Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -1232,12 +1232,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1255,12 +1255,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1278,12 +1278,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1299,12 +1299,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := 
helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -1321,12 +1321,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a60..69a3656e9 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Updating the chart index") @@ -112,7 +112,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) updated := &sourcev1.HelmRepository{} @@ 
-291,7 +291,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") @@ -385,7 +385,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go new file mode 100644 index 000000000..303eb7933 --- /dev/null +++ b/controllers/legacy_suite_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "math/rand" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var ginkgoTestEnv *envtest.Environment +var ginkgoTestStorage *Storage + +var examplePublicKey []byte +var examplePrivateKey []byte +var exampleCA []byte + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger( + zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), + ) + + By("bootstrapping test environment") + t := true + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + ginkgoTestEnv = &envtest.Environment{ + UseExistingCluster: &t, + } + } else { + ginkgoTestEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + } + + var err error + cfg, err = ginkgoTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = sourcev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + Expect(loadExampleKeys()).To(Succeed()) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") + + ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", 
time.Second*30) + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&GitRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup GitRepositoryReconciler") + + err = (&HelmRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") + + err = (&HelmChartReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + + go func() { + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + if ginkgoTestStorage != nil { + err := os.RemoveAll(ginkgoTestStorage.BasePath) + Expect(err).NotTo(HaveOccurred()) + } + err := ginkgoTestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func loadExampleKeys() (err error) { + examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + return err + 
} + examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + return err + } + exampleCA, err = os.ReadFile("testdata/certs/ca.pem") + return err +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/storage.go b/controllers/storage.go index d765e4303..448194150 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 5f5341155..9ba2c472b 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,173 +17,131 @@ limitations under the License. package controllers import ( + "fmt" "math/rand" - "net/http" "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
-var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage +const ( + timeout = 10 * time.Second + interval = 1 * time.Second +) -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte +var ( + testEnv *testenv.Environment + testStorage *Storage + testServer *testserver.ArtifactServer + testEventsH controller.Events + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte +) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) +func init() { + rand.Seed(time.Now().UnixNano()) } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) +func TestMain(m *testing.M) { + initTestTLS() - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = 
NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + testServer, err = testserver.NewTempArtifactServer() + if err != nil { + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) + } + fmt.Println("Starting the test storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) + if err != nil { + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) + } + + testEventsH = controller.MakeEvents(testEnv, "source-controller-test", nil) + testMetricsH = controller.MustMakeMetrics(testEnv) + + //if err := (&GitRepositoryReconciler{ + // Client: testEnv, + // Events: testEventsH, + // Metrics: testMetricsH, + // Storage: testStorage, + 
//}).SetupWithManager(testEnv); err != nil { + // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + //} go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) + fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } }() + <-testEnv.Manager.Elected() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + code := m.Run() - close(done) -}, 60) + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } -var _ = AfterSuite(func() { - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) -func init() { - rand.Seed(time.Now().UnixNano()) + os.Exit(code) } -func loadExampleKeys() (err error) { - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + panic(err) + } + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") if err != nil { - return err + panic(err) } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") if err != nil { - return err + panic(err) } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err } -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] 
+func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { + storage, err := NewStorage(s.Root(), s.URL(), timeout) + if err != nil { + return nil, err } - return string(b) + return storage, nil } diff --git a/docs/api/source.md b/docs/api/source.md index ba84a7c63..b917f9c71 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -372,7 +370,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -384,9 +382,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -398,7 +395,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. 
+This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -423,8 +421,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -438,7 +435,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1349,10 +1347,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -1408,7 +1404,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -1420,9 +1416,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). 
+If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -1434,7 +1429,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -1459,8 +1455,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -1474,7 +1469,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1547,8 +1543,7 @@ string </td> <td> <em>(Optional)</em> -<p>URL is the download link for the artifact output of the last repository -sync.</p> +<p>URL is the download link for the artifact output of the last repository sync.</p> </td> </tr> <tr> @@ -1623,7 +1618,7 @@ string </em> </td> <td> -<p>Mode describes what git object should be verified, currently (‘head’).</p> +<p>Mode describes what Git object should be verified, currently (‘head’).</p> </td> </tr> <tr> @@ -1636,7 +1631,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference </em> </td> <td> -<p>The secret name containing the public keys of all trusted Git authors.</p> +<p>SecretRef containing the public keys of all trusted Git authors.</p> </td> </tr> </tbody> diff --git a/go.mod b/go.mod index 775b6ba79..af3dd099d 100644 --- a/go.mod +++ b/go.mod @@ -15,13 +15,14 @@ 
require ( github.com/cyphar/filepath-securejoin v0.2.2 github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/fluxcd/pkg/apis/meta v0.10.1 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 github.com/fluxcd/pkg/gittestserver v0.4.2 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.2.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.12.0 + github.com/fluxcd/pkg/runtime v0.13.0-rc.3 github.com/fluxcd/pkg/ssh v0.1.0 + github.com/fluxcd/pkg/testserver v0.1.0 github.com/fluxcd/pkg/untar v0.1.0 github.com/fluxcd/pkg/version v0.1.0 github.com/fluxcd/source-controller/api v0.19.1 diff --git a/go.sum b/go.sum index 75f12b595..424c282d7 100644 --- a/go.sum +++ b/go.sum @@ -269,9 +269,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQxaNQ= github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= -github.com/fluxcd/pkg/apis/meta v0.10.0/go.mod h1:CW9X9ijMTpNe7BwnokiUOrLl/h13miwVr/3abEQLbKE= -github.com/fluxcd/pkg/apis/meta v0.10.1 h1:zISenRlqNG7WK8TP3HxZTvv+1Z7JZOUIQvZrOr6pQ2w= -github.com/fluxcd/pkg/apis/meta v0.10.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= github.com/fluxcd/pkg/gittestserver v0.4.2 h1:XqoiemTnnUNldnOw8N7OTdalu2iZp1FTRhp9uUauDJQ= github.com/fluxcd/pkg/gittestserver v0.4.2/go.mod h1:hUPx21fe/6oox336Wih/XF1fnmzLmptNMOvATbTZXNY= github.com/fluxcd/pkg/gitutil v0.1.0 
h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -280,8 +279,8 @@ github.com/fluxcd/pkg/helmtestserver v0.2.0 h1:cE7YHDmrWI0hr9QpaaeQ0vQ16Z0IiqZKi github.com/fluxcd/pkg/helmtestserver v0.2.0/go.mod h1:Yie8n7xuu5Nvf1Q7302LKsubJhWpwzCaK0rLJvmF7aI= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.12.0 h1:BPZZ8bBkimpqGAPXqOf3LTaw+tcw6HgbWyCuzbbsJGs= -github.com/fluxcd/pkg/runtime v0.12.0/go.mod h1:EyaTR2TOYcjL5U//C4yH3bt2tvTgIOSXpVRbWxUn/C4= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3 h1:VxtmEL/m3/9wJBhhhWQ48fz8m93B7UiyVi5cXYbiy3E= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3/go.mod h1:5ioX9wb63+RUvHBdjRsFG4uYn6Ll/Yoa7Ema6XKIIuQ= github.com/fluxcd/pkg/ssh v0.1.0 h1:cym2bqiT4IINOdLV0J6GYxer16Ii/7b2+RlK3CG+CnA= github.com/fluxcd/pkg/ssh v0.1.0/go.mod h1:KUuVhaB6AX3IHTGCd3Ti/nesn5t1Nz4zCThFkkjHctM= github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= @@ -1125,6 +1124,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1262,6 +1262,7 @@ golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1541,33 +1542,32 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= -k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= k8s.io/apimachinery v0.21.0/go.mod 
h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= -k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= -k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= 
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= @@ -1594,8 +1594,9 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ= From 65dd61ebf213949c86b26b3ab976a3546a0632b7 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:54:45 +0200 Subject: [PATCH 06/53] Introduce `artifactSet` to replace `hasArtifactUpdated` NOTE: Remove `hasArtifactUpdated` in the future once it's no longer used. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact.go | 38 ++++++++++++++++++++++++ controllers/artifact_test.go | 56 ++++++++++++++++++++++-------------- 2 files changed, 73 insertions(+), 21 deletions(-) diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03c..fa21bd0a4 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} + // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. 
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 959661615..935c93bf7 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, 
@@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } From c27850118068fc456c3b88c8f95dfe4c5f8d3687 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 21:49:03 +0530 Subject: [PATCH 07/53] source: Add `GetRequeueAfter` The problem with `GetInterval()` was that the returned type was of `metav1.Duration`, while almost anywhere it was used, a type of `time.Duration` was requested. The result of this was that we had to call `GetInterval().Duration` all the time, which would become a bit cumbersome after awhile. To prevent this, we introduce a new `GetRequeueAfter() time.Duration` method, which both results the right type, and bears a name that is easier to remember where the value is used most; while setting the `Result.RequeueAfter` during reconcile operations. 
The introduction of this method deprecates `GetInterval()`, which should be removed in a future MINOR release. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 8 ++++++++ api/v1beta1/gitrepository_types.go | 8 ++++++++ api/v1beta1/helmchart_types.go | 8 ++++++++ api/v1beta1/helmrepository_types.go | 8 ++++++++ api/v1beta1/source.go | 5 +++++ 5 files changed, 37 insertions(+) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index b2b2d26e7..19d796c4b 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -169,7 +171,13 @@ func (in *Bucket) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in Bucket) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index bd305eddf..af44858bd 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -260,7 +262,13 @@ func (in *GitRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. 
+func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in GitRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 3228be1c2..94231ed7c 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -196,7 +198,13 @@ func (in *HelmChart) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 63f59b161..dcb69a20b 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -157,7 +159,13 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. 
func (in HelmRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/source.go b/api/v1beta1/source.go index 5c10d00fc..f22780dd1 100644 --- a/api/v1beta1/source.go +++ b/api/v1beta1/source.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,9 +31,12 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. GetArtifact() *Artifact // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. GetInterval() metav1.Duration } From 2d88469d51f00522d2e1ae03b16867cb726dc4f6 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:30:20 +0530 Subject: [PATCH 08/53] Use new events and metrics helpers in main.go Signed-off-by: Sunny <darkowlzz@protonmail.com> --- main.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/main.go b/main.go index 67f00a920..73f015456 100644 --- a/main.go +++ b/main.go @@ -33,13 +33,12 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" @@ -114,6 +113,7 @@ func main() { 
clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(logger.NewLogger(logOptions)) @@ -132,9 +132,6 @@ func main() { } } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) - watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -163,6 +160,12 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + eventsH := helper.MakeEvents(mgr, controllerName, eventRecorder) + metricsH := helper.MustMakeMetrics(mgr) + + // NOTE: Temporarily, to be able to keep reconcilers-dev branches in sync. + _ = eventsH + if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) } @@ -174,7 +177,7 @@ func main() { Storage: storage, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -189,7 +192,7 @@ func main() { Getters: getters, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -203,7 +206,7 @@ func main() { Getters: getters, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -216,7 +219,7 @@ func main() { Storage: storage, 
EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From e7f0406e05fa35bb4111f90a5641f7e5f46dd5aa Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 25 Nov 2021 00:03:11 +0530 Subject: [PATCH 09/53] Move Artifact conditions to conditions Also, introduce FetchFailedCondition for generic fetch failures. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/condition_types.go | 16 ++++++++++++++++ api/v1beta1/gitrepository_types.go | 8 -------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api/v1beta1/condition_types.go b/api/v1beta1/condition_types.go index 4077a2ab6..1a9db62ad 100644 --- a/api/v1beta1/condition_types.go +++ b/api/v1beta1/condition_types.go @@ -18,6 +18,22 @@ package v1beta1 const SourceFinalizer = "finalizers.fluxcd.io" +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
+ FetchFailedCondition string = "FetchFailed" +) + const ( // URLInvalidReason represents the fact that a given source has an invalid URL. URLInvalidReason string = "URLInvalid" diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index af44858bd..21449602a 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -38,10 +38,6 @@ const ( ) const ( - // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactUnavailableCondition string = "ArtifactUnavailable" - // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. @@ -55,10 +51,6 @@ const ( // exist, or does not have an Artifact. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. IncludeUnavailableCondition string = "IncludeUnavailable" - - // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactOutdatedCondition string = "ArtifactOutdated" ) // GitRepositorySpec defines the desired state of a Git repository. 
From a722ec76d8ac9f63e4280e937c309a5461f20915 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Fri, 26 Nov 2021 00:18:08 +0530 Subject: [PATCH 10/53] Add gomega matcher for artifact Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact_matchers_test.go | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 controllers/artifact_matchers_test.go diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 000000000..63ff81ced --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
+func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} From 0ae7758e9f97afd7416fb4df073fa0fe317cee72 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> Date: Tue, 7 Dec 2021 08:17:56 +0800 Subject: [PATCH 11/53] Fixes PR comments Signed-off-by: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> --- controllers/bucket_controller.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 658aaf501..ecf1bacb7 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -20,7 +20,6 @@ import ( "context" "crypto/sha1" "fmt" - "io/ioutil" "net/url" "os" "path/filepath" @@ -458,7 +457,7 @@ func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, bucket so return fmt.Errorf("failed to create local directory '%s': %w", blobURL, err) } - err = 
ioutil.WriteFile(localPath, content, 0600) + err = os.WriteFile(localPath, content, 0600) return err } @@ -483,9 +482,9 @@ func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, bucket so matcher := sourceignore.NewMatcher(ps) // download blob content - marker := azblob.Marker{Val: nil} - for true { - resp, err := blobContainer.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{ + marker := azblob.Marker{} + for { + resp, err := blobContainer.ListBlobsFlatSegment(ctxTimeout, marker, azblob.ListBlobsSegmentOptions{ Prefix: "", }) if err != nil { @@ -496,7 +495,7 @@ func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, bucket so break } - var eg errgroup.Group + eg, ctxErr := errgroup.WithContext(ctxTimeout) for i := range resp.Segment.BlobItems { object := resp.Segment.BlobItems[i] eg.Go(func() error { @@ -509,7 +508,7 @@ func (r *BucketReconciler) reconcileWithAzureBlob(ctx context.Context, bucket so } localPath := filepath.Join(tempDir, object.Name) - if err := downloadBlobToLocalFile(ctxTimeout, *azBlobPipeline, blobContainer.URL(), object.Name, localPath); err != nil { + if err := downloadBlobToLocalFile(ctxErr, *azBlobPipeline, blobContainer.URL(), object.Name, localPath); err != nil { return fmt.Errorf("failed to download blob items from blob container '%s': %w", blobContainerURLString, err) } From f1302d8c9c2fa323d711c5a26594014ce4a9b592 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:08:52 +0530 Subject: [PATCH 12/53] bucket: Replace GetInterval() with GetRequeueAfter() Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 86911102e..511c74432 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -171,10 +171,10 @@ func (r *BucketReconciler) Reconcile(ctx 
context.Context, req ctrl.Request) (ctr log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", time.Since(start).String(), - bucket.GetInterval().Duration.String(), + bucket.GetRequeueAfter().String(), )) - return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil } func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { From 795b83d897a0dfdbf1dd07598576d2d126190fb3 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Sat, 31 Jul 2021 03:58:43 +0200 Subject: [PATCH 13/53] Rewrite `BucketReconciler` to new standards This commit rewrites the `BucketReconciler` to new standards, while implementing the newly introduced Condition types, and trying to adhere better to Kubernetes API conventions. More specifically it introduces: - Implementation of more explicit Condition types to highlight abnormalities. - Extensive usage of the `conditions` subpackage from `runtime`. - Better and more conflict-resilient (status)patching of reconciled objects using the `patch` subpackage from runtime. - Proper implementation of kstatus' `Reconciling` and `Stalled` conditions. - Refactor of reconciler logic, including more efficient detection of changes to bucket objects by making use of the etag data available, and downloading of object files in parallel with a limited number of workers (4). - Integration tests that solely rely on `testenv` and do not use Ginkgo. There are a couple of TODOs marked in-code, these are suggestions for the future and should be non-blocking. In addition to the TODOs, more complex and/or edge-case test scenarios may be added as well. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 60 +- controllers/bucket_controller.go | 959 +++++++++++++++----------- controllers/bucket_controller_test.go | 671 +++++++++++++++++- controllers/suite_test.go | 9 + main.go | 10 +- 5 files changed, 1235 insertions(+), 474 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 19d796c4b..815d5a5d9 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -19,12 +19,10 @@ package v1beta1 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -32,6 +30,19 @@ const ( BucketKind = "Bucket" ) +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +const ( + // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + DownloadFailedCondition string = "DownloadFailed" +) + // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). @@ -85,12 +96,6 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } -const ( - GenericBucketProvider string = "generic" - AmazonBucketProvider string = "aws" - GoogleBucketProvider string = "gcp" -) - // BucketStatus defines the observed state of a bucket type BucketStatus struct { // ObservedGeneration is the last observed generation. 
@@ -122,45 +127,6 @@ const ( BucketOperationFailedReason string = "BucketOperationFailed" ) -// BucketProgressing resets the conditions of the Bucket to metav1.Condition of -// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified Bucket. -func BucketProgressing(bucket Bucket) Bucket { - bucket.Status.ObservedGeneration = bucket.Generation - bucket.Status.URL = "" - bucket.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return bucket -} - -// BucketReady sets the given Artifact and URL on the Bucket and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified Bucket. -func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { - bucket.Status.Artifact = &artifact - bucket.Status.URL = url - conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with -// the given reason and message. It returns the modified Bucket. -func BucketNotReady(bucket Bucket, reason, message string) Bucket { - conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func BucketReadyMessage(bucket Bucket) string { - if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. 
func (in Bucket) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 511c74432..e46c8b3f9 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,25 +18,26 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" "fmt" "os" "path/filepath" + "sort" "strings" "time" - "github.com/go-logr/logr" + gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" + kerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -44,10 +45,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" "github.com/fluxcd/source-controller/pkg/sourceignore" @@ -61,11 +63,10 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - 
ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + helper.Events + helper.Metrics + + Storage *Storage } type BucketReconcilerOptions struct { @@ -84,521 +85,683 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. 
- if bucket.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr + // Always attempt to patch the object and status after each reconciliation + 
defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize the Ready condition based on abnormalities that may have been observed + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } + + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { 
+ retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - r.recordReadiness(ctx, reconciledBucket) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetRequeueAfter().String(), - )) - return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil + // Reconcile actual object + return r.reconcile(ctx, obj) } -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - var err error - var sourceBucket sourcev1.Bucket - tempDir, err := os.MkdirTemp("", bucket.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tempDir) - if bucket.Spec.Provider == sourcev1.GoogleBucketProvider { - sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } else { - sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, 
sourcev1.StorageOperationFailedReason, err.Error()), err - } +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. +func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { + return result, err } - // create artifact dir - err = r.Storage.MkdirAll(artifact) + // Create temp working dir + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) + return ctrl.Result{}, err } + defer os.RemoveAll(tmpDir) - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the source from upstream + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { + return 
ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err } - defer unlock() - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the artifact to storage + if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { + return result, err } - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + var secret corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + if err := r.Get(ctx, secretName, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err + } } - // Record deleted status - r.recordReadiness(ctx, bucket) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil } -// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket -// using a gcp client -func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - log := logr.FromContext(ctx) - gcpClient, err := r.authGCP(ctx, bucket) +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. 
In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + // Build the client with the configuration from the object and secret + s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } - defer gcpClient.Close(log) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + 
"Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } - // Look for file with ignore rules first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { + if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, 
sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content - for { - object, err := objects.Next() - if err == gcp.IteratorDone { - break - } - if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + // Ignore directories and the .sourceignore 
file + if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { continue } - - if matcher.Match(strings.Split(object.Name, "/"), false) { + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Name) - if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + index[object.Key] = object.ETag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(ctx, obj, events.EventSeverityError, 
sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return sourcev1.Bucket{}, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// reconcileWithMinio handles getting objects from an S3 compatible bucket -// using a minio client -func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - s3Client, err := r.authMinio(ctx, bucket) +// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' 
does not exist", obj.Spec.BucketName) } // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} matcher := sourceignore.NewMatcher(ps) - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) + for { + object, err := objects.Next() + if err != nil { + if err == gcp.IteratorDone { + break + } + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { continue } - if matcher.Match(strings.Split(object.Key, "/"), false) { + if matcher.Match(strings.Split(object.Name, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } + index[object.Name] = 
object.Etag } - return sourcev1.Bucket{}, nil -} -// authGCP creates a new Google Cloud Platform storage client -// to interact with the storage service. -func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { - var client *gcp.GCPClient - var err error - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { - return nil, err - } - client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) - if err != nil { - return nil, err - } - } else { - client, err = gcp.NewClient(ctx) - if err != nil { - return nil, err + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + 
conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return client, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// authMinio creates a new Minio client to interact with S3 -// compatible storage services. -func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact does not differ from the object's current, it returns early. +// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is +// updated to its path. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + } + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) + } + }() + + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + } + + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + return ctrl.Result{}, err + } else if !f.IsDir() { + err := fmt.Errorf("source path '%s' is not a directory", dir) + ctrl.LoggerFrom(ctx).Error(err, "invalid target path") + return ctrl.Result{}, err } - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == 
sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } + unlock, err := r.Storage.Lock(artifact) + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err + } + defer unlock() - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, nil); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Unable to archive artifact to storage: %s", err) + return ctrl.Result{}, err } + r.Events.EventWithMetaf(ctx, obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, events.EventSeverityInfo, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) - return minio.New(bucket.Spec.Endpoint, &opt) -} + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() -// checksum calculates the SHA1 checksum of the given root directory. -// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. 
-func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := os.ReadFile(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.Events.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - return fmt.Sprintf("%x", sum.Sum(nil)), nil + if url != "" { + obj.Status.URL = url + } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. -func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true - } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true +// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err } - return bucket, false + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil } -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) +// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
+func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(ctx, obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(ctx, obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", "Garbage collected old artifacts") } return nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := logr.FromContext(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) +// buildMinioClient constructs a minio.Client with the data from the given object and Secret. +// It returns an error if the Secret does not have the required fields, or if there is no credential handler +// configured. 
+func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return + if secret != nil { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) } + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") } + return minio.New(obj.Spec.Endpoint, &opts) } -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. 
+func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { + var client *gcp.GCPClient + var err error + if secret != nil { + if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { + return nil, err + } + client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) + if err != nil { + return nil, err + } } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) + client, err = gcp.NewClient(ctx) + if err != nil { + return nil, err + } } + return client, nil } -func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) { - if r.MetricsRecorder == nil { - return - } - log := logr.FromContext(ctx) +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !bucket.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend) +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. 
+func (i etagIndex) Revision() (string, error) { + keyIndex := make([]string, 0, len(i)) + for k := range i { + keyIndex = append(keyIndex, k) } -} - -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err + sort.Strings(keyIndex) + sum := sha256.New() + for _, k := range keyIndex { + if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil { + return "", err + } } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus - - return r.Status().Patch(ctx, &bucket, patch) + return fmt.Sprintf("%x", sum.Sum(nil)), nil } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 01ff20d87..02164412b 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -17,59 +17,567 @@ limitations under the License. package controllers import ( + "context" + "crypto/md5" + "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" + "path" "path/filepath" + "strings" "testing" + "time" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestBucketReconciler_checksum(t *testing.T) { +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return 
false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want ctrl.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + 
assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: ctrl.Result{Requeue: true}, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + got, err := r.reconcileStorage(context.TODO(), obj) + g.Expect(err != 
nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + }) + } +} + +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: 
sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t 
*testing.T) { + tests := []struct { + name string + artifact sourcev1.Artifact + beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "artifact revision up-to-date", + artifact: sourcev1.Artifact{ + Revision: "existing", + }, + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Status.Artifact = &artifact + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "dir path deleted", + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + _ = os.RemoveAll(dir) }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", + wantErr: true, }, + //{ + // name: "dir path empty", + //}, + //{ + // name: "success", + // artifact: sourcev1.Artifact{ + // Revision: "existing", + // }, + // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // obj.Status.Artifact = &artifact + // }, + // assertConditions: []metav1.Condition{ + // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), + // }, + //}, + //{ + // name: "symlink", + //}, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: 
sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, } - defer os.RemoveAll(root) + if tt.beforeFunc != nil { - tt.beforeFunc(root) + tt.beforeFunc(obj, tt.artifact, tmpDir) } - got, err := (&BucketReconciler{}).checksum(root) + + r := &BucketReconciler{ + Storage: testStorage, + } + + got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, tt.artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + t.Errorf("revision() got = %v, want %v", got, tt.want) } }) } } +// helpers + func 
mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +588,120 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*s3MockObject +} + +func newS3Server(bucketName string) *s3MockServer { + s := &s3MockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *s3MockServer) Start() { + s.srv.Start() +} + +func (s *s3MockServer) Stop() { + s.srv.Close() +} + +func (s *s3MockServer) HTTPAddress() string { + return s.srv.URL +} + +func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + return + } + + q := r.URL.Query() + + if q["location"] != nil { + fmt.Fprint(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">Europe</LocationConstraint> + `) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + contents += fmt.Sprintf(` + <Contents> + <Key>%s</Key> + <LastModified>%s</LastModified> + <Size>%d</Size> + <ETag>"%b"</ETag> + <StorageClass>STANDARD</StorageClass> + </Contents>`, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Name>%s</Name> + <Prefix/> + <Marker/> + <KeyCount>%d</KeyCount> + <MaxKeys>1000</MaxKeys> + <IsTruncated>false</IsTruncated> 
+ %s +</ListBucketResult> + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *s3MockObject + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + return + } + + w.Write(found.Content) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9ba2c472b..23d649f37 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -98,6 +98,15 @@ func TestMain(m *testing.M) { // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) //} + if err := (&BucketReconciler{ + Client: testEnv, + Events: testEventsH, + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { diff --git a/main.go b/main.go index 73f015456..4e3a1f539 100644 --- a/main.go +++ b/main.go @@ -214,12 +214,10 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + Events: eventsH, + Metrics: metricsH, + Storage: storage, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ 
MaxConcurrentReconciles: concurrent, }); err != nil { From f82a906bff6ea2912d51383df73a5673ed70e716 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Mon, 9 Aug 2021 13:24:49 +0200 Subject: [PATCH 14/53] Consolidate condition types into `FetchFailed` This commit consolidates the `DownloadFailed` and `CheckoutFailed` Condition types into a new more generic `FetchFailed` type to simplify the API and observations by consumers. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 7 ---- controllers/bucket_controller.go | 46 +++++++++++++-------------- controllers/bucket_controller_test.go | 8 ++--- 3 files changed, 27 insertions(+), 34 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 815d5a5d9..38ea4ca7f 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -36,13 +36,6 @@ const ( GoogleBucketProvider string = "gcp" ) -const ( - // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the - // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - DownloadFailedCondition string = "DownloadFailed" -) - // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). 
diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index e46c8b3f9..7513968fe 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -122,12 +122,12 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res meta.ReadyCondition, conditions.WithConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), conditions.WithNegativePolarityConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), ) @@ -137,7 +137,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, meta.ReadyCondition, meta.ReconcilingCondition, @@ -259,7 +259,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret -// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// fails, it records v1beta1.FetchFailedCondition=True and returns early. // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { @@ -270,7 +270,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu Name: obj.Spec.SecretRef.Name, } if err := r.Get(ctx, secretName, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) @@ -292,8 +292,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // // The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into // account. In case of an error during the download process (including transient errors), it records -// v1beta1.DownloadFailedCondition=True and returns early. -// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// v1beta1.FetchFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
// @@ -303,7 +303,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) @@ -316,12 +316,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -332,7 +332,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -341,7 +341,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -362,7 +362,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -415,7 +415,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(ctx, obj, 
events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -424,7 +424,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) @@ -446,7 +446,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 secret *corev1.Secret, dir string) (ctrl.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) @@ -460,12 +460,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' 
does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -476,7 +476,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 path := filepath.Join(dir, sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -485,7 +485,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -508,7 +508,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list 
objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -560,7 +560,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -569,7 +569,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 02164412b..50403fa7d 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -305,7 +305,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -323,7 +323,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -334,7 +334,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), }, }, { @@ -345,7 +345,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), }, }, { From 6f34ea820b1c011b6189496ea58335a754ca4aac Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Tue, 10 Aug 2021 03:48:11 +0530 Subject: [PATCH 15/53] BucketReconciler: Add reconcileArtifact tests Add `BucketReconciler.reconcileArtifact` tests based on `GitRepositoryReconciler.reconcileArtifact` test cases. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 143 +++++++++++++++++++------- 1 file changed, 104 insertions(+), 39 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 50403fa7d..79da563f3 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -164,7 +164,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", - Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", URL: testStorage.Hostname + "/reconcile-storage/c.txt", }, assertPaths: []string{ @@ -197,7 +197,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { obj.Status.Artifact = &sourcev1.Artifact{ Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: "http://outdated.com/reconcile-storage/hostname.txt", } if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { @@ -214,7 +214,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, }, @@ -441,57 +441,113 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + tests := []struct { name string - artifact sourcev1.Artifact - beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want ctrl.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "artifact revision up-to-date", - artifact: sourcev1.Artifact{ - Revision: "existing", + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, 
meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} }, - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Status.Artifact = &artifact + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) }, + want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, }, { - name: "dir path deleted", - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - _ = os.RemoveAll(dir) + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + wantErr: true, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // 
path. + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, wantErr: true, }, - //{ - // name: "dir path empty", - //}, - //{ - // name: "success", - // artifact: sourcev1.Artifact{ - // Revision: "existing", - // }, - // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - // obj.Status.Artifact = &artifact - // }, - // assertConditions: []metav1.Condition{ - // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), - // }, - //}, - //{ - // name: "symlink", - //}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + r := &BucketReconciler{ + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) @@ -501,27 +557,36 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } - if tt.beforeFunc != nil { - tt.beforeFunc(obj, tt.artifact, tmpDir) - } + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum - r := &BucketReconciler{ - Storage: testStorage, + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) } - got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, tt.artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) - 
//g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + // On error, artifact is empty. Check artifacts only on successful + // reconcile. + if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } }) } } From 317cd449677c9fb37cac35e43a4c33c3fc407fa2 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Sun, 28 Nov 2021 00:50:26 +0530 Subject: [PATCH 16/53] Add more reconcileMinioSource test cases Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 9 +-- controllers/bucket_controller_test.go | 79 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 4 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 7513968fe..35f9a29ff 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -263,13 +263,14 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { - var secret corev1.Secret + var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name, } - if err := r.Get(ctx, secretName, &secret); err != nil { + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, @@ -281,9 +282,9 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu switch obj.Spec.Provider { case sourcev1.GoogleBucketProvider: - return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) default: - return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) + return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) } } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 79da563f3..0f0063993 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -380,6 +380,85 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: 
"ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + 
}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 4814abcba1bf0458148bdc94e8dd471faf258b64 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Mon, 29 Nov 2021 17:42:41 +0530 Subject: [PATCH 17/53] Add bucket controller tests for reconcileGCPSource - Introduce mock GCP Server to test the gcp bucket client against mocked gcp server results. - Add tests for reconcileGCPSource(). - Patch GCPClient.BucketExists() to return no error when the bucket doesn't exists. This keeps the GCP client compatible with the minio client. Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 414 ++++++++++++++++++++++++++ pkg/gcp/gcp.go | 3 +- 2 files changed, 416 insertions(+), 1 deletion(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 0f0063993..f7f0498ea 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/md5" + "encoding/json" "fmt" "net/http" "net/http/httptest" @@ -32,6 +33,7 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,6 +48,9 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) +// Environment variable to set the GCP Storage host for the GCP client. 
+const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + func TestBucketReconciler_Reconcile(t *testing.T) { g := NewWithT(t) @@ -519,6 +524,271 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } } +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: 
&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 
'7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: 
"test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. 
+ g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + func TestBucketReconciler_reconcileArtifact(t *testing.T) { // testChecksum is the checksum value of the artifacts created in this // test. @@ -849,3 +1119,147 @@ func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { w.Write(found.Content) } } + +type gcpMockObject struct { + Key string + ContentType string + Content []byte +} + +type gcpMockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Etag string + Objects []*gcpMockObject + Close func() +} + +func newGCPServer(bucketName string) *gcpMockServer { + s := &gcpMockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (gs *gcpMockServer) Start() { + gs.srv.Start() +} + +func (gs *gcpMockServer) Stop() { + gs.srv.Close() +} + +func (gs *gcpMockServer) HTTPAddress() string { + return gs.srv.URL +} + +func (gs *gcpMockServer) GetAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range gs.Objects { + objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o)) + } + return objs +} + +func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) { + for _, o := range gs.Objects { + if o.Key == key { + return o.Content, nil + } + } + return nil, fmt.Errorf("not found") +} + +func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.RequestURI, "/b/") { 
+ // Handle the bucket info related queries. + if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) { + // Return info about the bucket. + response := getGCPBucket(gs.BucketName, gs.Etag) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } else if strings.Contains(r.RequestURI, "/o/") { + // Return info about object in the bucket. + var obj *gcpMockObject + for _, o := range gs.Objects { + // The object key in the URI is escaped. + // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) { + obj = o + } + } + if obj != nil { + response := getGCPObject(gs.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else if strings.Contains(r.RequestURI, "/o?") { + // Return info about all the objects in the bucket. + response := gs.GetAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else { + // Handle object file query. + bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName) + if strings.HasPrefix(r.RequestURI, bucketPrefix) { + // The URL path is of the format /<bucket>/included/file.txt. + // Extract the object key by discarding the bucket prefix. + key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + // Handle returning object file in a bucket. 
+ response, err := gs.GetObjectFile(key) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCPObject(bucket string, obj gcpMockObject) *raw.Object { + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + } +} + +func getGCPBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go index 9127fcde3..f98e498c4 100644 --- a/pkg/gcp/gcp.go +++ b/pkg/gcp/gcp.go @@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error { func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) if err == gcpstorage.ErrBucketNotExist { - return false, err + // Not returning error to be compatible with minio's API. + return false, nil } if err != nil { return false, err From 625f850de52e6114cc3b9e8e0309e709e8cf355a Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 9 Dec 2021 03:42:54 +0530 Subject: [PATCH 18/53] bucket: Ignore patch error not found on delete Ignore "not found" error while patching when the delete timestamp is set. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 35f9a29ff..23acb3725 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -35,6 +35,7 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -167,6 +168,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Finally, patch the resource if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } retErr = kerrors.NewAggregate([]error{retErr, err}) } From 4dd779975f301354d3d6fc99aed129e22ce85895 Mon Sep 17 00:00:00 2001 From: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> Date: Sat, 11 Dec 2021 16:56:16 +0800 Subject: [PATCH 19/53] Update go.sum Signed-off-by: Zhongcheng Lao <Zhongcheng.Lao@microsoft.com> --- go.sum | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/go.sum b/go.sum index e75189c67..2fec06c10 100644 --- a/go.sum +++ b/go.sum @@ -93,16 +93,21 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -146,6 +151,7 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator 
v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= @@ -280,6 +286,7 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= @@ -289,9 +296,11 @@ github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQ github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/gittestserver v0.4.2 h1:XqoiemTnnUNldnOw8N7OTdalu2iZp1FTRhp9uUauDJQ= 
github.com/fluxcd/pkg/gittestserver v0.4.2/go.mod h1:hUPx21fe/6oox336Wih/XF1fnmzLmptNMOvATbTZXNY= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= github.com/fluxcd/pkg/gitutil v0.1.0/go.mod h1:Ybz50Ck5gkcnvF0TagaMwtlRy3X3wXuiri1HVsK5id4= +github.com/fluxcd/pkg/helmtestserver v0.2.0 h1:cE7YHDmrWI0hr9QpaaeQ0vQ16Z0IiqZKiINDpqdY610= github.com/fluxcd/pkg/helmtestserver v0.2.0/go.mod h1:Yie8n7xuu5Nvf1Q7302LKsubJhWpwzCaK0rLJvmF7aI= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= @@ -299,6 +308,7 @@ github.com/fluxcd/pkg/runtime v0.13.0-rc.3 h1:VxtmEL/m3/9wJBhhhWQ48fz8m93B7UiyVi github.com/fluxcd/pkg/runtime v0.13.0-rc.3/go.mod h1:5ioX9wb63+RUvHBdjRsFG4uYn6Ll/Yoa7Ema6XKIIuQ= github.com/fluxcd/pkg/ssh v0.1.0 h1:cym2bqiT4IINOdLV0J6GYxer16Ii/7b2+RlK3CG+CnA= github.com/fluxcd/pkg/ssh v0.1.0/go.mod h1:KUuVhaB6AX3IHTGCd3Ti/nesn5t1Nz4zCThFkkjHctM= +github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= @@ -406,6 +416,7 @@ github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PL github.com/gobuffalo/logger v1.0.1/go.mod h1:2zbswyIUa45I+c+FLXuWl9zSWEiVuthsk8ze5s8JvPs= github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b36chA3Q= github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= 
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -562,6 +573,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -578,6 +590,7 @@ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.3.1 h1:aLN7YINNZ7cYOPK3QC83dbM6KT0NMqVMw961TqrejlE= github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -619,9 +632,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libgit2/git2go/v31 v31.6.1 h1:FnKHHDDBgltSsu9RpKuL4rSR8dQ1JTf9dfvFhZ1y7Aw= github.com/libgit2/git2go/v31 v31.6.1/go.mod h1:c/rkJcBcUFx6wHaT++UwNpKvIsmPNqCeQ/vzO4DrEec= @@ -675,6 +691,7 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= @@ -683,9 +700,11 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= 
github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -712,6 +731,7 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -727,6 +747,7 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -755,6 +776,7 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= @@ -828,17 +850,21 @@ github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351 h1:HXr/qUllAWv9riaI4zh2eXWKmCSDqVS/XH1MRHLKRwk= github.com/rubenv/sql-migrate v0.0.0-20200616145509-8d140a17f351/go.mod h1:DCgfY80j8GYL7MLEfvcpSFvjD0L5yZq/aZUJmhZklyg= +github.com/russross/blackfriday v1.5.2 
h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -853,11 +879,13 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d 
h1:QKK1cJOPfb6nDDB8fC1l41/IcezASje2lsA13diVqfM= github.com/sosedoff/gitkit v0.2.1-0.20200818155723-72ebbcf5056d/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1471,6 +1499,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1480,6 +1509,7 @@ gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -1498,6 +1528,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= @@ -1528,6 +1559,7 @@ k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= +k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= @@ -1555,6 +1587,7 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e 
h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kubectl v0.21.0 h1:WZXlnG/yjcE4LWO2g6ULjFxtzK6H1TKzsfaBFuVIhNg= k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= From e9d53531af479770501b1f70b1439376aedf9d1b Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 11:17:22 +0200 Subject: [PATCH 20/53] Introduce more explicit Condition types This commit introduces new Condition types to the v1beta1 API, facilitating easier observation of (potentially) problematic state for end-users. - `ArtifactUnavailableCondition`: indicates there is no artifact available for the resource. This Condition should be set by the reconciler as soon as it observes the absence of an artifact for a source. - `CheckoutFailedCondition`: indicates a transient or persistent checkout failure. This Condition should be set by the reconciler as soon as it observes a Git checkout failure, including any prerequisites like the unavailability of the referenced Secret used for authentication. It should be deleted as soon as a successful checkout has been observed again. - `SourceVerifiedCondition`: indicates the integrity of the source has been verified. The Condition should be set to True or False by the reconciler based on the result of the integrity check. If there is no verification mode and/or secret configured, the Condition should be removed. - `IncludeUnavailableCondition`: indicates one of the referenced includes is not available. This Condition should for example be set by the reconciler when the include does not exist, or does not have an artifact. If the includes become available, it should be deleted. 
- `ArtifactOutdatedCondition`: indicates the current artifact of the source is outdated. This Condition should for example be set by the reconciler when it notices there is a newer revision for an artifact, or the previously included artifacts differ from the current available ones. The Condition should be removed after writing a new artifact to the storage. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/gitrepository_types.go | 57 ++++++++++++------- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 12 ++-- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c1014e6b7..9344e3763 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -34,6 +34,30 @@ const ( LibGit2Implementation = "libgit2" ) +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + CheckoutFailedCondition string = "CheckoutFailed" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. 
+ // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" +) + // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. @@ -42,10 +66,8 @@ type GitRepositorySpec struct { URL string `json:"url"` // The secret name containing the Git credentials. - // For HTTPS repositories the secret must contain username and password - // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -63,16 +85,16 @@ type GitRepositorySpec struct { // +optional Reference *GitRepositoryRef `json:"ref,omitempty"` - // Verify OpenPGP signature for the Git commit HEAD points to. + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. // +optional Verification *GitRepositoryVerification `json:"verify,omitempty"` - // Ignore overrides the set of excluded patterns in the .sourceignore format - // (which is the same as .gitignore). If not provided, a default will be used, - // consult the documentation for your version to find out what those are. + // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). 
+ // If not provided, a default will be used, consult the documentation for your version to find out what those are. // +optional Ignore *string `json:"ignore,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this source. // This flag tells the controller to suspend the reconciliation of this source. // +optional Suspend bool `json:"suspend,omitempty"` @@ -84,13 +106,13 @@ type GitRepositorySpec struct { // +optional GitImplementation string `json:"gitImplementation,omitempty"` - // When enabled, after the clone is created, initializes all submodules within, - // using their default settings. + // When enabled, after the clone is created, initializes all submodules within, using their default settings. // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` - // Extra git repositories to map into the repository + // Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for + // this resource. Include []GitRepositoryInclude `json:"include,omitempty"` // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. @@ -144,11 +166,11 @@ type GitRepositoryRef struct { // GitRepositoryVerification defines the OpenPGP signature verification process. type GitRepositoryVerification struct { - // Mode describes what git object should be verified, currently ('head'). + // Mode describes what Git object should be verified, currently ('head'). // +kubebuilder:validation:Enum=head Mode string `json:"mode"` - // The secret name containing the public keys of all trusted Git authors. + // SecretRef containing the public keys of all trusted Git authors. 
SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` } @@ -162,8 +184,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository - // sync. + // URL is the download link for the artifact output of the last repository sync. // +optional URL string `json:"url,omitempty"` @@ -179,12 +200,10 @@ type GitRepositoryStatus struct { } const ( - // GitOperationSucceedReason represents the fact that the git clone, pull - // and checkout operations succeeded. + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. GitOperationSucceedReason string = "GitOperationSucceed" - // GitOperationFailedReason represents the fact that the git clone, pull or - // checkout operations failed. + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed. GitOperationFailedReason string = "GitOperationFailed" ) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 4f5de9a06..d3f644be1 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -76,7 +76,7 @@ spec: description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for this resource. items: description: GitRepositoryInclude defines a source with a from and to path. 
properties: @@ -122,7 +122,7 @@ spec: type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain identity, identity.pub and known_hosts fields. + description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: description: Name of the referent @@ -131,7 +131,7 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: Suspend tells the controller to suspend the reconciliation of this source. This flag tells the controller to suspend the reconciliation of this source. type: boolean timeout: default: 20s @@ -142,15 +142,15 @@ spec: pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points to. + description: Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. properties: mode: - description: Mode describes what git object should be verified, currently ('head'). + description: Mode describes what Git object should be verified, currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all trusted Git authors. + description: SecretRef containing the public keys of all trusted Git authors. 
properties: name: description: Name of the referent From 10a8cc4089f17102a28374a2c41e0ab99749782c Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:33:18 +0200 Subject: [PATCH 21/53] Implement new runtime interfaces, prepare testenv This commit ensures all API objects implement the interfaces used by the runtime package to work with conditions, etc., and prepares the test suite to work with the `pkg/runtime/testenv` wrapper. Changes are made in a backwards compatible way (that being: the existing code can still be built and works as expected), but without proper dependency boundaries. The result of this is that the API package temporarily depends on the runtime package, which is resolved when all reconcilers have been refactored and the API package no longer contains condition-modifying functions. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/go.mod | 6 +- api/go.sum | 94 +++++++- api/v1beta1/bucket_types.go | 33 ++- api/v1beta1/gitrepository_types.go | 33 ++- api/v1beta1/helmchart_types.go | 35 ++- api/v1beta1/helmrepository_types.go | 33 ++- .../source.toolkit.fluxcd.io_buckets.yaml | 4 +- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 8 +- .../source.toolkit.fluxcd.io_helmcharts.yaml | 2 +- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 4 +- controllers/gitrepository_controller.go | 8 +- controllers/helmchart_controller_test.go | 116 +++++----- controllers/helmrepository_controller_test.go | 8 +- controllers/legacy_suite_test.go | 184 +++++++++++++++ controllers/storage.go | 8 +- controllers/suite_test.go | 218 +++++++----------- docs/api/source.md | 51 ++-- go.mod | 5 +- go.sum | 27 +-- 19 files changed, 578 insertions(+), 299 deletions(-) create mode 100644 controllers/legacy_suite_test.go diff --git a/api/go.mod b/api/go.mod index dbd78e413..2c6891e10 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,7 +4,11 @@ go 1.16 require ( github.com/fluxcd/pkg/apis/acl v0.0.1 - github.com/fluxcd/pkg/apis/meta v0.10.1 
+ github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 + // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as + // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying + // functions). + github.com/fluxcd/pkg/runtime v0.13.0-rc.3 k8s.io/apimachinery v0.22.2 sigs.k8s.io/controller-runtime v0.10.2 ) diff --git a/api/go.sum b/api/go.sum index 80a589759..33335ffb0 100644 --- a/api/go.sum +++ b/api/go.sum @@ -22,13 +22,17 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod 
h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -52,6 +56,7 @@ github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -59,7 +64,9 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -67,17 +74,22 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -86,6 +98,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -98,13 +111,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch 
v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQxaNQ= github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= -github.com/fluxcd/pkg/apis/meta v0.10.1 h1:zISenRlqNG7WK8TP3HxZTvv+1Z7JZOUIQvZrOr6pQ2w= -github.com/fluxcd/pkg/apis/meta v0.10.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3 h1:VxtmEL/m3/9wJBhhhWQ48fz8m93B7UiyVi5cXYbiy3E= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3/go.mod h1:5ioX9wb63+RUvHBdjRsFG4uYn6Ll/Yoa7Ema6XKIIuQ= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -125,6 +141,7 @@ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.4.0 
h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -133,6 +150,7 @@ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwoh github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -145,6 +163,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ 
-170,6 +189,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -192,28 +212,35 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod 
h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -222,6 +249,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -230,6 +258,7 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -238,6 +267,7 @@ github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUB github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -263,10 +293,14 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -278,6 +312,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -296,17 +331,20 @@ github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -315,6 +353,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -324,20 +363,25 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -360,9 +404,12 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -379,14 +426,19 @@ 
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -410,13 +462,17 @@ go.opentelemetry.io/otel/sdk/export/metric 
v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -459,6 +515,7 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -477,6 +534,7 @@ golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -493,12 +551,14 @@ golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210520170846-37e1c6afe023 h1:ADo5wSpq2gqaCGQWzk7S5vd//0iyyLeAratkEoG5dLE= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -526,6 +586,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -549,6 +610,7 @@ golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -561,6 +623,7 @@ golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2 h1:c8PlLMqBbOHoqtjteWm5/kbe6 golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -571,10 +634,13 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -617,12 +683,15 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -639,6 +708,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -662,6 +732,7 @@ google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -687,6 +758,7 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -694,6 +766,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -726,15 +799,23 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.2/go.mod 
h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= +k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= +k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -744,17 +825,24 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod 
h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod 
h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 4df79c2e1..e4b4d6cdc 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -126,7 +127,7 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return bucket } @@ -136,14 +137,14 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) return bucket } // BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) return bucket } @@ -158,22 +159,32 @@ func BucketReadyMessage(bucket Bucket) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. 
+func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *Bucket) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *Bucket) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *Bucket) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 9344e3763..c2cfc8e4c 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -215,7 +216,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -226,7 +227,7 @@ func 
GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -234,7 +235,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -249,22 +250,32 @@ func GitRepositoryReadyMessage(repository GitRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *GitRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. -func (in *GitRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 51c04781d..78a20852b 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -152,7 +153,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return chart } @@ -162,7 +163,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) return chart } @@ -170,7 +171,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. 
func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) return chart } @@ -185,22 +186,26 @@ func HelmChartReadyMessage(chart HelmChart) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { - return in.Status.Artifact +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// GetStatusConditions returns a pointer to the Status.Conditions slice -func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { - return &in.Status.Conditions +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions } -// GetInterval returns the interval at which the source is updated. -func (in *HelmChart) GetInterval() metav1.Duration { +// GetInterval returns the interval at which the source is reconciled. +func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + // GetValuesFiles returns a merged list of ValuesFiles. func (in *HelmChart) GetValuesFiles() []string { valuesFiles := in.Spec.ValuesFiles @@ -212,6 +217,12 @@ func (in *HelmChart) GetValuesFiles() []string { return valuesFiles } +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 0af0d4cf6..65b924042 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -113,7 +114,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -123,7 +124,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -131,7 +132,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. 
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -146,22 +147,32 @@ func HelmRepositoryReadyMessage(repository HelmRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *HelmRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *HelmRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index f613db849..7a26ead34 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -93,7 +93,7 @@ spec: description: The name of the secret containing authentication credentials for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -183,7 +183,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index d3f644be1..fd6c2138a 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -87,7 +87,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -125,7 +125,7 @@ spec: description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. 
type: string required: - name @@ -153,7 +153,7 @@ spec: description: SecretRef containing the public keys of all trusted Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -264,7 +264,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index b45e88211..f0571e0b5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -200,7 +200,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index bcce23a7f..981a1f88d 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -75,7 +75,7 @@ spec: description: The name of the secret containing authentication credentials for the Helm repository. For HTTP/S basic auth the secret must contain username and password fields. 
For TLS the secret must contain a certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -167,7 +167,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 9dd92290f..b58188a96 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -122,7 +122,7 @@ func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reques // check dependencies if len(repository.Spec.Include) > 0 { if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()) + repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()) if err := r.updateStatus(ctx, req, repository.Status); err != nil { log.Error(err, "unable to update status for dependency not ready") return ctrl.Result{Requeue: true}, err @@ -278,7 +278,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour var gr sourcev1.GitRepository err := r.Get(context.Background(), dName, &gr) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } includedArtifacts = append(includedArtifacts, gr.GetArtifact()) } @@ -323,11 +323,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, 
repository sour for i, incl := range repository.Spec.Include { toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b15..f762fcd08 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -129,9 +129,9 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -146,7 +146,7 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) When("Setting valid valuesFiles attribute", func() { @@ -161,12 +161,12 @@ var _ 
= Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -184,12 +184,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -207,12 +207,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + 
ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -228,12 +228,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -250,12 +250,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -271,12 +271,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -682,7 +682,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Committing a new version in the chart metadata") @@ -727,9 +727,9 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := 
loader.Load(storage.LocalPath(*now.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values).ToNot(BeNil()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) @@ -744,7 +744,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) @@ -762,12 +762,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -785,12 +785,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, 
interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -808,12 +808,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -834,16 +834,16 @@ var _ = Describe("HelmChartReconciler", func() { // Use status condition to be sure. 
for _, condn := range got.Status.Conditions { if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } } return false }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -860,12 +860,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -970,7 +970,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) }) }) @@ -1213,9 +1213,9 @@ var _ = 
Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -1232,12 +1232,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1255,12 +1255,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1278,12 +1278,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1299,12 +1299,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := 
helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -1321,12 +1321,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a60..69a3656e9 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Updating the chart index") @@ -112,7 +112,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) updated := &sourcev1.HelmRepository{} @@ 
-291,7 +291,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") @@ -385,7 +385,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") diff --git a/controllers/legacy_suite_test.go b/controllers/legacy_suite_test.go new file mode 100644 index 000000000..303eb7933 --- /dev/null +++ b/controllers/legacy_suite_test.go @@ -0,0 +1,184 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "math/rand" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var ginkgoTestEnv *envtest.Environment +var ginkgoTestStorage *Storage + +var examplePublicKey []byte +var examplePrivateKey []byte +var exampleCA []byte + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger( + zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), + ) + + By("bootstrapping test environment") + t := true + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + ginkgoTestEnv = &envtest.Environment{ + UseExistingCluster: &t, + } + } else { + ginkgoTestEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + } + + var err error + cfg, err = ginkgoTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = sourcev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + Expect(loadExampleKeys()).To(Succeed()) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") + + ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", 
time.Second*30) + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&GitRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") + + err = (&HelmRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") + + err = (&HelmChartReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + + go func() { + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + if ginkgoTestStorage != nil { + err := os.RemoveAll(ginkgoTestStorage.BasePath) + Expect(err).NotTo(HaveOccurred()) + } + err := ginkgoTestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func loadExampleKeys() (err error) { + examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + return err + 
} + examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + return err + } + exampleCA, err = os.ReadFile("testdata/certs/ca.pem") + return err +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/storage.go b/controllers/storage.go index d765e4303..448194150 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 5f5341155..9ba2c472b 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,173 +17,131 @@ limitations under the License. package controllers import ( + "fmt" "math/rand" - "net/http" "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
-var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage +const ( + timeout = 10 * time.Second + interval = 1 * time.Second +) -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte +var ( + testEnv *testenv.Environment + testStorage *Storage + testServer *testserver.ArtifactServer + testEventsH controller.Events + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte +) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) +func init() { + rand.Seed(time.Now().UnixNano()) } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) +func TestMain(m *testing.M) { + initTestTLS() - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = 
NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + testServer, err = testserver.NewTempArtifactServer() + if err != nil { + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) + } + fmt.Println("Starting the test storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) + if err != nil { + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) + } + + testEventsH = controller.MakeEvents(testEnv, "source-controller-test", nil) + testMetricsH = controller.MustMakeMetrics(testEnv) + + //if err := (&GitRepositoryReconciler{ + // Client: testEnv, + // Events: testEventsH, + // Metrics: testMetricsH, + // Storage: testStorage, + 
//}).SetupWithManager(testEnv); err != nil { + // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + //} go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - Expect(err).ToNot(HaveOccurred()) + fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } }() + <-testEnv.Manager.Elected() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + code := m.Run() - close(done) -}, 60) + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } -var _ = AfterSuite(func() { - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) -func init() { - rand.Seed(time.Now().UnixNano()) + os.Exit(code) } -func loadExampleKeys() (err error) { - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + panic(err) + } + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") if err != nil { - return err + panic(err) } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") if err != nil { - return err + panic(err) } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err } -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] 
+func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { + storage, err := NewStorage(s.Root(), s.URL(), timeout) + if err != nil { + return nil, err } - return string(b) + return storage, nil } diff --git a/docs/api/source.md b/docs/api/source.md index ba84a7c63..b917f9c71 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -372,7 +370,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -384,9 +382,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -398,7 +395,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. 
+This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -423,8 +421,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -438,7 +435,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1349,10 +1347,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -1408,7 +1404,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -1420,9 +1416,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). 
+If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -1434,7 +1429,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -1459,8 +1455,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -1474,7 +1469,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1547,8 +1543,7 @@ string </td> <td> <em>(Optional)</em> -<p>URL is the download link for the artifact output of the last repository -sync.</p> +<p>URL is the download link for the artifact output of the last repository sync.</p> </td> </tr> <tr> @@ -1623,7 +1618,7 @@ string </em> </td> <td> -<p>Mode describes what git object should be verified, currently (‘head’).</p> +<p>Mode describes what Git object should be verified, currently (‘head’).</p> </td> </tr> <tr> @@ -1636,7 +1631,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference </em> </td> <td> -<p>The secret name containing the public keys of all trusted Git authors.</p> +<p>SecretRef containing the public keys of all trusted Git authors.</p> </td> </tr> </tbody> diff --git a/go.mod b/go.mod index f781aa473..e43b24a53 100644 --- a/go.mod +++ b/go.mod @@ -15,13 +15,14 @@ 
require ( github.com/cyphar/filepath-securejoin v0.2.2 github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect - github.com/fluxcd/pkg/apis/meta v0.10.1 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 github.com/fluxcd/pkg/gittestserver v0.4.2 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.2.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.12.0 + github.com/fluxcd/pkg/runtime v0.13.0-rc.3 github.com/fluxcd/pkg/ssh v0.1.0 + github.com/fluxcd/pkg/testserver v0.1.0 github.com/fluxcd/pkg/untar v0.1.0 github.com/fluxcd/pkg/version v0.1.0 github.com/fluxcd/source-controller/api v0.19.2 diff --git a/go.sum b/go.sum index 0acdc7568..d9ed22c79 100644 --- a/go.sum +++ b/go.sum @@ -269,9 +269,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.1 h1:biCgZMjpDSv3Q4mZPikUJILx3t2MuNXR4Oa5jRQxaNQ= github.com/fluxcd/pkg/apis/acl v0.0.1/go.mod h1:y3qOXUFObVWk7jzOjubMnr/u18j1kCeSi6olycnxr/E= -github.com/fluxcd/pkg/apis/meta v0.10.0/go.mod h1:CW9X9ijMTpNe7BwnokiUOrLl/h13miwVr/3abEQLbKE= -github.com/fluxcd/pkg/apis/meta v0.10.1 h1:zISenRlqNG7WK8TP3HxZTvv+1Z7JZOUIQvZrOr6pQ2w= -github.com/fluxcd/pkg/apis/meta v0.10.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= github.com/fluxcd/pkg/gittestserver v0.4.2 h1:XqoiemTnnUNldnOw8N7OTdalu2iZp1FTRhp9uUauDJQ= github.com/fluxcd/pkg/gittestserver v0.4.2/go.mod h1:hUPx21fe/6oox336Wih/XF1fnmzLmptNMOvATbTZXNY= github.com/fluxcd/pkg/gitutil v0.1.0 
h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -280,8 +279,8 @@ github.com/fluxcd/pkg/helmtestserver v0.2.0 h1:cE7YHDmrWI0hr9QpaaeQ0vQ16Z0IiqZKi github.com/fluxcd/pkg/helmtestserver v0.2.0/go.mod h1:Yie8n7xuu5Nvf1Q7302LKsubJhWpwzCaK0rLJvmF7aI= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.12.0 h1:BPZZ8bBkimpqGAPXqOf3LTaw+tcw6HgbWyCuzbbsJGs= -github.com/fluxcd/pkg/runtime v0.12.0/go.mod h1:EyaTR2TOYcjL5U//C4yH3bt2tvTgIOSXpVRbWxUn/C4= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3 h1:VxtmEL/m3/9wJBhhhWQ48fz8m93B7UiyVi5cXYbiy3E= +github.com/fluxcd/pkg/runtime v0.13.0-rc.3/go.mod h1:5ioX9wb63+RUvHBdjRsFG4uYn6Ll/Yoa7Ema6XKIIuQ= github.com/fluxcd/pkg/ssh v0.1.0 h1:cym2bqiT4IINOdLV0J6GYxer16Ii/7b2+RlK3CG+CnA= github.com/fluxcd/pkg/ssh v0.1.0/go.mod h1:KUuVhaB6AX3IHTGCd3Ti/nesn5t1Nz4zCThFkkjHctM= github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= @@ -1125,6 +1124,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1262,6 +1262,7 @@ golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1541,33 +1542,32 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= -k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= k8s.io/apimachinery v0.21.0/go.mod 
h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= -k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/apiserver v0.22.2 h1:TdIfZJc6YNhu2WxeAOWq1TvukHF0Sfx0+ln4XK9qnL4= k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= k8s.io/cli-runtime v0.21.0 h1:/V2Kkxtf6x5NI2z+Sd/mIrq4FQyQ8jzZAUD6N5RnN7Y= k8s.io/cli-runtime v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= -k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= 
k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= @@ -1594,8 +1594,9 @@ rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= +sigs.k8s.io/controller-runtime v0.9.2/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= sigs.k8s.io/kustomize/api v0.8.5 h1:bfCXGXDAbFbb/Jv5AhMj2BB8a5VAJuuQ5/KU69WtDjQ= From c1668ef7a1b621eabb6665dde6f5def00a825392 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:54:45 +0200 Subject: [PATCH 22/53] Introduce `artifactSet` to replace `hasArtifactUpdated` NOTE: Remove `hasArtifactUpdated` in the future once it's no longer used. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact.go | 38 ++++++++++++++++++++++++ controllers/artifact_test.go | 56 ++++++++++++++++++++++-------------- 2 files changed, 73 insertions(+), 21 deletions(-) diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03c..fa21bd0a4 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} + // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. 
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 959661615..935c93bf7 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, 
@@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } From e5806c331d2a75b40ad870ded0d0616ce7b722d9 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 21:49:03 +0530 Subject: [PATCH 23/53] source: Add `GetRequeueAfter` The problem with `GetInterval()` was that the returned type was of `metav1.Duration`, while almost anywhere it was used, a type of `time.Duration` was requested. The result of this was that we had to call `GetInterval().Duration` all the time, which would become a bit cumbersome after awhile. To prevent this, we introduce a new `GetRequeueAfter() time.Duration` method, which both results the right type, and bears a name that is easier to remember where the value is used most; while setting the `Result.RequeueAfter` during reconcile operations. 
The introduction of this method deprecates `GetInterval()`, which should be removed in a future MINOR release. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 8 ++++++++ api/v1beta1/gitrepository_types.go | 8 ++++++++ api/v1beta1/helmchart_types.go | 8 ++++++++ api/v1beta1/helmrepository_types.go | 8 ++++++++ api/v1beta1/source.go | 5 +++++ 5 files changed, 37 insertions(+) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index e4b4d6cdc..b03c03a10 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -169,7 +171,13 @@ func (in *Bucket) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in Bucket) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c2cfc8e4c..d4dcbbe85 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -260,7 +262,13 @@ func (in *GitRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. 
+func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in GitRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 78a20852b..76ed4914b 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -196,7 +198,13 @@ func (in *HelmChart) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 65b924042..cd585f2d0 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -157,7 +159,13 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. 
func (in HelmRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/source.go b/api/v1beta1/source.go index 5c10d00fc..f22780dd1 100644 --- a/api/v1beta1/source.go +++ b/api/v1beta1/source.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,9 +31,12 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. GetArtifact() *Artifact // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. GetInterval() metav1.Duration } From adb131cccd09f3d612e955c38e3884e7161416da Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:30:20 +0530 Subject: [PATCH 24/53] Use new events and metrics helpers in main.go Signed-off-by: Sunny <darkowlzz@protonmail.com> --- main.go | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/main.go b/main.go index 67f00a920..73f015456 100644 --- a/main.go +++ b/main.go @@ -33,13 +33,12 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" @@ -114,6 +113,7 @@ func main() { 
clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(logger.NewLogger(logOptions)) @@ -132,9 +132,6 @@ func main() { } } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) - watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -163,6 +160,12 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + eventsH := helper.MakeEvents(mgr, controllerName, eventRecorder) + metricsH := helper.MustMakeMetrics(mgr) + + // NOTE: Temporarily, to be able to keep reconcilers-dev branches in sync. + _ = eventsH + if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) } @@ -174,7 +177,7 @@ func main() { Storage: storage, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -189,7 +192,7 @@ func main() { Getters: getters, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -203,7 +206,7 @@ func main() { Getters: getters, EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -216,7 +219,7 @@ func main() { Storage: storage, 
EventRecorder: mgr.GetEventRecorderFor(controllerName), ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From 2b7a308687f82c5f553ff35bd679628d7445e694 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 25 Nov 2021 00:03:11 +0530 Subject: [PATCH 25/53] Move Artifact conditions to conditions Also, introduce FetchFailedCondition for generic fetch failures. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/condition_types.go | 16 ++++++++++++++++ api/v1beta1/gitrepository_types.go | 8 -------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api/v1beta1/condition_types.go b/api/v1beta1/condition_types.go index 4077a2ab6..1a9db62ad 100644 --- a/api/v1beta1/condition_types.go +++ b/api/v1beta1/condition_types.go @@ -18,6 +18,22 @@ package v1beta1 const SourceFinalizer = "finalizers.fluxcd.io" +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
+ FetchFailedCondition string = "FetchFailed" +) + const ( // URLInvalidReason represents the fact that a given source has an invalid URL. URLInvalidReason string = "URLInvalid" diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index d4dcbbe85..145ac6bb0 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -38,10 +38,6 @@ const ( ) const ( - // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactUnavailableCondition string = "ArtifactUnavailable" - // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. @@ -55,10 +51,6 @@ const ( // exist, or does not have an Artifact. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. IncludeUnavailableCondition string = "IncludeUnavailable" - - // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactOutdatedCondition string = "ArtifactOutdated" ) // GitRepositorySpec defines the desired state of a Git repository. 
From e82a472c1e55436782bc64651db0134989c1285d Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Fri, 26 Nov 2021 00:18:08 +0530 Subject: [PATCH 26/53] Add gomega matcher for artifact Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact_matchers_test.go | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 controllers/artifact_matchers_test.go diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 000000000..63ff81ced --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
+func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} From b65e9bf8b18b943cdc304dcf6dbb211909acb8e1 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Sun, 19 Dec 2021 19:41:32 +0530 Subject: [PATCH 27/53] Add internal packages error and result - internal/error - Contains internal error type used across the source-controller reconcilers. - internal/reconcile - Contains helper abstractions for the controller-runtime reconcile Result type and functions to interact with the abstractions. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- internal/error/error.go | 37 +++++++++++++ internal/reconcile/reconcile.go | 82 ++++++++++++++++++++++++++++ internal/reconcile/reconcile_test.go | 47 ++++++++++++++++ 3 files changed, 166 insertions(+) create mode 100644 internal/error/error.go create mode 100644 internal/reconcile/reconcile.go create mode 100644 internal/reconcile/reconcile_test.go diff --git a/internal/error/error.go b/internal/error/error.go new file mode 100644 index 000000000..84b396cb1 --- /dev/null +++ b/internal/error/error.go @@ -0,0 +1,37 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +// StallingError is the reconciliation stalled state error. It contains an error +// and a reason for the stalled condition. +type StallingError struct { + // Reason is the stalled condition reason string. + Reason string + // Err is the error that caused stalling. This can be used as the message in + // stalled condition. + Err error +} + +// Error implements error interface. +func (se *StallingError) Error() string { + return se.Err.Error() +} + +// Unwrap returns the underlying error. 
+func (se *StallingError) Unwrap() error { + return se.Err +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go new file mode 100644 index 000000000..88e65a08e --- /dev/null +++ b/internal/reconcile/reconcile.go @@ -0,0 +1,82 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + ctrl "sigs.k8s.io/controller-runtime" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +) + +// Result is a type for creating an abstraction for the controller-runtime +// reconcile Result to simplify the Result values. +type Result int + +const ( + // ResultEmpty indicates a reconcile result which does not requeue. + ResultEmpty Result = iota + // ResultRequeue indicates a reconcile result which should immediately + // requeue. + ResultRequeue + // ResultSuccess indicates a reconcile result which should be + // requeued on the interval as defined on the reconciled object. + ResultSuccess +) + +// BuildRuntimeResult converts a given Result and error into the +// return values of a controller's Reconcile function. +func BuildRuntimeResult(obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { + // NOTE: The return values can be modified based on the error type. + // For example, if an error signifies a short requeue period that's + // not equal to the requeue period of the object, the error can be checked + // and an appropriate result with the period can be returned. 
+ // + // Example: + // if e, ok := err.(*waitError); ok { + // return ctrl.Result{RequeueAfter: e.RequeueAfter}, err + // } + + switch rr { + case ResultRequeue: + return ctrl.Result{Requeue: true}, err + case ResultSuccess: + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err + default: + return ctrl.Result{}, err + } +} + +// LowestRequeuingResult returns the ReconcileResult with the lowest requeue +// period. +// Weightage: +// ResultRequeue - immediate requeue (lowest) +// ResultSuccess - requeue at an interval +// ResultEmpty - no requeue +func LowestRequeuingResult(i, j Result) Result { + switch { + case i == ResultEmpty: + return j + case j == ResultEmpty: + return i + case i == ResultRequeue: + return i + case j == ResultRequeue: + return j + default: + return j + } +} diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go new file mode 100644 index 000000000..bb0cf4c44 --- /dev/null +++ b/internal/reconcile/reconcile_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "testing" + + . 
"github.com/onsi/gomega" +) + +func TestLowestRequeuingResult(t *testing.T) { + tests := []struct { + name string + i Result + j Result + wantResult Result + }{ + {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue}, + {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess}, + {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue}, + {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue}, + {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue}, + {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult)) + }) + } +} From a956216868983331a8c14603a0ed712fdf3c4479 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:08:52 +0530 Subject: [PATCH 28/53] bucket: Replace GetInterval() with GetRequeueAfter() Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 86911102e..511c74432 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -171,10 +171,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", time.Since(start).String(), - bucket.GetInterval().Duration.String(), + bucket.GetRequeueAfter().String(), )) - return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil } func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { From 27455c4891dda60dc76ba975ffb2bc21addc143a Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Sat, 31 Jul 2021 
03:58:43 +0200 Subject: [PATCH 29/53] Rewrite `BucketReconciler` to new standards This commit rewrites the `BucketReconciler` to new standards, while implementing the newly introduced Condition types, and trying to adhere better to Kubernetes API conventions. More specifically it introduces: - Implementation of more explicit Condition types to highlight abnormalities. - Extensive usage of the `conditions` subpackage from `runtime`. - Better and more conflict-resilient (status)patching of reconciled objects using the `patch` subpackage from runtime. - Proper implementation of kstatus' `Reconciling` and `Stalled` conditions. - Refactor of reconciler logic, including more efficient detection of changes to bucket objects by making use of the etag data available, and downloading of object files in parallel with a limited number of workers (4). - Integration tests that solely rely on `testenv` and do not use Ginkgo. There are a couple of TODOs marked in-code, these are suggestions for the future and should be non-blocking. In addition to the TODOs, more complex and/or edge-case test scenarios may be added as well. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 60 +- controllers/bucket_controller.go | 959 +++++++++++++++----------- controllers/bucket_controller_test.go | 671 +++++++++++++++++- controllers/suite_test.go | 9 + main.go | 10 +- 5 files changed, 1235 insertions(+), 474 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index b03c03a10..23a0ff1d7 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -19,12 +19,10 @@ package v1beta1 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -32,6 +30,19 @@ const ( BucketKind = "Bucket" ) +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +const ( + // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + DownloadFailedCondition string = "DownloadFailed" +) + // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). @@ -85,12 +96,6 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } -const ( - GenericBucketProvider string = "generic" - AmazonBucketProvider string = "aws" - GoogleBucketProvider string = "gcp" -) - // BucketStatus defines the observed state of a bucket type BucketStatus struct { // ObservedGeneration is the last observed generation. 
@@ -122,45 +127,6 @@ const ( BucketOperationFailedReason string = "BucketOperationFailed" ) -// BucketProgressing resets the conditions of the Bucket to metav1.Condition of -// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified Bucket. -func BucketProgressing(bucket Bucket) Bucket { - bucket.Status.ObservedGeneration = bucket.Generation - bucket.Status.URL = "" - bucket.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return bucket -} - -// BucketReady sets the given Artifact and URL on the Bucket and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified Bucket. -func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { - bucket.Status.Artifact = &artifact - bucket.Status.URL = url - conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with -// the given reason and message. It returns the modified Bucket. -func BucketNotReady(bucket Bucket, reason, message string) Bucket { - conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func BucketReadyMessage(bucket Bucket) string { - if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. 
func (in Bucket) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 511c74432..e46c8b3f9 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,25 +18,26 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" "fmt" "os" "path/filepath" + "sort" "strings" "time" - "github.com/go-logr/logr" + gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" + kerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -44,10 +45,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" "github.com/fluxcd/source-controller/pkg/sourceignore" @@ -61,11 +63,10 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - 
ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + helper.Events + helper.Metrics + + Storage *Storage } type BucketReconcilerOptions struct { @@ -84,521 +85,683 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. 
- if bucket.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") - } - - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // if reconciliation failed, record the failure and requeue immediately - if reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr + // Always attempt to patch the object and status after each reconciliation + 
defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize the Ready condition based on abnormalities that may have been observed + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } + + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { 
+ retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - r.recordReadiness(ctx, reconciledBucket) - - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetRequeueAfter().String(), - )) - return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil + // Reconcile actual object + return r.reconcile(ctx, obj) } -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - var err error - var sourceBucket sourcev1.Bucket - tempDir, err := os.MkdirTemp("", bucket.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer os.RemoveAll(tempDir) - if bucket.Spec.Provider == sourcev1.GoogleBucketProvider { - sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } else { - sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, 
sourcev1.StorageOperationFailedReason, err.Error()), err - } +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. +func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { + return result, err } - // create artifact dir - err = r.Storage.MkdirAll(artifact) + // Create temp working dir + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) + return ctrl.Result{}, err } + defer os.RemoveAll(tmpDir) - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the source from upstream + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { + return 
ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err } - defer unlock() - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the artifact to storage + if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { + return result, err } - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + var secret corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + if err := r.Get(ctx, secretName, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err + } } - // Record deleted status - r.recordReadiness(ctx, bucket) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil } -// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket -// using a gcp client -func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - log := logr.FromContext(ctx) - gcpClient, err := r.authGCP(ctx, bucket) +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. 
In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + // Build the client with the configuration from the object and secret + s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } - defer gcpClient.Close(log) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + 
"Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } - // Look for file with ignore rules first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { + if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, 
sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content - for { - object, err := objects.Next() - if err == gcp.IteratorDone { - break - } - if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + // Ignore directories and the .sourceignore 
file + if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { continue } - - if matcher.Match(strings.Split(object.Name, "/"), false) { + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Name) - if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + index[object.Key] = object.ETag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(ctx, obj, events.EventSeverityError, 
sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return sourcev1.Bucket{}, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// reconcileWithMinio handles getting objects from an S3 compatible bucket -// using a minio client -func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - s3Client, err := r.authMinio(ctx, bucket) +// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' 
does not exist", obj.Spec.BucketName) } // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} matcher := sourceignore.NewMatcher(ps) - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) + for { + object, err := objects.Next() + if err != nil { + if err == gcp.IteratorDone { + break + } + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { continue } - if matcher.Match(strings.Split(object.Key, "/"), false) { + if matcher.Match(strings.Split(object.Name, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } + index[object.Name] = 
object.Etag } - return sourcev1.Bucket{}, nil -} -// authGCP creates a new Google Cloud Platform storage client -// to interact with the storage service. -func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { - var client *gcp.GCPClient - var err error - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { - return nil, err - } - client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) - if err != nil { - return nil, err - } - } else { - client, err = gcp.NewClient(ctx) - if err != nil { - return nil, err + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + 
conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + return ctrl.Result{}, err } + r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return client, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// authMinio creates a new Minio client to interact with S3 -// compatible storage services. -func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) { - opt := minio.Options{ - Region: bucket.Spec.Region, - Secure: !bucket.Spec.Insecure, +// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the +// given data. +// +// The inspection of the given data to the object is differed, ensuring any stale observations as +// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. +// If the given artifact does not differ from the object's current, it returns early. +// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is +// updated to its path. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + } + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) + } + }() + + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + } + + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + return ctrl.Result{}, err + } else if !f.IsDir() { + err := fmt.Errorf("source path '%s' is not a directory", dir) + ctrl.LoggerFrom(ctx).Error(err, "invalid target path") + return ctrl.Result{}, err } - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == 
sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } + unlock, err := r.Storage.Lock(artifact) + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err + } + defer unlock() - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, nil); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Unable to archive artifact to storage: %s", err) + return ctrl.Result{}, err } + r.Events.EventWithMetaf(ctx, obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, events.EventSeverityInfo, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) - return minio.New(bucket.Spec.Endpoint, &opt) -} + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() -// checksum calculates the SHA1 checksum of the given root directory. -// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. 
-func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := os.ReadFile(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.Events.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - return fmt.Sprintf("%x", sum.Sum(nil)), nil + if url != "" { + obj.Status.URL = url + } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. -func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true - } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true +// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err } - return bucket, false + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil } -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. -func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) +// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
+func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(ctx, obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(ctx, obj, events.EventSeverityError, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(ctx, obj, events.EventSeverityInfo, "GarbageCollectionSucceeded", "Garbage collected old artifacts") } return nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := logr.FromContext(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) +// buildMinioClient constructs a minio.Client with the data from the given object and Secret. +// It returns an error if the Secret does not have the required fields, or if there is no credential handler +// configured. 
+func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, } - if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return + if secret != nil { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) } + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") } + return minio.New(obj.Spec.Endpoint, &opts) } -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := logr.FromContext(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. 
+func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { + var client *gcp.GCPClient + var err error + if secret != nil { + if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { + return nil, err + } + client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) + if err != nil { + return nil, err + } } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) + client, err = gcp.NewClient(ctx) + if err != nil { + return nil, err + } } + return client, nil } -func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) { - if r.MetricsRecorder == nil { - return - } - log := logr.FromContext(ctx) +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !bucket.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend) +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. 
+func (i etagIndex) Revision() (string, error) { + keyIndex := make([]string, 0, len(i)) + for k := range i { + keyIndex = append(keyIndex, k) } -} - -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err + sort.Strings(keyIndex) + sum := sha256.New() + for _, k := range keyIndex { + if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil { + return "", err + } } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus - - return r.Status().Patch(ctx, &bucket, patch) + return fmt.Sprintf("%x", sum.Sum(nil)), nil } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 01ff20d87..02164412b 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -17,59 +17,567 @@ limitations under the License. package controllers import ( + "context" + "crypto/md5" + "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" + "path" "path/filepath" + "strings" "testing" + "time" + + "github.com/go-logr/logr" + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestBucketReconciler_checksum(t *testing.T) { +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return 
false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want ctrl.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + 
assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: ctrl.Result{Requeue: true}, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + got, err := r.reconcileStorage(context.TODO(), obj) + g.Expect(err != 
nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + }) + } +} + +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + // name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + 
*conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: 
sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t 
*testing.T) { + tests := []struct { + name string + artifact sourcev1.Artifact + beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "artifact revision up-to-date", + artifact: sourcev1.Artifact{ + Revision: "existing", + }, + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Status.Artifact = &artifact + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "dir path deleted", + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + _ = os.RemoveAll(dir) }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", + wantErr: true, }, + //{ + // name: "dir path empty", + //}, + //{ + // name: "success", + // artifact: sourcev1.Artifact{ + // Revision: "existing", + // }, + // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // obj.Status.Artifact = &artifact + // }, + // assertConditions: []metav1.Condition{ + // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), + // }, + //}, + //{ + // name: "symlink", + //}, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: 
sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, } - defer os.RemoveAll(root) + if tt.beforeFunc != nil { - tt.beforeFunc(root) + tt.beforeFunc(obj, tt.artifact, tmpDir) } - got, err := (&BucketReconciler{}).checksum(root) + + r := &BucketReconciler{ + Storage: testStorage, + } + + got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, tt.artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + t.Errorf("revision() got = %v, want %v", got, tt.want) } }) } } +// helpers + func 
mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +588,120 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*s3MockObject +} + +func newS3Server(bucketName string) *s3MockServer { + s := &s3MockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *s3MockServer) Start() { + s.srv.Start() +} + +func (s *s3MockServer) Stop() { + s.srv.Close() +} + +func (s *s3MockServer) HTTPAddress() string { + return s.srv.URL +} + +func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + return + } + + q := r.URL.Query() + + if q["location"] != nil { + fmt.Fprint(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">Europe</LocationConstraint> + `) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + contents += fmt.Sprintf(` + <Contents> + <Key>%s</Key> + <LastModified>%s</LastModified> + <Size>%d</Size> + <ETag>"%b"</ETag> + <StorageClass>STANDARD</StorageClass> + </Contents>`, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Name>%s</Name> + <Prefix/> + <Marker/> + <KeyCount>%d</KeyCount> + <MaxKeys>1000</MaxKeys> + <IsTruncated>false</IsTruncated> 
+ %s +</ListBucketResult> + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *s3MockObject + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + return + } + + w.Write(found.Content) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9ba2c472b..23d649f37 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -98,6 +98,15 @@ func TestMain(m *testing.M) { // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) //} + if err := (&BucketReconciler{ + Client: testEnv, + Events: testEventsH, + Metrics: testMetricsH, + Storage: testStorage, + }).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { diff --git a/main.go b/main.go index 73f015456..4e3a1f539 100644 --- a/main.go +++ b/main.go @@ -214,12 +214,10 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + Events: eventsH, + Metrics: metricsH, + Storage: storage, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ 
MaxConcurrentReconciles: concurrent, }); err != nil { From 009acb6d52f323edecb0f2b2dd93caaf22df3348 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Mon, 9 Aug 2021 13:24:49 +0200 Subject: [PATCH 30/53] Consolidate condition types into `FetchFailed` This commit consolidates the `DownloadFailed` and `CheckoutFailed` Condition types into a new more generic `FetchFailed` type to simplify the API and observations by consumers. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 7 ---- controllers/bucket_controller.go | 46 +++++++++++++-------------- controllers/bucket_controller_test.go | 8 ++--- 3 files changed, 27 insertions(+), 34 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 23a0ff1d7..9ab5d09a0 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -36,13 +36,6 @@ const ( GoogleBucketProvider string = "gcp" ) -const ( - // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the - // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - DownloadFailedCondition string = "DownloadFailed" -) - // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). 
diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index e46c8b3f9..7513968fe 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -122,12 +122,12 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res meta.ReadyCondition, conditions.WithConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), conditions.WithNegativePolarityConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), ) @@ -137,7 +137,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, meta.ReadyCondition, meta.ReconcilingCondition, @@ -259,7 +259,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret -// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// fails, it records v1beta1.FetchFailedCondition=True and returns early. // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { @@ -270,7 +270,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu Name: obj.Spec.SecretRef.Name, } if err := r.Get(ctx, secretName, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) @@ -292,8 +292,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // // The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into // account. In case of an error during the download process (including transient errors), it records -// v1beta1.DownloadFailedCondition=True and returns early. -// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// v1beta1.FetchFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
// @@ -303,7 +303,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) @@ -316,12 +316,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -332,7 +332,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + 
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -341,7 +341,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -362,7 +362,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -415,7 +415,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(ctx, obj, 
events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -424,7 +424,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) @@ -446,7 +446,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 secret *corev1.Secret, dir string) (ctrl.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) @@ -460,12 +460,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' 
does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -476,7 +476,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 path := filepath.Join(dir, sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -485,7 +485,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -508,7 +508,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list 
objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -560,7 +560,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -569,7 +569,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 02164412b..50403fa7d 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -305,7 +305,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -323,7 +323,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - 
*conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -334,7 +334,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), }, }, { @@ -345,7 +345,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), }, }, { From 421f50d44d18c046b85acdc29672dc03c1c4e160 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Tue, 10 Aug 2021 03:48:11 +0530 Subject: [PATCH 31/53] BucketReconciler: Add reconcileArtifact tests Add `BucketReconciler.reconcileArtifact` tests based on `GitRepositoryReconciler.reconcileArtifact` test cases. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 143 +++++++++++++++++++------- 1 file changed, 104 insertions(+), 39 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 50403fa7d..79da563f3 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -164,7 +164,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", - Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", URL: testStorage.Hostname + "/reconcile-storage/c.txt", }, assertPaths: []string{ @@ -197,7 +197,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { obj.Status.Artifact = &sourcev1.Artifact{ Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: "http://outdated.com/reconcile-storage/hostname.txt", } if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { @@ -214,7 +214,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, }, @@ -441,57 +441,113 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + tests := []struct { name string - artifact sourcev1.Artifact - beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want ctrl.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "artifact revision up-to-date", - artifact: sourcev1.Artifact{ - Revision: "existing", + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, 
meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} }, - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Status.Artifact = &artifact + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) }, + want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, }, { - name: "dir path deleted", - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - _ = os.RemoveAll(dir) + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + wantErr: true, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // 
path. + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, wantErr: true, }, - //{ - // name: "dir path empty", - //}, - //{ - // name: "success", - // artifact: sourcev1.Artifact{ - // Revision: "existing", - // }, - // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - // obj.Status.Artifact = &artifact - // }, - // assertConditions: []metav1.Condition{ - // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), - // }, - //}, - //{ - // name: "symlink", - //}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + r := &BucketReconciler{ + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) @@ -501,27 +557,36 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } - if tt.beforeFunc != nil { - tt.beforeFunc(obj, tt.artifact, tmpDir) - } + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum - r := &BucketReconciler{ - Storage: testStorage, + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) } - got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, tt.artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) - 
//g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + // On error, artifact is empty. Check artifacts only on successful + // reconcile. + if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } }) } } From 066f74a3a1b415bbe16e80f96c6cc5ff1b84a33a Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Sun, 28 Nov 2021 00:50:26 +0530 Subject: [PATCH 32/53] Add more reconcileMinioSource test cases Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 9 +-- controllers/bucket_controller_test.go | 79 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 4 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 7513968fe..35f9a29ff 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -263,13 +263,14 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { - var secret corev1.Secret + var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name, } - if err := r.Get(ctx, secretName, &secret); err != nil { + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, @@ -281,9 +282,9 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu switch obj.Spec.Provider { case sourcev1.GoogleBucketProvider: - return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) default: - return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) + return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) } } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 79da563f3..0f0063993 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -380,6 +380,85 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: 
"ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + 
}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 18fd284d26e969d08f96f166de50b580243d9f7d Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Mon, 29 Nov 2021 17:42:41 +0530 Subject: [PATCH 33/53] Add bucket controller tests for reconcileGCPSource - Introduce mock GCP Server to test the gcp bucket client against mocked gcp server results. - Add tests for reconcileGCPSource(). - Patch GCPClient.BucketExists() to return no error when the bucket doesn't exists. This keeps the GCP client compatible with the minio client. Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 414 ++++++++++++++++++++++++++ pkg/gcp/gcp.go | 3 +- 2 files changed, 416 insertions(+), 1 deletion(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 0f0063993..f7f0498ea 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/md5" + "encoding/json" "fmt" "net/http" "net/http/httptest" @@ -32,6 +33,7 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -46,6 +48,9 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) +// Environment variable to set the GCP Storage host for the GCP client. 
+const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + func TestBucketReconciler_Reconcile(t *testing.T) { g := NewWithT(t) @@ -519,6 +524,271 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } } +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: 
&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 
'7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: 
"test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. 
+ g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + func TestBucketReconciler_reconcileArtifact(t *testing.T) { // testChecksum is the checksum value of the artifacts created in this // test. @@ -849,3 +1119,147 @@ func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { w.Write(found.Content) } } + +type gcpMockObject struct { + Key string + ContentType string + Content []byte +} + +type gcpMockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Etag string + Objects []*gcpMockObject + Close func() +} + +func newGCPServer(bucketName string) *gcpMockServer { + s := &gcpMockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (gs *gcpMockServer) Start() { + gs.srv.Start() +} + +func (gs *gcpMockServer) Stop() { + gs.srv.Close() +} + +func (gs *gcpMockServer) HTTPAddress() string { + return gs.srv.URL +} + +func (gs *gcpMockServer) GetAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range gs.Objects { + objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o)) + } + return objs +} + +func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) { + for _, o := range gs.Objects { + if o.Key == key { + return o.Content, nil + } + } + return nil, fmt.Errorf("not found") +} + +func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.RequestURI, "/b/") { 
+ // Handle the bucket info related queries. + if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) { + // Return info about the bucket. + response := getGCPBucket(gs.BucketName, gs.Etag) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } else if strings.Contains(r.RequestURI, "/o/") { + // Return info about object in the bucket. + var obj *gcpMockObject + for _, o := range gs.Objects { + // The object key in the URI is escaped. + // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) { + obj = o + } + } + if obj != nil { + response := getGCPObject(gs.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else if strings.Contains(r.RequestURI, "/o?") { + // Return info about all the objects in the bucket. + response := gs.GetAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else { + // Handle object file query. + bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName) + if strings.HasPrefix(r.RequestURI, bucketPrefix) { + // The URL path is of the format /<bucket>/included/file.txt. + // Extract the object key by discarding the bucket prefix. + key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + // Handle returning object file in a bucket. 
+ response, err := gs.GetObjectFile(key) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCPObject(bucket string, obj gcpMockObject) *raw.Object { + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + } +} + +func getGCPBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go index 9127fcde3..f98e498c4 100644 --- a/pkg/gcp/gcp.go +++ b/pkg/gcp/gcp.go @@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error { func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) if err == gcpstorage.ErrBucketNotExist { - return false, err + // Not returning error to be compatible with minio's API. + return false, nil } if err != nil { return false, err From bd0ee6e4d9c9083bdad6c98e46cc82ecabae6a58 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 9 Dec 2021 03:42:54 +0530 Subject: [PATCH 34/53] bucket: Ignore patch error not found on delete Ignore "not found" error while patching when the delete timestamp is set. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 35f9a29ff..23acb3725 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -35,6 +35,7 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -167,6 +168,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Finally, patch the resource if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } retErr = kerrors.NewAggregate([]error{retErr, err}) } From 878856cb8f790a686483b6a70b4a2d49bea84256 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Tue, 21 Dec 2021 04:43:16 +0530 Subject: [PATCH 35/53] bucket: Add more reconciler design improvements - Remove ArtifactUnavailable condition and use Reconciling condition to convey the same. - Make Reconciling condition affect the ready condition. - Introduce summarizeAndPatch() to calculate the final status conditions and patch them. - Introduce reconcile() to iterate through the sub-reconcilers and execute them. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 325 ++++++++++++++------------ controllers/bucket_controller_test.go | 74 ++++-- 2 files changed, 230 insertions(+), 169 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 23acb3725..d1b4e79b8 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -36,7 +36,6 @@ import ( "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" ctrl "sigs.k8s.io/controller-runtime" @@ -53,6 +52,8 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/sourceignore" ) @@ -74,6 +75,10 @@ type BucketReconcilerOptions struct { MaxConcurrentReconciles int } +// bucketReconcilerFunc is the function type for all the bucket reconciler +// functions. +type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) + func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } @@ -111,69 +116,14 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, err } + var recResult sreconcile.Result + // Always attempt to patch the object and status after each reconciliation + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. 
defer func() { - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Summarize the Ready condition based on abnormalities that may have been observed - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - conditions.WithNegativePolarityConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - ) - - // Patch the object, ignoring conflicts on the conditions owned by this controller - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - }, - } - - // Determine if the resource is still being reconciled, or if it has stalled, and record this observation - if retErr == nil && (result.IsZero() || !result.Requeue) { - // We are no longer reconciling - conditions.Delete(obj, meta.ReconcilingCondition) - - // We have now observed this generation - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - - readyCondition := conditions.Get(obj, meta.ReadyCondition) - switch readyCondition.Status { - case metav1.ConditionFalse: - // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled - conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) - case metav1.ConditionTrue: - // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled - conditions.Delete(obj, meta.StalledCondition) - } - } - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // 
Ignore patch error "not found" when the object is being deleted. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -183,60 +133,150 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Add finalizer first if not exist to avoid the race condition between init and delete if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue return ctrl.Result{Requeue: true}, nil } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(obj, res, err) } // Reconcile actual object - return r.reconcile(ctx, obj) + reconcilers := []bucketReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(obj, recResult, err) } -// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that -// produces an error. -func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { - // Mark the resource as under reconciliation - conditions.MarkReconciling(obj, meta.ProgressingReason, "") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. 
+func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.Bucket, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Remove reconciling condition on successful reconciliation. + if recErr == nil && res == sreconcile.ResultSuccess { + conditions.Delete(obj, meta.ReconcilingCondition) + } + + // Record the value of the reconciliation request if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReconcilingCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReconcilingCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Analyze the reconcile error. + switch t := recErr.(type) { + case *serror.StallingError: + if res == sreconcile.ResultEmpty { + // The current generation has been reconciled successfully and it has + // resulted in a stalled state. Return no error to stop further + // requeuing. + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + conditions.MarkFalse(obj, meta.ReadyCondition, t.Reason, t.Error()) + conditions.MarkStalled(obj, t.Reason, t.Error()) + recErr = nil + } + // ?: Log when there's a stalling error and non-empty result? This + // indicates incorrect returned values. + case nil: + // The reconciliation didn't result in any error, we are not in stalled + // state. 
If a requeue is requested, the current generation has not been
+		// reconciled successfully.
+		if res != sreconcile.ResultRequeue {
+			patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
+		}
+		conditions.Delete(obj, meta.StalledCondition)
+	default:
+		// The reconcile resulted in some error, but we are not in stalled
+		// state.
+		conditions.Delete(obj, meta.StalledCondition)
+	}
+
+	if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil {
+		// Ignore patch error "not found" when the object is being deleted.
+		if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
+			err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) })
+		}
+		recErr = kerrors.NewAggregate([]error{recErr, err})
+	}
+
+	return recErr
+}
 
-	// Reconcile the storage data
-	if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() {
-		return result, err
+// reconcile iterates through the actual reconciliation tasks for the object;
+// it returns early on the first step that returns ResultRequeue or produces an
+// error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "Reconciling new generation %d", obj.Generation) } + var artifact sourcev1.Artifact + // Create temp working dir tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create temporary directory") r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } defer os.RemoveAll(tmpDir) - // Reconcile the source from upstream - var artifact sourcev1.Artifact - if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err - } - - // Reconcile the artifact to storage - if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { - return result, err + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // Prioritize requeue request in the result. + res = sreconcile.LowestRequeuingResult(res, recResult) + if err != nil { + resErr = err + break + } } - - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return res, resErr } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. // // All artifacts for the resource except for the current one are garbage collected from the storage. 
// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. -// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -248,26 +288,23 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") - return ctrl.Result{Requeue: true}, nil + conditions.MarkReconciling(obj, "NoArtifact", "No artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) // Always update URLs to ensure hostname is up-to-date // TODO(hidde): we may want to send out an event only if we notice the URL has changed r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret // fails, it records v1beta1.FetchFailedCondition=True and returns early. 
-// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ @@ -281,7 +318,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) // Return error as the world as observed may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } } @@ -302,10 +339,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { @@ -314,7 +349,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Confirm bucket exists @@ -324,14 +359,14 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source if err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } if !exists { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + return sreconcile.ResultEmpty, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } // Look for file with ignore rules first @@ -342,7 +377,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + 
return sreconcile.ResultEmpty, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) @@ -351,7 +386,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -372,7 +407,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Ignore directories and the .sourceignore file @@ -391,13 +426,14 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source revision, err := index.Revision() if err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + message := fmt.Sprintf("New upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -425,7 +461,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source "Download from bucket '%s' failed: %s", 
obj.Spec.BucketName, err) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) @@ -434,7 +470,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the @@ -446,10 +482,8 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, @@ -457,7 +491,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } defer gcpClient.Close(ctrl.LoggerFrom(ctx)) @@ -468,14 +502,14 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } if !exists { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + return sreconcile.ResultEmpty, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } // Look for file with ignore rules first @@ -486,7 +520,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, 
err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) @@ -495,7 +529,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -518,7 +552,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { @@ -536,13 +570,14 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 revision, err := index.Revision() if err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + message := fmt.Sprintf("New upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -570,7 +605,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx 
context.Context, obj *sourcev1 "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } r.Eventf(ctx, obj, events.EventSeverityInfo, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) @@ -579,25 +614,19 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the // given data. // // The inspection of the given data to the object is deferred, ensuring any stale observations as -v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. // If the given artifact does not differ from the object's current, it returns early. // On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is // updated to its path. -// -// The caller should assume a failure if an error is returned, or the Result is zero. 
-func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Always restore the Ready condition in case it got removed due to a transient error defer func() { - if obj.GetArtifact() != nil { - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) - } if obj.GetArtifact().HasRevision(artifact.Revision) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, @@ -608,36 +637,41 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) { ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. 
+ conditions.MarkReconciling(obj, "NewRevision", "New upstream revision '%s'", artifact.Revision) + // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } else if !f.IsDir() { err := fmt.Errorf("source path '%s' is not a directory", dir) ctrl.LoggerFrom(ctx).Error(err, "invalid target path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { + if err := r.Storage.MkdirAll(*artifact); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") - return ctrl.Result{}, err + r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to create artifact directory: %s", err) + return sreconcile.ResultEmpty, err } - unlock, err := r.Storage.Lock(artifact) + unlock, err := r.Storage.Lock(*artifact) if err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } defer unlock() // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, nil); err != nil { + if err := r.Storage.Archive(artifact, dir, nil); err != nil { r.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Unable to archive artifact to storage: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } r.Events.EventWithMetaf(ctx, obj, map[string]string{ "revision": artifact.Revision, @@ -648,7 +682,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. 
obj.Status.Artifact = artifact.DeepCopy() // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { r.Events.Eventf(ctx, obj, events.EventSeverityError, sourcev1.StorageOperationFailedReason, "Failed to update status URL symlink: %s", err) @@ -656,23 +690,24 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1. if url != "" { obj.Status.URL = url } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. -func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +// func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } // garbageCollect performs a garbage collection for the given v1beta1.Bucket. 
It removes all but the current diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index f7f0498ea..5d147647b 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -37,7 +37,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" @@ -46,6 +45,7 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) // Environment variable to set the GCP Storage host for the GCP client. @@ -140,7 +140,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact *sourcev1.Artifact assertConditions []metav1.Condition @@ -166,6 +166,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, + want: sreconcile.ResultSuccess, assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", @@ -188,12 +189,12 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, - want: ctrl.Result{Requeue: true}, + want: sreconcile.ResultSuccess, assertPaths: []string{ "!/reconcile-storage/invalid.txt", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "No artifact for resource in storage"), }, }, { @@ -213,6 +214,7 @@ func 
TestBucketReconciler_reconcileStorage(t *testing.T) { } return nil }, + want: sreconcile.ResultSuccess, assertPaths: []string{ "/reconcile-storage/hostname.txt", }, @@ -241,7 +243,9 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - got, err := r.reconcileStorage(context.TODO(), obj) + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, "") g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -271,7 +275,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { middleware http.Handler secret *corev1.Secret beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -287,12 +291,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, // TODO(hidde): middleware for mock server @@ -377,12 +383,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", }, 
assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, { @@ -412,12 +420,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -436,6 +446,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", @@ -456,12 +467,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 
'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, } @@ -531,7 +544,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { bucketObjects []*gcpMockObject secret *corev1.Secret beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -561,12 +574,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, { @@ -577,6 +592,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), @@ -595,6 +611,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), @@ -606,6 +623,7 @@ 
func TestBucketReconciler_reconcileGCPSource(t *testing.T) { beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), @@ -617,6 +635,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), @@ -642,12 +661,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), }, }, { @@ -674,12 +695,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream 
revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -697,6 +720,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", @@ -716,12 +740,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, // TODO: Middleware for mock server to test authentication using secret. 
@@ -798,7 +824,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { name string beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -807,9 +833,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'existing'"), }, }, { @@ -821,18 +848,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(obj.Status.URL).To(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), - }, - }, - { - name: "Removes ArtifactUnavailableCondition after creating artifact", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, @@ -843,9 +859,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} 
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'existing'"), }, }, { @@ -860,9 +877,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { t.Expect(err).NotTo(HaveOccurred()) t.Expect(localPath).To(Equal(targetFile)) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'existing'"), }, }, { @@ -870,7 +888,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'existing'"), + }, }, { name: "Dir path is not a directory", @@ -885,7 +907,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "New upstream revision 'existing'"), + }, }, } @@ -922,7 +948,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { tt.beforeFunc(g, obj, artifact, tmpDir) } - got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, 
artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, log.NullLogger{}), obj, &artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) From c2793efd5e4fa7535d7d0c2c7d59487414a687df Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 11:17:22 +0200 Subject: [PATCH 36/53] Introduce more explicit Condition types This commit introduces new Condition types to the v1beta1 API, facilitating easier observation of (potentially) problematic state for end-users. - `ArtifactUnavailableCondition`: indicates there is no artifact available for the resource. This Condition should be set by the reconciler as soon as it observes the absence of an artifact for a source. - `CheckoutFailedCondition`: indicates a transient or persistent checkout failure. This Condition should be set by the reconciler as soon as it observes a Git checkout failure, including any prerequisites like the unavailability of the referenced Secret used for authentication. It should be deleted as soon as a successful checkout has been observed again. - `SourceVerifiedCondition`: indicates the integrity of the source has been verified. The Condition should be set to True or False by the reconciler based on the result of the integrity check. If there is no verification mode and/or secret configured, the Condition should be removed. - `IncludeUnavailableCondition`: indicates one of the referenced includes is not available. This Condition should for example be set by the reconciler when the include does not exist, or does not have an artifact. If the includes become available, it should be deleted. - `ArtifactOutdatedCondition`: indicates the current artifact of the source is outdated. This Condition should for example be set by the reconciler when it notices there is a newer revision for an artifact, or the previously included artifacts differ from the current available ones. 
The Condition should be removed after writing a new artifact to the storage. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/gitrepository_types.go | 57 ++++++++++++------- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 12 ++-- 2 files changed, 44 insertions(+), 25 deletions(-) diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c1014e6b7..9344e3763 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -34,6 +34,30 @@ const ( LibGit2Implementation = "libgit2" ) +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + CheckoutFailedCondition string = "CheckoutFailed" + + // SourceVerifiedCondition indicates the integrity of the Source has been verified. If True, the integrity check + // succeeded. If False, it failed. The Condition is only present on the resource if the integrity has been verified. + SourceVerifiedCondition string = "SourceVerified" + + // IncludeUnavailableCondition indicates one of the includes is not available. For example, because it does not + // exist, or does not have an Artifact. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + IncludeUnavailableCondition string = "IncludeUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. 
+ // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" +) + // GitRepositorySpec defines the desired state of a Git repository. type GitRepositorySpec struct { // The repository URL, can be a HTTP/S or SSH address. @@ -42,10 +66,8 @@ type GitRepositorySpec struct { URL string `json:"url"` // The secret name containing the Git credentials. - // For HTTPS repositories the secret must contain username and password - // fields. - // For SSH repositories the secret must contain identity, identity.pub and - // known_hosts fields. + // For HTTPS repositories the secret must contain username and password fields. + // For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. // +optional SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` @@ -63,16 +85,16 @@ type GitRepositorySpec struct { // +optional Reference *GitRepositoryRef `json:"ref,omitempty"` - // Verify OpenPGP signature for the Git commit HEAD points to. + // Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. // +optional Verification *GitRepositoryVerification `json:"verify,omitempty"` - // Ignore overrides the set of excluded patterns in the .sourceignore format - // (which is the same as .gitignore). If not provided, a default will be used, - // consult the documentation for your version to find out what those are. + // Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). + // If not provided, a default will be used, consult the documentation for your version to find out what those are. // +optional Ignore *string `json:"ignore,omitempty"` + // Suspend tells the controller to suspend the reconciliation of this source. // This flag tells the controller to suspend the reconciliation of this source. 
// +optional Suspend bool `json:"suspend,omitempty"` @@ -84,13 +106,13 @@ type GitRepositorySpec struct { // +optional GitImplementation string `json:"gitImplementation,omitempty"` - // When enabled, after the clone is created, initializes all submodules within, - // using their default settings. + // When enabled, after the clone is created, initializes all submodules within, using their default settings. // This option is available only when using the 'go-git' GitImplementation. // +optional RecurseSubmodules bool `json:"recurseSubmodules,omitempty"` - // Extra git repositories to map into the repository + // Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for + // this resource. Include []GitRepositoryInclude `json:"include,omitempty"` // AccessFrom defines an Access Control List for allowing cross-namespace references to this object. @@ -144,11 +166,11 @@ type GitRepositoryRef struct { // GitRepositoryVerification defines the OpenPGP signature verification process. type GitRepositoryVerification struct { - // Mode describes what git object should be verified, currently ('head'). + // Mode describes what Git object should be verified, currently ('head'). // +kubebuilder:validation:Enum=head Mode string `json:"mode"` - // The secret name containing the public keys of all trusted Git authors. + // SecretRef containing the public keys of all trusted Git authors. SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"` } @@ -162,8 +184,7 @@ type GitRepositoryStatus struct { // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` - // URL is the download link for the artifact output of the last repository - // sync. + // URL is the download link for the artifact output of the last repository sync. 
// +optional URL string `json:"url,omitempty"` @@ -179,12 +200,10 @@ type GitRepositoryStatus struct { } const ( - // GitOperationSucceedReason represents the fact that the git clone, pull - // and checkout operations succeeded. + // GitOperationSucceedReason represents the fact that the git clone, pull and checkout operations succeeded. GitOperationSucceedReason string = "GitOperationSucceed" - // GitOperationFailedReason represents the fact that the git clone, pull or - // checkout operations failed. + // GitOperationFailedReason represents the fact that the git clone, pull or checkout operations failed. GitOperationFailedReason string = "GitOperationFailed" ) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index 4f5de9a06..d3f644be1 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -76,7 +76,7 @@ spec: description: Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). If not provided, a default will be used, consult the documentation for your version to find out what those are. type: string include: - description: Extra git repositories to map into the repository + description: Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for this resource. items: description: GitRepositoryInclude defines a source with a from and to path. properties: @@ -122,7 +122,7 @@ spec: type: string type: object secretRef: - description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain identity, identity.pub and known_hosts fields. + description: The secret name containing the Git credentials. 
For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: description: Name of the referent @@ -131,7 +131,7 @@ spec: - name type: object suspend: - description: This flag tells the controller to suspend the reconciliation of this source. + description: Suspend tells the controller to suspend the reconciliation of this source. This flag tells the controller to suspend the reconciliation of this source. type: boolean timeout: default: 20s @@ -142,15 +142,15 @@ spec: pattern: ^(http|https|ssh):// type: string verify: - description: Verify OpenPGP signature for the Git commit HEAD points to. + description: Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to. properties: mode: - description: Mode describes what git object should be verified, currently ('head'). + description: Mode describes what Git object should be verified, currently ('head'). enum: - head type: string secretRef: - description: The secret name containing the public keys of all trusted Git authors. + description: SecretRef containing the public keys of all trusted Git authors. properties: name: description: Name of the referent From d7e0faa17dd2c0dd6add2d7b8ff3163015953205 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:33:18 +0200 Subject: [PATCH 37/53] Implement new runtime interfaces, prepare testenv This commit ensures all API objects implement the interfaces used by the runtime package to work with conditions, etc., and prepares the test suite to work with the `pkg/runtime/testenv` wrapper. Changes are made in a backwards compatible way (that being: the existing code can still be build and works as expected), but without proper dependency boundaries. 
The result of this is that the API package temporary depends on the runtime package, which is resolved when all reconcilers have been refactored and the API package does no longer contain condition modifying functions. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/go.mod | 23 +- api/go.sum | 34 ++- api/v1beta1/bucket_types.go | 33 ++- api/v1beta1/gitrepository_types.go | 33 ++- api/v1beta1/helmchart_types.go | 35 ++- api/v1beta1/helmrepository_types.go | 33 ++- .../source.toolkit.fluxcd.io_buckets.yaml | 4 +- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 8 +- .../source.toolkit.fluxcd.io_helmcharts.yaml | 2 +- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 4 +- controllers/bucket_controller.go | 14 +- controllers/gitrepository_controller.go | 23 +- controllers/helmchart_controller.go | 14 +- controllers/helmchart_controller_test.go | 116 ++++----- controllers/helmrepository_controller.go | 14 +- controllers/helmrepository_controller_test.go | 8 +- controllers/legacy_suite_test.go | 190 +++++++++++++++ controllers/storage.go | 8 +- controllers/suite_test.go | 223 +++++++----------- docs/api/source.md | 51 ++-- go.mod | 6 +- go.sum | 11 +- main.go | 24 +- 23 files changed, 552 insertions(+), 359 deletions(-) create mode 100644 controllers/legacy_suite_test.go diff --git a/api/go.mod b/api/go.mod index ce4aef76c..2af43091b 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,25 +4,46 @@ go 1.17 require ( github.com/fluxcd/pkg/apis/acl v0.0.3 - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 + // TODO(hidde): introduction of the runtime package is temporary, and the dependency should be removed as soon as + // all APIs have been updated to the runtime standards (more specifically; have dropped their condition modifying + // functions). 
+ github.com/fluxcd/pkg/runtime v0.13.0-rc.6 k8s.io/apimachinery v0.23.1 sigs.k8s.io/controller-runtime v0.11.0 ) require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/go-logr/logr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.6 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/onsi/gomega v1.17.0 // indirect + github.com/pkg/errors v0.9.1 // indirect golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect + golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect + golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.27.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + k8s.io/api v0.23.0 // indirect + k8s.io/client-go v0.23.0 // indirect k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index ffe31a8af..927fd8a67 100644 --- a/api/go.sum +++ b/api/go.sum @@ -68,6 +68,7 @@ github.com/benbjohnson/clock v1.0.3/go.mod 
h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiU github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= @@ -76,7 +77,9 @@ github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -118,13 +121,16 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -148,6 +154,7 @@ github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -194,6 +201,7 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -236,6 +244,7 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -250,9 +259,11 @@ 
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -270,6 +281,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ 
-309,6 +321,7 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -359,6 +372,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -369,21 +383,25 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= 
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -466,15 +484,18 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4 go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -584,6 +605,7 @@ 
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -663,6 +685,7 @@ golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -678,6 +701,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -743,6 +767,7 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -772,6 +797,7 @@ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -852,6 +878,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -895,11 +922,13 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= k8s.io/component-base v0.23.0/go.mod 
h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= @@ -908,6 +937,7 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 4df79c2e1..e4b4d6cdc 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -126,7 +127,7 @@ func BucketProgressing(bucket Bucket) Bucket { bucket.Status.ObservedGeneration = bucket.Generation bucket.Status.URL = "" bucket.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return bucket } @@ -136,14 +137,14 @@ func BucketProgressing(bucket Bucket) Bucket { func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { bucket.Status.Artifact = &artifact bucket.Status.URL = url - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) return bucket } // BucketNotReady sets 
the meta.ReadyCondition on the Bucket to 'False', with // the given reason and message. It returns the modified Bucket. func BucketNotReady(bucket Bucket, reason, message string) Bucket { - meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) return bucket } @@ -158,22 +159,32 @@ func BucketReadyMessage(bucket Bucket) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in Bucket) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *Bucket) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *Bucket) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *Bucket) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index 9344e3763..c2cfc8e4c 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -215,7 +216,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -226,7 +227,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt repository.Status.Artifact = &artifact repository.Status.IncludedArtifacts = includedArtifacts repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -234,7 +235,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt // to 'False', with the given reason and message. It returns the modified // GitRepository. 
func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -249,22 +250,32 @@ func GitRepositoryReadyMessage(repository GitRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in GitRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *GitRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in GitRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *GitRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *GitRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *GitRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 51c04781d..78a20852b 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) // HelmChartKind is the string representation of a HelmChart. @@ -152,7 +153,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { chart.Status.ObservedGeneration = chart.Generation chart.Status.URL = "" chart.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&chart, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return chart } @@ -162,7 +163,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart { func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart { chart.Status.Artifact = &artifact chart.Status.URL = url - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&chart, meta.ReadyCondition, reason, message) return chart } @@ -170,7 +171,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str // 'False', with the given reason and message. It returns the modified // HelmChart. 
func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart { - meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&chart, meta.ReadyCondition, reason, message) return chart } @@ -185,22 +186,26 @@ func HelmChartReadyMessage(chart HelmChart) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. -func (in *HelmChart) GetArtifact() *Artifact { - return in.Status.Artifact +// GetConditions returns the status conditions of the object. +func (in HelmChart) GetConditions() []metav1.Condition { + return in.Status.Conditions } -// GetStatusConditions returns a pointer to the Status.Conditions slice -func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { - return &in.Status.Conditions +// SetConditions sets the status conditions on the object. +func (in *HelmChart) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions } -// GetInterval returns the interval at which the source is updated. -func (in *HelmChart) GetInterval() metav1.Duration { +// GetInterval returns the interval at which the source is reconciled. +func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *HelmChart) GetArtifact() *Artifact { + return in.Status.Artifact +} + // GetValuesFiles returns a merged list of ValuesFiles. func (in *HelmChart) GetValuesFiles() []string { valuesFiles := in.Spec.ValuesFiles @@ -212,6 +217,12 @@ func (in *HelmChart) GetValuesFiles() []string { return valuesFiles } +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. 
+func (in *HelmChart) GetStatusConditions() *[]metav1.Condition { + return &in.Status.Conditions +} + // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 0af0d4cf6..65b924042 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -22,6 +22,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -113,7 +114,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { repository.Status.ObservedGeneration = repository.Generation repository.Status.URL = "" repository.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress") + conditions.MarkUnknown(&repository, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") return repository } @@ -123,7 +124,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository { func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository { repository.Status.Artifact = &artifact repository.Status.URL = url - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message) + conditions.MarkTrue(&repository, meta.ReadyCondition, reason, message) return repository } @@ -131,7 +132,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas // HelmRepository to 'False', with the given reason and message. It returns the // modified HelmRepository. 
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository { - meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message) + conditions.MarkFalse(&repository, meta.ReadyCondition, reason, message) return repository } @@ -146,22 +147,32 @@ func HelmRepositoryReadyMessage(repository HelmRepository) string { return "" } -// GetArtifact returns the latest artifact from the source if present in the -// status sub-resource. +// GetConditions returns the status conditions of the object. +func (in HelmRepository) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetInterval returns the interval at which the source is reconciled. +func (in HelmRepository) GetInterval() metav1.Duration { + return in.Spec.Interval +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. func (in *HelmRepository) GetArtifact() *Artifact { return in.Status.Artifact } -// GetStatusConditions returns a pointer to the Status.Conditions slice +// GetStatusConditions returns a pointer to the Status.Conditions slice. +// Deprecated: use GetConditions instead. func (in *HelmRepository) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } -// GetInterval returns the interval at which the source is updated. 
-func (in *HelmRepository) GetInterval() metav1.Duration { - return in.Spec.Interval -} - // +genclient // +genclient:Namespaced // +kubebuilder:object:root=true diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index f613db849..7a26ead34 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -93,7 +93,7 @@ spec: description: The name of the secret containing authentication credentials for the Bucket. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -183,7 +183,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index d3f644be1..fd6c2138a 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -87,7 +87,7 @@ spec: description: Reference to a GitRepository to include. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -125,7 +125,7 @@ spec: description: The secret name containing the Git credentials. For HTTPS repositories the secret must contain username and password fields. For SSH repositories the secret must contain 'identity', 'identity.pub' and 'known_hosts' fields. properties: name: - description: Name of the referent + description: Name of the referent. 
type: string required: - name @@ -153,7 +153,7 @@ spec: description: SecretRef containing the public keys of all trusted Git authors. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -264,7 +264,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index b45e88211..f0571e0b5 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -200,7 +200,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index bcce23a7f..981a1f88d 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -75,7 +75,7 @@ spec: description: The name of the secret containing authentication credentials for the Helm repository. For HTTP/S basic auth the secret must contain username and password fields. 
For TLS the secret must contain a certFile and keyFile, and/or caCert fields. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -167,7 +167,7 @@ spec: type: object type: array lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string observedGeneration: description: ObservedGeneration is the last observed generation. diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a25587d1a..a795453e4 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -541,21 +541,11 @@ func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, "Normal", severity, msg) + r.EventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go index 7642a1614..d2e92d5bb 100644 --- a/controllers/gitrepository_controller.go +++ b/controllers/gitrepository_controller.go @@ -122,7 +122,7 @@ func (r *GitRepositoryReconciler) 
Reconcile(ctx context.Context, req ctrl.Reques // check dependencies if len(repository.Spec.Include) > 0 { if err := r.checkDependencies(repository); err != nil { - repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()) + repository = sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()) if err := r.updateStatus(ctx, req, repository.Status); err != nil { log.Error(err, "unable to update status for dependency not ready") return ctrl.Result{Requeue: true}, err @@ -284,7 +284,7 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour var gr sourcev1.GitRepository err := r.Get(context.Background(), dName, &gr) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } includedArtifacts = append(includedArtifacts, gr.GetArtifact()) } @@ -329,11 +329,11 @@ func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sour for i, incl := range repository.Spec.Include { toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath()) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath) if err != nil { - return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err + return sourcev1.GitRepositoryNotReady(repository, "DependencyNotReady", err.Error()), err } } @@ -423,22 +423,11 @@ func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error { // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, 
severity, msg string) { - log := ctrl.LoggerFrom(ctx) - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller.go b/controllers/helmchart_controller.go index e63f8e458..46da4db08 100644 --- a/controllers/helmchart_controller.go +++ b/controllers/helmchart_controller.go @@ -634,21 +634,11 @@ func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error { // event emits a Kubernetes event and forwards the event to notification // controller if configured. 
func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&chart, "Normal", severity, msg) + r.EventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &chart) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + r.ExternalEventRecorder.Eventf(&chart, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go index cb9838b15..f762fcd08 100644 --- a/controllers/helmchart_controller_test.go +++ b/controllers/helmchart_controller_test.go @@ -129,9 +129,9 @@ var _ = Describe("HelmChartReconciler", func() { got := &sourcev1.HelmChart{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -146,7 +146,7 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) 
When("Setting valid valuesFiles attribute", func() { @@ -161,12 +161,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -184,12 +184,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -207,12 +207,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != 
updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -228,12 +228,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -250,12 +250,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := 
os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -271,12 +271,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -682,7 +682,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Committing a new version in the chart metadata") @@ -727,9 +727,9 @@ var _ = Describe("HelmChartReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + 
!ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*now.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*now.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values).ToNot(BeNil()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) @@ -744,7 +744,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Revision != updated.Status.Artifact.Revision && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) Expect(got.Status.Artifact.Revision).To(ContainSubstring(updated.Status.Artifact.Revision)) Expect(got.Status.Artifact.Revision).To(ContainSubstring(commit.String()[0:12])) @@ -762,12 +762,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -785,12 +785,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && 
- storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -808,12 +808,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -834,16 +834,16 @@ var _ = Describe("HelmChartReconciler", func() { // Use status condition to be sure. 
for _, condn := range got.Status.Conditions { if strings.Contains(condn.Message, "with merged values files [./testdata/charts/helmchart/override.yaml]") && - storage.ArtifactExist(*got.Status.Artifact) { + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) { return true } } return false }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -860,12 +860,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -970,7 +970,7 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) }) }) @@ -1213,9 +1213,9 @@ var _ = 
Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeFalse()) @@ -1232,12 +1232,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1255,12 +1255,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) 
Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1278,12 +1278,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(helmChart.Values["testDefault"]).To(BeTrue()) Expect(helmChart.Values["testOverride"]).To(BeTrue()) @@ -1299,12 +1299,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact.Checksum != updated.Status.Artifact.Checksum && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := 
helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) @@ -1321,12 +1321,12 @@ var _ = Describe("HelmChartReconciler", func() { Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) return got.Status.ObservedGeneration > updated.Status.ObservedGeneration && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) - f, err := os.Stat(storage.LocalPath(*got.Status.Artifact)) + f, err := os.Stat(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) Expect(f.Size()).To(BeNumerically(">", 0)) - helmChart, err := loader.Load(storage.LocalPath(*got.Status.Artifact)) + helmChart, err := loader.Load(ginkgoTestStorage.LocalPath(*got.Status.Artifact)) Expect(err).NotTo(HaveOccurred()) _, exists := helmChart.Values["testDefault"] Expect(exists).To(BeFalse()) diff --git a/controllers/helmrepository_controller.go b/controllers/helmrepository_controller.go index d82bdad69..a03f6b61b 100644 --- a/controllers/helmrepository_controller.go +++ b/controllers/helmrepository_controller.go @@ -335,21 +335,11 @@ func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error // event emits a Kubernetes event and forwards the event to notification controller if configured func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) { - log := ctrl.LoggerFrom(ctx) if r.EventRecorder != nil { - r.EventRecorder.Eventf(&repository, "Normal", severity, msg) + r.EventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } if r.ExternalEventRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &repository) - if err != nil { - log.Error(err, "unable to send event") - return - } - - if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil { - log.Error(err, "unable to send event") - return - } + 
r.ExternalEventRecorder.Eventf(&repository, corev1.EventTypeNormal, severity, msg) } } diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go index e7d945a60..69a3656e9 100644 --- a/controllers/helmrepository_controller_test.go +++ b/controllers/helmrepository_controller_test.go @@ -99,7 +99,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, got) - return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact) + return got.Status.Artifact != nil && ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Updating the chart index") @@ -112,7 +112,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { _ = k8sClient.Get(context.Background(), key, now) // Test revision change and garbage collection return now.Status.Artifact.Revision != got.Status.Artifact.Revision && - !storage.ArtifactExist(*got.Status.Artifact) + !ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) updated := &sourcev1.HelmRepository{} @@ -291,7 +291,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") @@ -385,7 +385,7 @@ var _ = Describe("HelmRepositoryReconciler", func() { got := &sourcev1.HelmRepository{} _ = k8sClient.Get(context.Background(), key, got) return got.Status.Artifact != nil && - storage.ArtifactExist(*got.Status.Artifact) + ginkgoTestStorage.ArtifactExist(*got.Status.Artifact) }, timeout, interval).Should(BeTrue()) By("Expecting missing secret error") diff --git a/controllers/legacy_suite_test.go 
b/controllers/legacy_suite_test.go new file mode 100644 index 000000000..4b4d1f274 --- /dev/null +++ b/controllers/legacy_suite_test.go @@ -0,0 +1,190 @@ +/* +Copyright 2020 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "math/rand" + "net/http" + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "helm.sh/helm/v3/pkg/getter" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + // +kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var k8sManager ctrl.Manager +var ginkgoTestEnv *envtest.Environment +var ginkgoTestStorage *Storage + +var examplePublicKey []byte +var examplePrivateKey []byte +var exampleCA []byte +var lctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func(done Done) { + logf.SetLogger( + zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), + ) + lctx, cancel = context.WithCancel(ctx) + + By("bootstrapping test environment") + t := true + if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { + ginkgoTestEnv = &envtest.Environment{ + UseExistingCluster: &t, + } + } else { + ginkgoTestEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + } + } + + var err error + cfg, err = ginkgoTestEnv.Start() + Expect(err).ToNot(HaveOccurred()) + Expect(cfg).ToNot(BeNil()) + + err = sourcev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + + Expect(loadExampleKeys()).To(Succeed()) + + tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") + + ginkgoTestStorage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) + Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") + // serve artifacts from the filesystem, as done in main.go + fs := http.FileServer(http.Dir(tmpStoragePath)) + http.Handle("/", fs) + go http.ListenAndServe(":5050", nil) + + k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ + MetricsBindAddress: "0", + Scheme: scheme.Scheme, + }) + Expect(err).ToNot(HaveOccurred()) + + err = (&GitRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + 
}).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup GitRepositoryReconciler") + + err = (&HelmRepositoryReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") + + err = (&HelmChartReconciler{ + Client: k8sManager.GetClient(), + Scheme: scheme.Scheme, + Storage: ginkgoTestStorage, + Getters: getter.Providers{getter.Provider{ + Schemes: []string{"http", "https"}, + New: getter.NewHTTPGetter, + }}, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + + go func() { + err = k8sManager.Start(lctx) + Expect(err).ToNot(HaveOccurred()) + }() + + k8sClient = k8sManager.GetClient() + Expect(k8sClient).ToNot(BeNil()) + + close(done) +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + defer GinkgoRecover() + cancel() + if ginkgoTestStorage != nil { + err := os.RemoveAll(ginkgoTestStorage.BasePath) + Expect(err).NotTo(HaveOccurred()) + } + err := ginkgoTestEnv.Stop() + Expect(err).ToNot(HaveOccurred()) +}) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func loadExampleKeys() (err error) { + examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + return err + } + examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + if err != nil { + return err + } + exampleCA, err = os.ReadFile("testdata/certs/ca.pem") + return err +} + +var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") + +func randStringRunes(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letterRunes[rand.Intn(len(letterRunes))] + } + return string(b) +} diff --git a/controllers/storage.go b/controllers/storage.go index 5c1f7be02..d58508f40 
100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -53,7 +53,7 @@ type Storage struct { Timeout time.Duration `json:"timeout"` } -// NewStorage creates the storage helper for a given path and hostname +// NewStorage creates the storage helper for a given path and hostname. func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) { if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() { return nil, fmt.Errorf("invalid dir path: %s", basePath) @@ -81,7 +81,11 @@ func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) { if artifact.Path == "" { return } - artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path) + format := "http://%s/%s" + if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") { + format = "%s/%s" + } + artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/")) } // SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result. diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 9520bcbb6..a1e09b69b 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -17,179 +17,128 @@ limitations under the License. package controllers import ( - "context" + "fmt" "math/rand" - "net/http" "os" "path/filepath" "testing" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "helm.sh/helm/v3/pkg/getter" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/envtest/printer" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + + "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/testenv" + "github.com/fluxcd/pkg/testserver" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" // +kubebuilder:scaffold:imports ) -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. +// These tests make use of plain Go using Gomega for assertions. +// At the beginning of every (sub)test Gomega can be initialized +// using gomega.NewWithT. +// Refer to http://onsi.github.io/gomega/ to learn more about +// Gomega. 
-var cfg *rest.Config -var k8sClient client.Client -var k8sManager ctrl.Manager -var testEnv *envtest.Environment -var storage *Storage +const ( + timeout = 10 * time.Second + interval = 1 * time.Second +) -var examplePublicKey []byte -var examplePrivateKey []byte -var exampleCA []byte -var ctx context.Context -var cancel context.CancelFunc +var ( + testEnv *testenv.Environment + testStorage *Storage + testServer *testserver.ArtifactServer + testMetricsH controller.Metrics + ctx = ctrl.SetupSignalHandler() +) -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) +var ( + tlsPublicKey []byte + tlsPrivateKey []byte + tlsCA []byte +) - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{printer.NewlineReporter{}}) +func init() { + rand.Seed(time.Now().UnixNano()) } -var _ = BeforeSuite(func(done Done) { - logf.SetLogger( - zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)), - ) - ctx, cancel = context.WithCancel(context.TODO()) - - By("bootstrapping test environment") - t := true - if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { - testEnv = &envtest.Environment{ - UseExistingCluster: &t, - } - } else { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - } +func TestMain(m *testing.M) { + initTestTLS() + + utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme)) + + testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases"))) var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = sourcev1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - Expect(loadExampleKeys()).To(Succeed()) - - tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-") - 
Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir") - - storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30) - Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage") - // serve artifacts from the filesystem, as done in main.go - fs := http.FileServer(http.Dir(tmpStoragePath)) - http.Handle("/", fs) - go http.ListenAndServe(":5050", nil) - - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: scheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - - err = (&GitRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler") - - err = (&HelmRepositoryReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler") - - err = (&HelmChartReconciler{ - Client: k8sManager.GetClient(), - Scheme: scheme.Scheme, - Storage: storage, - Getters: getter.Providers{getter.Provider{ - Schemes: []string{"http", "https"}, - New: getter.NewHTTPGetter, - }}, - }).SetupWithManager(k8sManager) - Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler") + testServer, err = testserver.NewTempArtifactServer() + if err != nil { + panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err)) + } + fmt.Println("Starting the test storage server") + testServer.Start() + + testStorage, err = newTestStorage(testServer.HTTPServer) + if err != nil { + panic(fmt.Sprintf("Failed to create a test storage: %v", err)) + } + + testMetricsH = controller.MustMakeMetrics(testEnv) + + //if err := (&GitRepositoryReconciler{ + // Client: testEnv, + // Metrics: testMetricsH, + // Storage: testStorage, + 
//}).SetupWithManager(testEnv); err != nil { + // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) + //} go func() { - defer GinkgoRecover() - err = k8sManager.Start(ctx) - Expect(err).ToNot(HaveOccurred()) + fmt.Println("Starting the test environment") + if err := testEnv.Start(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the test environment manager: %v", err)) + } }() + <-testEnv.Manager.Elected() - k8sClient = k8sManager.GetClient() - Expect(k8sClient).ToNot(BeNil()) + code := m.Run() - close(done) -}, 60) + fmt.Println("Stopping the test environment") + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the test environment: %v", err)) + } -var _ = AfterSuite(func() { - cancel() - By("tearing down the test environment") - if storage != nil { - err := os.RemoveAll(storage.BasePath) - Expect(err).NotTo(HaveOccurred()) + fmt.Println("Stopping the storage server") + testServer.Stop() + if err := os.RemoveAll(testServer.Root()); err != nil { + panic(fmt.Sprintf("Failed to remove storage server dir: %v", err)) } - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) -func init() { - rand.Seed(time.Now().UnixNano()) + os.Exit(code) } -func loadExampleKeys() (err error) { - examplePublicKey, err = os.ReadFile("testdata/certs/server.pem") +func initTestTLS() { + var err error + tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem") + if err != nil { + panic(err) + } + tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") if err != nil { - return err + panic(err) } - examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem") + tlsCA, err = os.ReadFile("testdata/certs/ca.pem") if err != nil { - return err + panic(err) } - exampleCA, err = os.ReadFile("testdata/certs/ca.pem") - return err } -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890") - -func randStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = 
letterRunes[rand.Intn(len(letterRunes))] +func newTestStorage(s *testserver.HTTPServer) (*Storage, error) { + storage, err := NewStorage(s.Root(), s.URL(), timeout) + if err != nil { + return nil, err } - return string(b) + return storage, nil } diff --git a/docs/api/source.md b/docs/api/source.md index 8cc4f8ddf..4a506c472 100644 --- a/docs/api/source.md +++ b/docs/api/source.md @@ -313,10 +313,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -372,7 +370,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -384,9 +382,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). +If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -398,7 +395,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. 
+This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -423,8 +421,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -438,7 +435,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1349,10 +1347,8 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference <td> <em>(Optional)</em> <p>The secret name containing the Git credentials. -For HTTPS repositories the secret must contain username and password -fields. -For SSH repositories the secret must contain identity, identity.pub and -known_hosts fields.</p> +For HTTPS repositories the secret must contain username and password fields. +For SSH repositories the secret must contain ‘identity’, ‘identity.pub’ and ‘known_hosts’ fields.</p> </td> </tr> <tr> @@ -1408,7 +1404,7 @@ GitRepositoryVerification </td> <td> <em>(Optional)</em> -<p>Verify OpenPGP signature for the Git commit HEAD points to.</p> +<p>Verification defines the configuration to verify the OpenPGP signature for the Git commit HEAD points to.</p> </td> </tr> <tr> @@ -1420,9 +1416,8 @@ string </td> <td> <em>(Optional)</em> -<p>Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.</p> +<p>Ignore overrides the set of excluded patterns in the .sourceignore format (which is the same as .gitignore). 
+If not provided, a default will be used, consult the documentation for your version to find out what those are.</p> </td> </tr> <tr> @@ -1434,7 +1429,8 @@ bool </td> <td> <em>(Optional)</em> -<p>This flag tells the controller to suspend the reconciliation of this source.</p> +<p>Suspend tells the controller to suspend the reconciliation of this source. +This flag tells the controller to suspend the reconciliation of this source.</p> </td> </tr> <tr> @@ -1459,8 +1455,7 @@ bool </td> <td> <em>(Optional)</em> -<p>When enabled, after the clone is created, initializes all submodules within, -using their default settings. +<p>When enabled, after the clone is created, initializes all submodules within, using their default settings. This option is available only when using the ‘go-git’ GitImplementation.</p> </td> </tr> @@ -1474,7 +1469,8 @@ This option is available only when using the ‘go-git’ GitImplementat </em> </td> <td> -<p>Extra git repositories to map into the repository</p> +<p>Include defines a list of GitRepository resources which artifacts should be included in the artifact produced for +this resource.</p> </td> </tr> <tr> @@ -1547,8 +1543,7 @@ string </td> <td> <em>(Optional)</em> -<p>URL is the download link for the artifact output of the last repository -sync.</p> +<p>URL is the download link for the artifact output of the last repository sync.</p> </td> </tr> <tr> @@ -1623,7 +1618,7 @@ string </em> </td> <td> -<p>Mode describes what git object should be verified, currently (‘head’).</p> +<p>Mode describes what Git object should be verified, currently (‘head’).</p> </td> </tr> <tr> @@ -1636,7 +1631,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference </em> </td> <td> -<p>The secret name containing the public keys of all trusted Git authors.</p> +<p>SecretRef containing the public keys of all trusted Git authors.</p> </td> </tr> </tbody> diff --git a/go.mod b/go.mod index 1fe12d2d4..acb9c6686 100644 --- a/go.mod +++ b/go.mod @@ -10,13 +10,14 @@ 
require ( github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/cyphar/filepath-securejoin v0.2.2 github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b - github.com/fluxcd/pkg/apis/meta v0.10.2 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 github.com/fluxcd/pkg/gittestserver v0.5.0 github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.12.3 + github.com/fluxcd/pkg/runtime v0.13.0-rc.6 github.com/fluxcd/pkg/ssh v0.2.0 + github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 github.com/fluxcd/pkg/version v0.1.0 github.com/fluxcd/source-controller/api v0.20.1 @@ -77,7 +78,6 @@ require ( github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.7.0 // indirect github.com/fluxcd/pkg/apis/acl v0.0.3 // indirect - github.com/fluxcd/pkg/testserver v0.1.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-errors/errors v1.0.1 // indirect github.com/go-git/gcfg v1.5.0 // indirect diff --git a/go.sum b/go.sum index 69b819334..816f774c5 100644 --- a/go.sum +++ b/go.sum @@ -298,8 +298,8 @@ github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8S github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc= github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU= -github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI= -github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 h1:YY6RlaHIMXawgEOJhJbSrm4NpD9fJTCWFGKgtNfQ0/g= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3/go.mod 
h1:ki5wJE4nuFOZt78q0RSYkrKwINgIBPynuswZhnTOSoI= github.com/fluxcd/pkg/gittestserver v0.5.0 h1:pPdaz7pUsukt4eQ+xQeNwoypOXGGOHFHnPjIHQAv0tE= github.com/fluxcd/pkg/gittestserver v0.5.0/go.mod h1:mFEF/Xrg+CjQH4VFCRCou2qZmhWKo7EYcjr7MIoX6+s= github.com/fluxcd/pkg/gitutil v0.1.0 h1:VO3kJY/CKOCO4ysDNqfdpTg04icAKBOSb3lbR5uE/IE= @@ -308,12 +308,13 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0 h1:RT0G5buw5qrzEfIIH0fklppIvPAaQF//p github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglAU+fRTO/sPPV/Kj3gQ= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.12.3 h1:h21AZ3YG5MAP7DxFF9hfKrP+vFzys2L7CkUbPFjbP/0= -github.com/fluxcd/pkg/runtime v0.12.3/go.mod h1:imJ2xYy/d4PbSinX2IefmZk+iS2c1P5fY0js8mCE4SM= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= -github.com/fluxcd/pkg/testserver v0.1.0 h1:nOYgM1HYFZNNSUFykuWDmrsxj4jQxUCvmLHWOQeqmyA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= +github.com/fluxcd/pkg/testserver v0.2.0 h1:Mj0TapmKaywI6Fi5wvt1LAZpakUHmtzWQpJNKQ0Krt4= +github.com/fluxcd/pkg/testserver v0.2.0/go.mod h1:bgjjydkXsZTeFzjz9Cr4heGANr41uTB1Aj1Q5qzuYVk= github.com/fluxcd/pkg/untar v0.1.0 h1:k97V/xV5hFrAkIkVPuv5AVhyxh1ZzzAKba/lbDfGo6o= github.com/fluxcd/pkg/untar v0.1.0/go.mod h1:aGswNyzB1mlz/T/kpOS58mITBMxMKc9tlJBH037A2HY= github.com/fluxcd/pkg/version v0.1.0 h1:v+SmCanmCB5Tj2Cx9TXlj+kNRfPGbAvirkeqsp7ZEAQ= diff --git a/main.go b/main.go index 67f00a920..18409ce85 100644 
--- a/main.go +++ b/main.go @@ -123,18 +123,6 @@ func main() { helm.MaxChartSize = helmChartLimit helm.MaxChartFileSize = helmChartFileLimit - var eventRecorder *events.Recorder - if eventsAddr != "" { - var err error - if eventRecorder, err = events.NewRecorder(eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } - } - - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) - watchNamespace := "" if !watchAllNamespaces { watchNamespace = os.Getenv("RUNTIME_NAMESPACE") @@ -163,6 +151,18 @@ func main() { probes.SetupChecks(mgr, setupLog) pprof.SetupHandlers(mgr, setupLog) + var eventRecorder *events.Recorder + if eventsAddr != "" { + var err error + if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) + } + } + + metricsRecorder := metrics.NewRecorder() + crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) + if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) } From 7d752b7569e488a1431e7760c3395b21481f3845 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Fri, 30 Jul 2021 12:54:45 +0200 Subject: [PATCH 38/53] Introduce `artifactSet` to replace `hasArtifactUpdated` NOTE: Remove `hasArtifactUpdated` in the future once it's no longer used. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact.go | 38 ++++++++++++++++++++++++ controllers/artifact_test.go | 56 ++++++++++++++++++++++-------------- 2 files changed, 73 insertions(+), 21 deletions(-) diff --git a/controllers/artifact.go b/controllers/artifact.go index 0e16fd03c..fa21bd0a4 100644 --- a/controllers/artifact.go +++ b/controllers/artifact.go @@ -1,9 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" +type artifactSet []*sourcev1.Artifact + +// Diff returns true if any of the revisions in the artifactSet does not match any of the given artifacts. +func (s artifactSet) Diff(set artifactSet) bool { + if len(s) != len(set) { + return true + } + +outer: + for _, j := range s { + for _, k := range set { + if k.HasRevision(j.Revision) { + continue outer + } + } + return true + } + return false +} + // hasArtifactUpdated returns true if any of the revisions in the current artifacts // does not match any of the artifacts in the updated artifacts +// NOTE: artifactSet is a replacement for this. Remove this once it's not used +// anywhere. 
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool { if len(current) != len(updated) { return true diff --git a/controllers/artifact_test.go b/controllers/artifact_test.go index 959661615..935c93bf7 100644 --- a/controllers/artifact_test.go +++ b/controllers/artifact_test.go @@ -1,26 +1,40 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package controllers import ( "testing" - - sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestHasUpdated(t *testing.T) { +func Test_artifactSet_Diff(t *testing.T) { tests := []struct { name string - current []*sourcev1.Artifact - updated []*sourcev1.Artifact + current artifactSet + updated artifactSet expected bool }{ { - name: "not updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, no diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -28,13 +42,13 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated single", - current: []*sourcev1.Artifact{ + name: "one artifact, diff", + current: artifactSet{ { Revision: "foo", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "bar", }, @@ -42,8 +56,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "not updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, no diff", + current: artifactSet{ { Revision: "foo", }, 
@@ -51,7 +65,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -62,8 +76,8 @@ func TestHasUpdated(t *testing.T) { expected: false, }, { - name: "updated multiple", - current: []*sourcev1.Artifact{ + name: "multiple artifacts, diff", + current: artifactSet{ { Revision: "foo", }, @@ -71,7 +85,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -82,8 +96,8 @@ func TestHasUpdated(t *testing.T) { expected: true, }, { - name: "updated different artifact count", - current: []*sourcev1.Artifact{ + name: "different artifact count", + current: artifactSet{ { Revision: "foo", }, @@ -91,7 +105,7 @@ func TestHasUpdated(t *testing.T) { Revision: "bar", }, }, - updated: []*sourcev1.Artifact{ + updated: artifactSet{ { Revision: "foo", }, @@ -101,7 +115,7 @@ func TestHasUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := hasArtifactUpdated(tt.current, tt.updated) + result := tt.current.Diff(tt.updated) if result != tt.expected { t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected) } From d9ad8739164ca847f8278a41e8048f47374ec96a Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 21:49:03 +0530 Subject: [PATCH 39/53] source: Add `GetRequeueAfter` The problem with `GetInterval()` was that the returned type was of `metav1.Duration`, while almost anywhere it was used, a type of `time.Duration` was requested. The result of this was that we had to call `GetInterval().Duration` all the time, which would become a bit cumbersome after awhile. To prevent this, we introduce a new `GetRequeueAfter() time.Duration` method, which both results the right type, and bears a name that is easier to remember where the value is used most; while setting the `Result.RequeueAfter` during reconcile operations. 
The introduction of this method deprecates `GetInterval()`, which should be removed in a future MINOR release. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 8 ++++++++ api/v1beta1/gitrepository_types.go | 8 ++++++++ api/v1beta1/helmchart_types.go | 8 ++++++++ api/v1beta1/helmrepository_types.go | 8 ++++++++ api/v1beta1/source.go | 5 +++++ 5 files changed, 37 insertions(+) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index e4b4d6cdc..b03c03a10 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -169,7 +171,13 @@ func (in *Bucket) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in Bucket) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index c2cfc8e4c..d4dcbbe85 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -260,7 +262,13 @@ func (in *GitRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. 
+func (in GitRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in GitRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go index 78a20852b..76ed4914b 100644 --- a/api/v1beta1/helmchart_types.go +++ b/api/v1beta1/helmchart_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -196,7 +198,13 @@ func (in *HelmChart) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmChart) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. func (in HelmChart) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go index 65b924042..cd585f2d0 100644 --- a/api/v1beta1/helmrepository_types.go +++ b/api/v1beta1/helmrepository_types.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -157,7 +159,13 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) { in.Status.Conditions = conditions } +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in HelmRepository) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + // GetInterval returns the interval at which the source is reconciled. +// Deprecated: use GetRequeueAfter instead. 
func (in HelmRepository) GetInterval() metav1.Duration { return in.Spec.Interval } diff --git a/api/v1beta1/source.go b/api/v1beta1/source.go index 5c10d00fc..f22780dd1 100644 --- a/api/v1beta1/source.go +++ b/api/v1beta1/source.go @@ -17,6 +17,8 @@ limitations under the License. package v1beta1 import ( + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -29,9 +31,12 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + // GetRequeueAfter returns the duration after which the source must be reconciled again. + GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the // status sub-resource. GetArtifact() *Artifact // GetInterval returns the interval at which the source is updated. + // Deprecated: use GetRequeueAfter instead. GetInterval() metav1.Duration } From 1c25c8c3bce91f849e319e0f85eb0681fedde3dc Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:30:20 +0530 Subject: [PATCH 40/53] Use new events and metrics helpers in main.go Signed-off-by: Sunny <darkowlzz@protonmail.com> --- main.go | 55 +++++++++++++++++++++++++------------------------------ 1 file changed, 25 insertions(+), 30 deletions(-) diff --git a/main.go b/main.go index 18409ce85..c9e903f30 100644 --- a/main.go +++ b/main.go @@ -33,13 +33,12 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ctrl "sigs.k8s.io/controller-runtime" - crtlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics" "github.com/fluxcd/pkg/runtime/client" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" "github.com/fluxcd/pkg/runtime/leaderelection" "github.com/fluxcd/pkg/runtime/logger" - "github.com/fluxcd/pkg/runtime/metrics" "github.com/fluxcd/pkg/runtime/pprof" "github.com/fluxcd/pkg/runtime/probes" @@ -114,6 +113,7 @@ func main() { 
clientOptions.BindFlags(flag.CommandLine) logOptions.BindFlags(flag.CommandLine) leaderElectionOptions.BindFlags(flag.CommandLine) + flag.Parse() ctrl.SetLogger(logger.NewLogger(logOptions)) @@ -160,8 +160,7 @@ func main() { } } - metricsRecorder := metrics.NewRecorder() - crtlmetrics.Registry.MustRegister(metricsRecorder.Collectors()...) + metricsH := helper.MustMakeMetrics(mgr) if storageAdvAddr == "" { storageAdvAddr = determineAdvStorageAddr(storageAddr, setupLog) @@ -169,12 +168,11 @@ func main() { storage := mustInitStorage(storagePath, storageAdvAddr, setupLog) if err = (&controllers.GitRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.GitRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, DependencyRequeueInterval: requeueDependency, @@ -183,13 +181,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmRepositoryReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmRepositoryReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -197,13 +194,12 @@ func main() { os.Exit(1) } if err = (&controllers.HelmChartReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - Getters: getters, - EventRecorder: 
mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + Getters: getters, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.HelmChartReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { @@ -211,12 +207,11 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: mgr.GetEventRecorderFor(controllerName), - ExternalEventRecorder: eventRecorder, - MetricsRecorder: metricsRecorder, + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Storage: storage, + EventRecorder: eventRecorder, + MetricsRecorder: metricsH.MetricsRecorder, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From f927c76c7386727a4f3a094e61f11d5c866f04ad Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 25 Nov 2021 00:03:11 +0530 Subject: [PATCH 41/53] Move Artifact conditions to conditions Also, introduce FetchFailedCondition for generic fetch failures. Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/condition_types.go | 16 ++++++++++++++++ api/v1beta1/gitrepository_types.go | 8 -------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/api/v1beta1/condition_types.go b/api/v1beta1/condition_types.go index 4077a2ab6..1a9db62ad 100644 --- a/api/v1beta1/condition_types.go +++ b/api/v1beta1/condition_types.go @@ -18,6 +18,22 @@ package v1beta1 const SourceFinalizer = "finalizers.fluxcd.io" +const ( + // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
+ ArtifactUnavailableCondition string = "ArtifactUnavailable" + + // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + ArtifactOutdatedCondition string = "ArtifactOutdated" + + // FetchFailedCondition indicates a transient or persistent fetch failure of an upstream Source. + // If True, observations on the upstream Source revision may be impossible, and the Artifact available for the + // Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + FetchFailedCondition string = "FetchFailed" +) + const ( // URLInvalidReason represents the fact that a given source has an invalid URL. URLInvalidReason string = "URLInvalid" diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go index d4dcbbe85..145ac6bb0 100644 --- a/api/v1beta1/gitrepository_types.go +++ b/api/v1beta1/gitrepository_types.go @@ -38,10 +38,6 @@ const ( ) const ( - // ArtifactUnavailableCondition indicates there is no Artifact available for the Source. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactUnavailableCondition string = "ArtifactUnavailable" - // CheckoutFailedCondition indicates a transient or persistent checkout failure. If True, observations on the // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. @@ -55,10 +51,6 @@ const ( // exist, or does not have an Artifact. // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
IncludeUnavailableCondition string = "IncludeUnavailable" - - // ArtifactOutdatedCondition indicates the current Artifact of the Source is outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. - ArtifactOutdatedCondition string = "ArtifactOutdated" ) // GitRepositorySpec defines the desired state of a Git repository. From 34b5dcfceaabde8adbf01c714029eeeb866f6ce0 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Fri, 26 Nov 2021 00:18:08 +0530 Subject: [PATCH 42/53] Add gomega matcher for artifact Signed-off-by: Sunny <darkowlzz@protonmail.com> Co-authored-by: Hidde Beydals <hello@hidde.co> --- controllers/artifact_matchers_test.go | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 controllers/artifact_matchers_test.go diff --git a/controllers/artifact_matchers_test.go b/controllers/artifact_matchers_test.go new file mode 100644 index 000000000..63ff81ced --- /dev/null +++ b/controllers/artifact_matchers_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +// MatchArtifact returns a custom matcher to check equality of a v1beta1.Artifact, the timestamp and URL are ignored. 
+func MatchArtifact(expected *sourcev1.Artifact) types.GomegaMatcher { + return &matchArtifact{ + expected: expected, + } +} + +type matchArtifact struct { + expected *sourcev1.Artifact +} + +func (m matchArtifact) Match(actual interface{}) (success bool, err error) { + actualArtifact, ok := actual.(*sourcev1.Artifact) + if !ok { + return false, fmt.Errorf("actual should be a pointer to an Artifact") + } + + if ok, _ := BeNil().Match(m.expected); ok { + return BeNil().Match(actual) + } + + if ok, err = Equal(m.expected.Path).Match(actualArtifact.Path); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { + return ok, err + } + if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok { + return ok, err + } + + return ok, err +} + +func (m matchArtifact) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto match\n\t%#v\n", actual, m.expected) +} + +func (m matchArtifact) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("expected\n\t%#v\nto not match\n\t%#v\n", actual, m.expected) +} From 45484efda3f2deb6c86b9fdf1b9dc5263a6e21ba Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 20 Jan 2022 01:09:39 +0530 Subject: [PATCH 43/53] api: Embed runtime.Object in Source interface Embedding runtime.Object in Source interface makes the Source type more useful to interact with k8s API machinery. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- api/v1beta1/source.go | 2 ++ api/v1beta1/zz_generated.deepcopy.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/api/v1beta1/source.go b/api/v1beta1/source.go index f22780dd1..1bf8bd975 100644 --- a/api/v1beta1/source.go +++ b/api/v1beta1/source.go @@ -20,6 +20,7 @@ import ( "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) const ( @@ -31,6 +32,7 @@ const ( // Source interface must be supported by all API types. // +k8s:deepcopy-gen=false type Source interface { + runtime.Object // GetRequeueAfter returns the duration after which the source must be reconciled again. GetRequeueAfter() time.Duration // GetArtifact returns the latest artifact from the source if present in the diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index d5e4f4892..79c2bdbb7 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. From c3e20875fe7140fc0a6caccee61f8499bb475d08 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Sun, 19 Dec 2021 19:41:32 +0530 Subject: [PATCH 44/53] Add internal packages error and reconcile - internal/error - Contains internal error type used across the source-controller reconcilers. - internal/reconcile - Contains helper abstractions for the controller-runtime reconcile Result type and functions to interact with the abstractions. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- internal/error/error.go | 54 ++++++++++ internal/reconcile/reconcile.go | 148 +++++++++++++++++++++++++++ internal/reconcile/reconcile_test.go | 47 +++++++++ 3 files changed, 249 insertions(+) create mode 100644 internal/error/error.go create mode 100644 internal/reconcile/reconcile.go create mode 100644 internal/reconcile/reconcile_test.go diff --git a/internal/error/error.go b/internal/error/error.go new file mode 100644 index 000000000..65d1feb62 --- /dev/null +++ b/internal/error/error.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package error + +// Stalling is the reconciliation stalled state error. It contains an error +// and a reason for the stalled condition. +type Stalling struct { + // Reason is the stalled condition reason string. + Reason string + // Err is the error that caused stalling. This can be used as the message in + // stalled condition. + Err error +} + +// Error implements error interface. +func (se *Stalling) Error() string { + return se.Err.Error() +} + +// Unwrap returns the underlying error. +func (se *Stalling) Unwrap() error { + return se.Err +} + +// Event is an error event. It can be used to construct an event to be +// recorded. +type Event struct { + Reason string + Err error +} + +// Error implements error interface. +func (ee *Event) Error() string { + return ee.Err.Error() +} + +// Unwrap returns the underlying error. 
+func (ee *Event) Unwrap() error { + return ee.Err +} diff --git a/internal/reconcile/reconcile.go b/internal/reconcile/reconcile.go new file mode 100644 index 000000000..f7593fcee --- /dev/null +++ b/internal/reconcile/reconcile.go @@ -0,0 +1,148 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "context" + + corev1 "k8s.io/api/core/v1" + kuberecorder "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + "github.com/fluxcd/pkg/runtime/patch" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + serror "github.com/fluxcd/source-controller/internal/error" +) + +// Result is a type for creating an abstraction for the controller-runtime +// reconcile Result to simplify the Result values. +type Result int + +const ( + // ResultEmpty indicates a reconcile result which does not requeue. + ResultEmpty Result = iota + // ResultRequeue indicates a reconcile result which should immediately + // requeue. + ResultRequeue + // ResultSuccess indicates a reconcile result which should be + // requeued on the interval as defined on the reconciled object. + ResultSuccess +) + +// BuildRuntimeResult converts a given Result and error into the +// return values of a controller's Reconcile function. 
+func BuildRuntimeResult(ctx context.Context, recorder kuberecorder.EventRecorder, obj sourcev1.Source, rr Result, err error) (ctrl.Result, error) { + // NOTE: The return values can be modified based on the error type. + // For example, if an error signifies a short requeue period that's + // not equal to the requeue period of the object, the error can be checked + // and an appropriate result with the period can be returned. + // + // Example: + // if e, ok := err.(*waitError); ok { + // return ctrl.Result{RequeueAfter: e.RequeueAfter}, err + // } + + // Log and record event based on the error. + switch e := err.(type) { + case *serror.Event: + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + case *serror.Stalling: + // Stalling errors are not returned to the runtime. Log it explicitly. + ctrl.LoggerFrom(ctx).Error(e, "reconciliation stalled") + recorder.Eventf(obj, corev1.EventTypeWarning, e.Reason, e.Error()) + } + + switch rr { + case ResultRequeue: + return ctrl.Result{Requeue: true}, err + case ResultSuccess: + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err + default: + return ctrl.Result{}, err + } +} + +// ComputeReconcileResult analyzes the reconcile results (result + error), +// updates the status conditions of the object with any corrections and returns +// result patch configuration and any error to the caller. The caller is +// responsible for using the patch option to patch the object in the API server. +func ComputeReconcileResult(obj conditions.Setter, res Result, recErr error, ownedConditions []string) ([]patch.Option, error) { + // Remove reconciling condition on successful reconciliation. + if recErr == nil && res == ResultSuccess { + conditions.Delete(obj, meta.ReconcilingCondition) + } + + // Patch the object, ignoring conflicts on the conditions owned by this controller. + pOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: ownedConditions, + }, + } + + // Analyze the reconcile error. 
+ switch t := recErr.(type) { + case *serror.Stalling: + if res == ResultEmpty { + // The current generation has been reconciled successfully and it + // has resulted in a stalled state. Return no error to stop further + // requeuing. + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + conditions.MarkStalled(obj, t.Reason, t.Error()) + return pOpts, nil + } + // NOTE: Non-empty result with stalling error indicates that the + // returned result is incorrect. + case nil: + // The reconcile didn't result in any error, we are not in stalled + // state. If a requeue is requested, the current generation has not been + // reconciled successfully. + if res != ResultRequeue { + pOpts = append(pOpts, patch.WithStatusObservedGeneration{}) + } + conditions.Delete(obj, meta.StalledCondition) + default: + // The reconcile resulted in some error, but we are not in stalled + // state. + conditions.Delete(obj, meta.StalledCondition) + } + + return pOpts, recErr +} + +// LowestRequeuingResult returns the ReconcileResult with the lowest requeue +// period. +// Weightage: +// ResultRequeue - immediate requeue (lowest) +// ResultSuccess - requeue at an interval +// ResultEmpty - no requeue +func LowestRequeuingResult(i, j Result) Result { + switch { + case i == ResultEmpty: + return j + case j == ResultEmpty: + return i + case i == ResultRequeue: + return i + case j == ResultRequeue: + return j + default: + return j + } +} diff --git a/internal/reconcile/reconcile_test.go b/internal/reconcile/reconcile_test.go new file mode 100644 index 000000000..bb0cf4c44 --- /dev/null +++ b/internal/reconcile/reconcile_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reconcile + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func TestLowestRequeuingResult(t *testing.T) { + tests := []struct { + name string + i Result + j Result + wantResult Result + }{ + {"bail,requeue", ResultEmpty, ResultRequeue, ResultRequeue}, + {"bail,requeueInterval", ResultEmpty, ResultSuccess, ResultSuccess}, + {"requeue,bail", ResultRequeue, ResultEmpty, ResultRequeue}, + {"requeue,requeueInterval", ResultRequeue, ResultSuccess, ResultRequeue}, + {"requeueInterval,requeue", ResultSuccess, ResultRequeue, ResultRequeue}, + {"requeueInterval,requeueInterval", ResultSuccess, ResultSuccess, ResultSuccess}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(LowestRequeuingResult(tt.i, tt.j)).To(Equal(tt.wantResult)) + }) + } +} From 8908d8061fe075e917cb4a319647ee57d26c2465 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Mon, 17 Jan 2022 01:36:21 +0530 Subject: [PATCH 45/53] Use runtime-events-fixes branch temporarily Signed-off-by: Sunny <darkowlzz@protonmail.com> --- go.mod | 6 +++--- go.sum | 9 ++++++--- main.go | 9 +++------ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index acb9c6686..84e6cdfce 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/fluxcd/pkg/gitutil v0.1.0 github.com/fluxcd/pkg/helmtestserver v0.4.0 github.com/fluxcd/pkg/lockedfile v0.1.0 - github.com/fluxcd/pkg/runtime v0.13.0-rc.6 + github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d 
github.com/fluxcd/pkg/ssh v0.2.0 github.com/fluxcd/pkg/testserver v0.2.0 github.com/fluxcd/pkg/untar v0.1.0 @@ -174,8 +174,8 @@ require ( golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.4.2 // indirect golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect - golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect - golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 // indirect golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect diff --git a/go.sum b/go.sum index 816f774c5..cdfd60673 100644 --- a/go.sum +++ b/go.sum @@ -308,8 +308,9 @@ github.com/fluxcd/pkg/helmtestserver v0.4.0 h1:RT0G5buw5qrzEfIIH0fklppIvPAaQF//p github.com/fluxcd/pkg/helmtestserver v0.4.0/go.mod h1:JOI9f3oXUFIWmMKWMBan7FjglAU+fRTO/sPPV/Kj3gQ= github.com/fluxcd/pkg/lockedfile v0.1.0 h1:YsYFAkd6wawMCcD74ikadAKXA4s2sukdxrn7w8RB5eo= github.com/fluxcd/pkg/lockedfile v0.1.0/go.mod h1:EJLan8t9MiOcgTs8+puDjbE6I/KAfHbdvIy9VUgIjm8= -github.com/fluxcd/pkg/runtime v0.13.0-rc.6 h1:MsxiKYGsuRzEvyreQG5ocNaIZDwKhqvQ711/w4rTkCo= github.com/fluxcd/pkg/runtime v0.13.0-rc.6/go.mod h1:4oKUO19TeudXrnCRnxCfMSS7EQTYpYlgfXwlQuDJ/Eg= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d h1:RllfQpqtWKw+ZkrphMDIrX9J4X4u5am58T+rjVGkzsE= +github.com/fluxcd/pkg/runtime v0.13.0-rc.6.0.20220119182053-85644747ea3d/go.mod h1:uGPudgMUNC3wu7Zoh6AgJM8WSH3VpmnzjrwkVb86d3Y= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= github.com/fluxcd/pkg/testserver v0.1.0/go.mod h1:fvt8BHhXw6c1+CLw1QFZxcQprlcXzsrL4rzXaiGM+Iw= @@ -1113,8 +1114,9 @@ 
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1214,8 +1216,9 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk= golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08 h1:WecRHqgE09JBkh/584XIE6PMz5KKE/vER4izNUi30AQ= +golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/main.go b/main.go index c9e903f30..514cc34c4 100644 --- a/main.go +++ b/main.go @@ -152,12 +152,9 @@ func main() { pprof.SetupHandlers(mgr, setupLog) var eventRecorder *events.Recorder - if eventsAddr != "" { - var err error - if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { - setupLog.Error(err, "unable to create event recorder") - os.Exit(1) - } + if eventRecorder, err = events.NewRecorder(mgr, ctrl.Log, eventsAddr, controllerName); err != nil { + setupLog.Error(err, "unable to create event recorder") + os.Exit(1) } metricsH := helper.MustMakeMetrics(mgr) From 991692f5eca2a6209c5219c8bb50314e3f6ce11c Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Wed, 24 Nov 2021 22:08:52 +0530 Subject: [PATCH 46/53] bucket: Replace GetInterval() with GetRequeueAfter() Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index a795453e4..3603f6f56 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -171,10 +171,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", time.Since(start).String(), - bucket.GetInterval().Duration.String(), + bucket.GetRequeueAfter().String(), )) - return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil + return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil } func (r 
*BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { From 34b27a0e2fe05ed4faa149a2cc5acb73115e5f18 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Sat, 31 Jul 2021 03:58:43 +0200 Subject: [PATCH 47/53] Rewrite `BucketReconciler` to new standards This commit rewrites the `BucketReconciler` to new standards, while implementing the newly introduced Condition types, and trying to adhere better to Kubernetes API conventions. More specifically it introduces: - Implementation of more explicit Condition types to highlight abnormalities. - Extensive usage of the `conditions` subpackage from `runtime`. - Better and more conflict-resilient (status)patching of reconciled objects using the `patch` subpackage from runtime. - Proper implementation of kstatus' `Reconciling` and `Stalled` conditions. - Refactor of reconciler logic, including more efficient detection of changes to bucket objects by making use of the etag data available, and downloading of object files in parallel with a limited number of workers (4). - Integration tests that solely rely on `testenv` and do not use Ginkgo. There are a couple of TODOs marked in-code, these are suggestions for the future and should be non-blocking. In addition to the TODOs, more complex and/or edge-case test scenarios may be added as well. 
Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 60 +- controllers/bucket_controller.go | 956 +++++++++++++++----------- controllers/bucket_controller_test.go | 677 +++++++++++++++++- controllers/suite_test.go | 10 + main.go | 9 +- 5 files changed, 1242 insertions(+), 470 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index b03c03a10..23a0ff1d7 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -19,12 +19,10 @@ package v1beta1 import ( "time" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/conditions" ) const ( @@ -32,6 +30,19 @@ const ( BucketKind = "Bucket" ) +const ( + GenericBucketProvider string = "generic" + AmazonBucketProvider string = "aws" + GoogleBucketProvider string = "gcp" +) + +const ( + // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the + // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. + // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. + DownloadFailedCondition string = "DownloadFailed" +) + // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). @@ -85,12 +96,6 @@ type BucketSpec struct { AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } -const ( - GenericBucketProvider string = "generic" - AmazonBucketProvider string = "aws" - GoogleBucketProvider string = "gcp" -) - // BucketStatus defines the observed state of a bucket type BucketStatus struct { // ObservedGeneration is the last observed generation. 
@@ -122,45 +127,6 @@ const ( BucketOperationFailedReason string = "BucketOperationFailed" ) -// BucketProgressing resets the conditions of the Bucket to metav1.Condition of -// type meta.ReadyCondition with status 'Unknown' and meta.ProgressingReason -// reason and message. It returns the modified Bucket. -func BucketProgressing(bucket Bucket) Bucket { - bucket.Status.ObservedGeneration = bucket.Generation - bucket.Status.URL = "" - bucket.Status.Conditions = []metav1.Condition{} - conditions.MarkUnknown(&bucket, meta.ReadyCondition, meta.ProgressingReason, "reconciliation in progress") - return bucket -} - -// BucketReady sets the given Artifact and URL on the Bucket and sets the -// meta.ReadyCondition to 'True', with the given reason and message. It returns -// the modified Bucket. -func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket { - bucket.Status.Artifact = &artifact - bucket.Status.URL = url - conditions.MarkTrue(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with -// the given reason and message. It returns the modified Bucket. -func BucketNotReady(bucket Bucket, reason, message string) Bucket { - conditions.MarkFalse(&bucket, meta.ReadyCondition, reason, message) - return bucket -} - -// BucketReadyMessage returns the message of the metav1.Condition of type -// meta.ReadyCondition with status 'True' if present, or an empty string. -func BucketReadyMessage(bucket Bucket) string { - if c := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); c != nil { - if c.Status == metav1.ConditionTrue { - return c.Message - } - } - return "" -} - // GetConditions returns the status conditions of the object. 
func (in Bucket) GetConditions() []metav1.Condition { return in.Status.Conditions diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 3603f6f56..e2621a927 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -18,24 +18,27 @@ package controllers import ( "context" - "crypto/sha1" + "crypto/sha256" "fmt" "os" "path/filepath" + "sort" "strings" "time" + gcpstorage "cloud.google.com/go/storage" + "github.com/fluxcd/source-controller/pkg/gcp" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/s3utils" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -43,10 +46,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" - "github.com/fluxcd/source-controller/pkg/gcp" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" "github.com/fluxcd/source-controller/pkg/sourceignore" @@ -60,11 +63,10 @@ import ( // BucketReconciler reconciles a Bucket object type BucketReconciler struct { client.Client - Scheme *runtime.Scheme - Storage *Storage - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder 
- MetricsRecorder *metrics.Recorder + kuberecorder.EventRecorder + helper.Metrics + + Storage *Storage } type BucketReconcilerOptions struct { @@ -83,519 +85,683 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc Complete(r) } -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := ctrl.LoggerFrom(ctx) - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { + // Fetch the Bucket + obj := &sourcev1.Bucket{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } // Record suspended status metric - defer r.recordSuspension(ctx, bucket) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) { - patch := client.MergeFrom(bucket.DeepCopy()) - controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Patch(ctx, &bucket, patch); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) - // Examine if the object is under deletion - if !bucket.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, bucket) - } - - // Return early if the object is suspended. 
- if bucket.Spec.Suspend { + // Return early if the object is suspended + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - // record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - return ctrl.Result{}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, start) - } - - // set initial status - if resetBucket, ok := r.resetStatus(bucket); ok { - bucket = resetBucket - if err := r.updateStatus(ctx, req, bucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } - r.recordReadiness(ctx, bucket) - } - - // record the value of the reconciliation request, if any - // TODO(hidde): would be better to defer this in combination with - // always patching the status sub-resource after a reconciliation. - if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok { - bucket.Status.SetLastHandledReconcileRequest(v) - } - - // purge old artifacts from storage - if err := r.gc(bucket); err != nil { - log.Error(err, "unable to purge old artifacts") + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // reconcile bucket by downloading its content - reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy()) - - // update status with the reconciliation result - if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil { - log.Error(err, "unable to update status") - return ctrl.Result{Requeue: true}, err - } + // Always attempt to patch the object and status after each reconciliation + defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Summarize the Ready condition based on abnormalities that may have been 
observed + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + conditions.WithNegativePolarityConditions( + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.DownloadFailedCondition, + sourcev1.ArtifactUnavailableCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } + + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } + + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + retErr = kerrors.NewAggregate([]error{retErr, err}) + } + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() - // if reconciliation failed, record the failure and requeue immediately - if 
reconcileErr != nil { - r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error()) - r.recordReadiness(ctx, reconciledBucket) - return ctrl.Result{Requeue: true}, reconcileErr + // Add finalizer first if not exist to avoid the race condition between init and delete + if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { + controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + return ctrl.Result{Requeue: true}, nil } - // emit revision change event - if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision { - r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket)) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } - r.recordReadiness(ctx, reconciledBucket) - log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s", - time.Since(start).String(), - bucket.GetRequeueAfter().String(), - )) - - return ctrl.Result{RequeueAfter: bucket.GetRequeueAfter()}, nil + // Reconcile actual object + return r.reconcile(ctx, obj) } -func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - var err error - var sourceBucket sourcev1.Bucket - - tempDir, err := os.MkdirTemp("", bucket.Name) - if err != nil { - err = fmt.Errorf("tmp dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } - defer func() { - if err := os.RemoveAll(tempDir); err != nil { - log.Error(err, "failed to remove working directory", "path", tempDir) - } - }() +// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that +// produces an error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") - if bucket.Spec.Provider == sourcev1.GoogleBucketProvider { - sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } else { - sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir) - if err != nil { - return sourceBucket, err - } - } - revision, err := r.checksum(tempDir) - if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the storage data + if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { + return result, err } - // return early on unchanged revision - artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision)) - if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) { - if artifact.URL != bucket.GetArtifact().URL { - r.Storage.SetArtifactURL(bucket.GetArtifact()) - bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL) - } - return bucket, nil - } - - // create artifact dir - err = r.Storage.MkdirAll(artifact) + // Create temp working dir + tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - err = fmt.Errorf("mkdir dir error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) + return ctrl.Result{}, err } + defer os.RemoveAll(tmpDir) - // acquire lock - unlock, err := r.Storage.Lock(artifact) - if err != nil { - err = fmt.Errorf("unable to acquire lock: %w", err) - return 
sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the source from upstream + var artifact sourcev1.Artifact + if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err } - defer unlock() - // archive artifact and check integrity - if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil { - err = fmt.Errorf("storage archive error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err + // Reconcile the artifact to storage + if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { + return result, err } - // update latest symlink - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") - if err != nil { - err = fmt.Errorf("storage symlink error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err - } + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil +} - message := fmt.Sprintf("Fetched revision: %s", artifact.Revision) - return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil +// reconcileStorage ensures the current state of the storage matches the desired and previously observed state. +// +// All artifacts for the resource except for the current one are garbage collected from the storage. +// If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. +// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. +// If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect previous advertised artifact(s) from storage + _ = r.garbageCollect(ctx, obj) + + // Determine if the advertised artifact is still in storage + if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) { + obj.Status.Artifact = nil + obj.Status.URL = "" + } + + // Record that we do not have an artifact + if obj.GetArtifact() == nil { + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") + return ctrl.Result{Requeue: true}, nil + } + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) + + // Always update URLs to ensure hostname is up-to-date + // TODO(hidde): we may want to send out an event only if we notice the URL has changed + r.Storage.SetArtifactURL(obj.GetArtifact()) + obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) + + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) { - if err := r.gc(bucket); err != nil { - r.event(ctx, bucket, events.EventSeverityError, - fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error())) - // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err +// reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the +// result. +// If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret +// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { + var secret corev1.Secret + if obj.Spec.SecretRef != nil { + secretName := types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.Spec.SecretRef.Name, + } + if err := r.Get(ctx, secretName, &secret); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, + "Failed to get secret '%s': %s", secretName.String(), err.Error()) + // Return error as the world as observed may change + return ctrl.Result{}, err + } } - // Record deleted status - r.recordReadiness(ctx, bucket) - - // Remove our finalizer from the list and update it - controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer) - if err := r.Update(ctx, &bucket); err != nil { - return ctrl.Result{}, err + switch obj.Spec.Provider { + case sourcev1.GoogleBucketProvider: + return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + default: + return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) } - - // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil } -// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket -// using a gcp client -func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - log := ctrl.LoggerFrom(ctx) - gcpClient, err := r.authGCP(ctx, bucket) +// reconcileMinioSource ensures the upstream Minio client compatible bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. 
In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. +func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + // Build the client with the configuration from the object and secret + s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to construct S3 client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } - defer gcpClient.Close(log) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to 
verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) } - // Look for file with ignore rules first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { - if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Look for file with ignore rules first + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { + if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' 
file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) } matcher := sourceignore.NewMatcher(ps) - objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil) - // download bucket content - for { - object, err := objects.Next() - if err == gcp.IteratorDone { - break - } - if err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + for object := range s3Client.ListObjects(ctxTimeout, obj.Spec.BucketName, minio.ListObjectsOptions{ + Recursive: true, + UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), + }) { + if err = object.Err; err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { + // Ignore directories and the .sourceignore file + if strings.HasSuffix(object.Key, "/") || object.Key == 
sourceignore.IgnoreFile { continue } - - if matcher.Match(strings.Split(object.Name, "/"), false) { + // Ignore matches + if matcher.Match(strings.Split(object.Key, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Name) - if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + index[object.Key] = object.ETag + } + + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } + + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath, minio.GetObjectOptions{}); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Download from bucket '%s' failed: %s", obj.Spec.BucketName, 
err) + return ctrl.Result{}, err } + r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, + "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - return sourcev1.Bucket{}, nil + conditions.Delete(obj, sourcev1.DownloadFailedCondition) + + // Create potential new artifact + *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// reconcileWithMinio handles getting objects from an S3 compatible bucket -// using a minio client -func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) { - s3Client, err := r.authMinio(ctx, bucket) +// reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the +// declared configuration, and observes its state. +// +// The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into +// account. In case of an error during the download process (including transient errors), it records +// v1beta1.DownloadFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. +// If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. +// +// The caller should assume a failure if an error is returned, or the Result is zero. 
+func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, + secret *corev1.Secret, dir string) (ctrl.Result, error) { + gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - err = fmt.Errorf("auth error: %w", err) - return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to construct GCP client: %s", err.Error()) + // Return error as the contents of the secret may change + return ctrl.Result{}, err } + defer gcpClient.Close(ctrl.LoggerFrom(ctx)) - ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration) + // Confirm bucket exists + ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() - - exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName) + exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } if !exists { - err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Bucket '%s' does not exist", obj.Spec.BucketName) + return ctrl.Result{}, fmt.Errorf("bucket '%s' does not 
exist", obj.Spec.BucketName) } // Look for file with ignore rules first - // NB: S3 has flat filepath keys making it impossible to look - // for files in "subdirectories" without building up a tree first. - path := filepath.Join(tempDir, sourceignore.IgnoreFile) - if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { - if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + path := filepath.Join(dir, sourceignore.IgnoreFile) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { + if err != gcpstorage.ErrObjectNotExist { + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) + return ctrl.Result{}, err } // In-spec patterns take precedence - if bucket.Spec.Ignore != nil { - ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...) + if obj.Spec.Ignore != nil { + ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), nil)...) 
} matcher := sourceignore.NewMatcher(ps) - // download bucket content - for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{ - Recursive: true, - UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), - }) { - if object.Err != nil { - err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err + // Build up an index of object keys and their etags + // As the keys define the paths and the etags represent a change in file contents, this should be sufficient to + // detect both structural and file changes + var index = make(etagIndex) + objects := gcpClient.ListObjects(ctxTimeout, obj.Spec.BucketName, nil) + for { + object, err := objects.Next() + if err != nil { + if err == gcp.IteratorDone { + break + } + conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, + "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) + return ctrl.Result{}, err } - if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile { + if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { continue } - if matcher.Match(strings.Split(object.Key, "/"), false) { + if matcher.Match(strings.Split(object.Name, "/"), false) { continue } - localPath := filepath.Join(tempDir, object.Key) - err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{}) - if err != nil { - err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err) - return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err - } + index[object.Name] = 
object.Etag } - return sourcev1.Bucket{}, nil -} -// authGCP creates a new Google Cloud Platform storage client -// to interact with the storage service. -func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) { - var client *gcp.GCPClient - var err error - if bucket.Spec.SecretRef != nil { - secretName := types.NamespacedName{ - Namespace: bucket.GetNamespace(), - Name: bucket.Spec.SecretRef.Name, - } + // Calculate revision checksum from the collected index values + revision, err := index.Revision() + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") + return ctrl.Result{}, err + } - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) - } - if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { - return nil, err - } - client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) - if err != nil { - return nil, err - } - } else { - client, err = gcp.NewClient(ctx) - if err != nil { - return nil, err + if !obj.GetArtifact().HasRevision(revision) { + // Mark observations about the revision on the object + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", + "New upstream revision '%s'", revision) + + // Download the files in parallel, but with a limited number of workers + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + const workers = 4 + sem := semaphore.NewWeighted(workers) + for key := range index { + k := key + if err := sem.Acquire(groupCtx, 1); err != nil { + return err + } + group.Go(func() error { + defer sem.Release(1) + localPath := filepath.Join(dir, k) + if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, k, localPath); err != nil { + return fmt.Errorf("failed to get '%s' file: %w", k, err) + } + return nil + }) + } + return nil + }) + if err = group.Wait(); err != nil { + 
			conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason,
+				"Download from bucket '%s' failed: %s", obj.Spec.BucketName, err)
+			r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason,
+				"Download from bucket '%s' failed: %s", obj.Spec.BucketName, err)
+			return ctrl.Result{}, err
 		}
+		r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason,
+			"Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision)
 	}
-	return client, nil
+	conditions.Delete(obj, sourcev1.DownloadFailedCondition)
+	// Create potential new artifact
+	*artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
+	return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil
 }

-// authMinio creates a new Minio client to interact with S3
-// compatible storage services.
-func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) {
-	opt := minio.Options{
-		Region: bucket.Spec.Region,
-		Secure: !bucket.Spec.Insecure,
-	}
-
-	if bucket.Spec.SecretRef != nil {
-		secretName := types.NamespacedName{
-			Namespace: bucket.GetNamespace(),
-			Name:      bucket.Spec.SecretRef.Name,
+// reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the
+// given data.
+//
+// The inspection of the given data to the object is deferred, ensuring any stale observations as
+// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted.
+// If the given artifact does not differ from the object's current, it returns early.
+// On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is
+// updated to its path.
+//
+// The caller should assume a failure if an error is returned, or the Result is zero.
+func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { + // Always restore the Ready condition in case it got removed due to a transient error + defer func() { + if obj.GetArtifact() != nil { + conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) } - - var secret corev1.Secret - if err := r.Get(ctx, secretName, &secret); err != nil { - return nil, fmt.Errorf("credentials secret error: %w", err) + if obj.GetArtifact().HasRevision(artifact.Revision) { + conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) + conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, + "Stored artifact for revision '%s'", artifact.Revision) } + }() - accesskey := "" - secretkey := "" - if k, ok := secret.Data["accesskey"]; ok { - accesskey = string(k) - } - if k, ok := secret.Data["secretkey"]; ok { - secretkey = string(k) - } - if accesskey == "" || secretkey == "" { - return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) - } - opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "") - } else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider { - opt.Creds = credentials.NewIAM("") + // The artifact is up-to-date + if obj.GetArtifact().HasRevision(artifact.Revision) { + ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } - if opt.Creds == nil { - return nil, fmt.Errorf("no bucket credentials found") + // Ensure target path exists and is a directory + if f, err := os.Stat(dir); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") + return ctrl.Result{}, err + } else if !f.IsDir() { + err := fmt.Errorf("source path '%s' is not a directory", dir) + ctrl.LoggerFrom(ctx).Error(err, "invalid target path") + return ctrl.Result{}, err } - return 
minio.New(bucket.Spec.Endpoint, &opt) -} - -// checksum calculates the SHA1 checksum of the given root directory. -// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the -// list with relative file paths and their checksums. -func (r *BucketReconciler) checksum(root string) (string, error) { - sum := sha1.New() - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.Mode().IsRegular() { - return nil - } - data, err := os.ReadFile(path) - if err != nil { - return err - } - relPath, err := filepath.Rel(root, path) - if err != nil { - return err - } - sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath))) - return nil - }); err != nil { - return "", err + // Ensure artifact directory exists and acquire lock + if err := r.Storage.MkdirAll(artifact); err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") + return ctrl.Result{}, err } - return fmt.Sprintf("%x", sum.Sum(nil)), nil -} - -// resetStatus returns a modified v1beta1.Bucket and a boolean indicating -// if the status field has been reset. 
-func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) { - // We do not have an artifact, or it does no longer exist - if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) { - bucket = sourcev1.BucketProgressing(bucket) - bucket.Status.Artifact = nil - return bucket, true + unlock, err := r.Storage.Lock(artifact) + if err != nil { + ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") + return ctrl.Result{}, err } - if bucket.Generation != bucket.Status.ObservedGeneration { - return sourcev1.BucketProgressing(bucket), true + defer unlock() + + // Archive directory to storage + if err := r.Storage.Archive(&artifact, dir, nil); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Unable to archive artifact to storage: %s", err) + return ctrl.Result{}, err } - return bucket, false -} + r.AnnotatedEventf(obj, map[string]string{ + "revision": artifact.Revision, + "checksum": artifact.Checksum, + }, corev1.EventTypeNormal, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) -// gc performs a garbage collection for the given v1beta1.Bucket. -// It removes all but the current artifact except for when the -// deletion timestamp is set, which will result in the removal of -// all artifacts for the resource. 
-func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error { - if !bucket.DeletionTimestamp.IsZero() { - return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*")) + // Record it on the object + obj.Status.Artifact = artifact.DeepCopy() + + // Update symlink on a "best effort" basis + url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + if err != nil { + r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "Failed to update status URL symlink: %s", err) } - if bucket.GetArtifact() != nil { - return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact()) + if url != "" { + obj.Status.URL = url } - return nil + return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil } -// event emits a Kubernetes event and forwards the event to notification controller if configured -func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) { - if r.EventRecorder != nil { - r.EventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) - } - if r.ExternalEventRecorder != nil { - r.ExternalEventRecorder.Eventf(&bucket, corev1.EventTypeNormal, severity, msg) +// reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the +// artifact storage, if successful, the finalizer is removed from the object. 
+func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { + // Garbage collect the resource's artifacts + if err := r.garbageCollect(ctx, obj); err != nil { + // Return the error so we retry the failed garbage collection + return ctrl.Result{}, err } + + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) + + // Stop reconciliation as the object is being deleted + return ctrl.Result{}, nil } -func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) { - log := ctrl.LoggerFrom(ctx) - if r.MetricsRecorder == nil { - return - } - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record readiness metric") - return +// garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current +// artifact except for when the deletion timestamp is set, which will result in the removal of all artifacts for the +// resource. 
+func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { + if !obj.DeletionTimestamp.IsZero() { + if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { + r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", + "Garbage collection for deleted resource failed: %s", err) + return err + } + obj.Status.Artifact = nil + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "Garbage collected artifacts for deleted resource") + return nil } - if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !bucket.DeletionTimestamp.IsZero()) + if obj.GetArtifact() != nil { + if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); err != nil { + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) + return err + } + // TODO(hidde): we should only push this event if we actually garbage collected something + r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "Garbage collected old artifacts") } + return nil } -func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) { - if r.MetricsRecorder == nil { - return +// buildMinioClient constructs a minio.Client with the data from the given object and Secret. +// It returns an error if the Secret does not have the required fields, or if there is no credential handler +// configured. 
+func (r *BucketReconciler) buildMinioClient(obj *sourcev1.Bucket, secret *corev1.Secret) (*minio.Client, error) { + opts := minio.Options{ + Region: obj.Spec.Region, + Secure: !obj.Spec.Insecure, } - log := ctrl.LoggerFrom(ctx) - - objRef, err := reference.GetReference(r.Scheme, &bucket) - if err != nil { - log.Error(err, "unable to record suspended metric") - return + if secret != nil { + var accessKey, secretKey string + if k, ok := secret.Data["accesskey"]; ok { + accessKey = string(k) + } + if k, ok := secret.Data["secretkey"]; ok { + secretKey = string(k) + } + if accessKey == "" || secretKey == "" { + return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name) + } + opts.Creds = credentials.NewStaticV4(accessKey, secretKey, "") + } else if obj.Spec.Provider == sourcev1.AmazonBucketProvider { + opts.Creds = credentials.NewIAM("") } + return minio.New(obj.Spec.Endpoint, &opts) +} - if !bucket.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) +// buildGCPClient constructs a gcp.GCPClient with the data from the given Secret. +// It returns an error if the Secret does not have the required field, or if the client construction fails. 
+func (r *BucketReconciler) buildGCPClient(ctx context.Context, secret *corev1.Secret) (*gcp.GCPClient, error) { + var client *gcp.GCPClient + var err error + if secret != nil { + if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil { + return nil, err + } + client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"])) + if err != nil { + return nil, err + } } else { - r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend) + client, err = gcp.NewClient(ctx) + if err != nil { + return nil, err + } } + return client, nil } -func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error { - var bucket sourcev1.Bucket - if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil { - return err - } - - patch := client.MergeFrom(bucket.DeepCopy()) - bucket.Status = newStatus +// etagIndex is an index of bucket keys and their Etag values. +type etagIndex map[string]string - return r.Status().Patch(ctx, &bucket, patch) +// Revision calculates the SHA256 checksum of the index. +// The keys are sorted to ensure a stable order, and the SHA256 sum is then calculated for the string representations of +// the key/value pairs, each pair written on a newline +// The sum result is returned as a string. +func (i etagIndex) Revision() (string, error) { + keyIndex := make([]string, 0, len(i)) + for k := range i { + keyIndex = append(keyIndex, k) + } + sort.Strings(keyIndex) + sum := sha256.New() + for _, k := range keyIndex { + if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i[k]))); err != nil { + return "", err + } + } + return fmt.Sprintf("%x", sum.Sum(nil)), nil } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 01ff20d87..b5c9debeb 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -17,59 +17,573 @@ limitations under the License. 
package controllers import ( + "context" + "crypto/md5" + "fmt" + "net/http" + "net/http/httptest" + "net/url" "os" + "path" "path/filepath" + "strings" "testing" + "time" + + "github.com/go-logr/logr" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + + sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) -func TestBucketReconciler_checksum(t *testing.T) { +func TestBucketReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + + s3Server := newS3Server("test-bucket") + s3Server.Objects = []*s3MockObject{ + { + Key: "test.yaml", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + } + s3Server.Start() + defer s3Server.Stop() + + g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(s3Server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + }, + } + g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) + defer testEnv.Delete(ctx, secret) + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "bucket-reconcile-", + Namespace: "default", + }, + Spec: sourcev1.BucketSpec{ + Provider: "generic", + BucketName: s3Server.BucketName, + Endpoint: u.Host, + Insecure: true, + Interval: metav1.Duration{Duration: interval}, + Timeout: &metav1.Duration{Duration: timeout}, + SecretRef: &meta.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + 
g.Expect(testEnv.Create(ctx, obj)).To(Succeed()) + + key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace} + + // Wait for finalizer to be set + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + return len(obj.Finalizers) > 0 + }, timeout).Should(BeTrue()) + + // Wait for Bucket to be Ready + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return false + } + if !conditions.IsReady(obj) || obj.Status.Artifact == nil { + return false + } + readyCondition := conditions.Get(obj, meta.ReadyCondition) + return obj.Generation == readyCondition.ObservedGeneration && + obj.Generation == obj.Status.ObservedGeneration + }, timeout).Should(BeTrue()) + + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) + + // Wait for Bucket to be deleted + g.Eventually(func() bool { + if err := testEnv.Get(ctx, key, obj); err != nil { + return apierrors.IsNotFound(err) + } + return false + }, timeout).Should(BeTrue()) +} + +func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { - name string - beforeFunc func(root string) - want string - wantErr bool + name string + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error + want ctrl.Result + wantErr bool + assertArtifact *sourcev1.Artifact + assertConditions []metav1.Condition + assertPaths []string }{ { - name: "empty root", - want: "da39a3ee5e6b4b0d3255bfef95601890afd80709", + name: "garbage collects", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + revisions := []string{"a", "b", "c"} + for n := range revisions { + v := revisions[n] + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/%s.txt", v), + Revision: v, + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0644); err != nil { + return err + } + } + 
testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/c.txt", + Revision: "c", + Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + URL: testStorage.Hostname + "/reconcile-storage/c.txt", + }, + assertPaths: []string{ + "/reconcile-storage/c.txt", + "!/reconcile-storage/b.txt", + "!/reconcile-storage/a.txt", + }, + }, + { + name: "notices missing artifact in storage", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/invalid.txt"), + Revision: "d", + } + testStorage.SetArtifactURL(obj.Status.Artifact) + return nil + }, + want: ctrl.Result{Requeue: true}, + assertPaths: []string{ + "!/reconcile-storage/invalid.txt", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + }, + }, + { + name: "updates hostname on diff from current", + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { + obj.Status.Artifact = &sourcev1.Artifact{ + Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: "http://outdated.com/reconcile-storage/hostname.txt", + } + if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { + return err + } + if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0644); err != nil { + return err + } + return nil + }, + assertPaths: []string{ + "/reconcile-storage/hostname.txt", + }, + assertArtifact: &sourcev1.Artifact{ + Path: "/reconcile-storage/hostname.txt", + Revision: "f", + Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + r := &BucketReconciler{ + 
EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + obj := &sourcev1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "test-", + }, + } + if tt.beforeFunc != nil { + g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) + } + + got, err := r.reconcileStorage(context.TODO(), obj) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact)) + if tt.assertArtifact != nil && tt.assertArtifact.URL != "" { + g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL)) + } + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + for _, p := range tt.assertPaths { + absoluteP := filepath.Join(testStorage.BasePath, p) + if !strings.HasPrefix(p, "!") { + g.Expect(absoluteP).To(BeAnExistingFile()) + continue + } + g.Expect(absoluteP).NotTo(BeAnExistingFile()) + } + }) + } +} + +func TestBucketReconciler_reconcileMinioSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*s3MockObject + middleware http.Handler + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, + // TODO(hidde): middleware for mock server + //{ + 
// name: "authenticates using secretRef", + // bucketName: "dummy", + //}, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + // TODO(hidde): test the lesser happy paths + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", 
+ LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", + Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + }, }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, + } + + var server *s3MockServer + if tt.bucketName != "" { + server = newS3Server(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + u, err := url.Parse(server.HTTPAddress()) + g.Expect(err).NotTo(HaveOccurred()) + + obj.Spec.BucketName = tt.bucketName + obj.Spec.Endpoint = u.Host + // TODO(hidde): also test TLS + obj.Spec.Insecure = true + } + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + artifact := &sourcev1.Artifact{} + got, 
err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func TestBucketReconciler_reconcileArtifact(t *testing.T) { + tests := []struct { + name string + artifact sourcev1.Artifact + beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + want ctrl.Result + wantErr bool + assertConditions []metav1.Condition + }{ { - name: "with file", - beforeFunc: func(root string) { - mockFile(root, "a/b/c.txt", "a dummy string") + name: "artifact revision up-to-date", + artifact: sourcev1.Artifact{ + Revision: "existing", + }, + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Status.Artifact = &artifact + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, - want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0", }, { - name: "with file in different path", - beforeFunc: func(root string) { - mockFile(root, "a/b.txt", "a dummy string") + name: "dir path deleted", + beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + _ = os.RemoveAll(dir) }, - want: "e28c62b5cc488849950c4355dddc5523712616d4", + wantErr: true, }, + //{ + // name: "dir path empty", + //}, + //{ + // name: "success", + // artifact: sourcev1.Artifact{ + // Revision: "existing", + // }, + // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // obj.Status.Artifact = &artifact + // }, + // assertConditions: []metav1.Condition{ + // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), + // }, + //}, + //{ + // name: "symlink", + //}, } + for _, tt := 
range tests { t.Run(tt.name, func(t *testing.T) { - root, err := os.MkdirTemp("", "bucket-checksum-") - if err != nil { - t.Fatal(err) + g := NewWithT(t) + + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + Timeout: &metav1.Duration{Duration: timeout}, + }, } - defer os.RemoveAll(root) + if tt.beforeFunc != nil { - tt.beforeFunc(root) + tt.beforeFunc(obj, tt.artifact, tmpDir) } - got, err := (&BucketReconciler{}).checksum(root) + + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + + dlog := log.NewDelegatingLogSink(log.NullLogSink{}) + nullLogger := logr.New(dlog) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, tt.artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + +func Test_etagIndex_Revision(t *testing.T) { + tests := []struct { + name string + list etagIndex + want string + wantErr bool + }{ + { + name: "index with items", + list: map[string]string{ + "one": "one", + "two": "two", + "three": "three", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "index with items in different order", + list: map[string]string{ + "three": "three", + "one": "one", + "two": "two", + }, + want: "8afaa9c32d7c187e8acaeffe899226011001f67c095519cdd8b4c03487c5b8bc", + }, + { + name: "empty index", + list: map[string]string{}, + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + { + name: "nil index", + list: nil, + want: 
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.list.Revision() if (err != nil) != tt.wantErr { - t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { - t.Errorf("checksum() got = %v, want %v", got, tt.want) + t.Errorf("revision() got = %v, want %v", got, tt.want) } }) } } +// helpers + func mockFile(root, path, content string) error { filePath := filepath.Join(root, path) if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { @@ -80,3 +594,120 @@ func mockFile(root, path, content string) error { } return nil } + +type s3MockObject struct { + Key string + LastModified time.Time + ContentType string + Content []byte +} + +type s3MockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Objects []*s3MockObject +} + +func newS3Server(bucketName string) *s3MockServer { + s := &s3MockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle(fmt.Sprintf("/%s/", s.BucketName), http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (s *s3MockServer) Start() { + s.srv.Start() +} + +func (s *s3MockServer) Stop() { + s.srv.Close() +} + +func (s *s3MockServer) HTTPAddress() string { + return s.srv.URL +} + +func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { + key := path.Base(r.URL.Path) + + switch key { + case s.BucketName: + w.Header().Add("Content-Type", "application/xml") + + if r.Method == http.MethodHead { + return + } + + q := r.URL.Query() + + if q["location"] != nil { + fmt.Fprint(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">Europe</LocationConstraint> + `) + return + } + + contents := "" + for _, o := range s.Objects { + etag := md5.Sum(o.Content) + 
contents += fmt.Sprintf(` + <Contents> + <Key>%s</Key> + <LastModified>%s</LastModified> + <Size>%d</Size> + <ETag>"%b"</ETag> + <StorageClass>STANDARD</StorageClass> + </Contents>`, o.Key, o.LastModified.UTC().Format(time.RFC3339), len(o.Content), etag) + } + + fmt.Fprintf(w, ` +<?xml version="1.0" encoding="UTF-8"?> +<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> + <Name>%s</Name> + <Prefix/> + <Marker/> + <KeyCount>%d</KeyCount> + <MaxKeys>1000</MaxKeys> + <IsTruncated>false</IsTruncated> + %s +</ListBucketResult> + `, s.BucketName, len(s.Objects), contents) + default: + key, err := filepath.Rel("/"+s.BucketName, r.URL.Path) + if err != nil { + w.WriteHeader(500) + return + } + + var found *s3MockObject + for _, o := range s.Objects { + if key == o.Key { + found = o + } + } + if found == nil { + w.WriteHeader(404) + return + } + + etag := md5.Sum(found.Content) + lastModified := strings.Replace(found.LastModified.UTC().Format(time.RFC1123), "UTC", "GMT", 1) + + w.Header().Add("Content-Type", found.ContentType) + w.Header().Add("Last-Modified", lastModified) + w.Header().Add("ETag", fmt.Sprintf("\"%b\"", etag)) + w.Header().Add("Content-Length", fmt.Sprintf("%d", len(found.Content))) + + if r.Method == http.MethodHead { + return + } + + w.Write(found.Content) + } +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index a1e09b69b..02426e4ed 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" "github.com/fluxcd/pkg/runtime/controller" @@ -95,6 +96,15 @@ func TestMain(m *testing.M) { // panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err)) //} + if err := (&BucketReconciler{ + Client: testEnv, + EventRecorder: record.NewFakeRecorder(32), + Metrics: testMetricsH, + Storage: testStorage, + 
}).SetupWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err)) + } + go func() { fmt.Println("Starting the test environment") if err := testEnv.Start(ctx); err != nil { diff --git a/main.go b/main.go index 514cc34c4..739e4dd5f 100644 --- a/main.go +++ b/main.go @@ -204,11 +204,10 @@ func main() { os.Exit(1) } if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Storage: storage, - EventRecorder: eventRecorder, - MetricsRecorder: metricsH.MetricsRecorder, + Client: mgr.GetClient(), + EventRecorder: eventRecorder, + Metrics: metricsH, + Storage: storage, }).SetupWithManagerAndOptions(mgr, controllers.BucketReconcilerOptions{ MaxConcurrentReconciles: concurrent, }); err != nil { From 3105ba924117b8abfa28726f551da38b0ad89840 Mon Sep 17 00:00:00 2001 From: Hidde Beydals <hello@hidde.co> Date: Mon, 9 Aug 2021 13:24:49 +0200 Subject: [PATCH 48/53] Consolidate condition types into `FetchFailed` This commit consolidates the `DownloadFailed` and `CheckoutFailed` Condition types into a new more generic `FetchFailed` type to simplify the API and observations by consumers. Signed-off-by: Hidde Beydals <hello@hidde.co> --- api/v1beta1/bucket_types.go | 7 ---- controllers/bucket_controller.go | 46 +++++++++++++-------------- controllers/bucket_controller_test.go | 8 ++--- 3 files changed, 27 insertions(+), 34 deletions(-) diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index 23a0ff1d7..9ab5d09a0 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -36,13 +36,6 @@ const ( GoogleBucketProvider string = "gcp" ) -const ( - // DownloadFailedCondition indicates a transient or persistent download failure. If True, observations on the - // upstream Source revision are not possible, and the Artifact available for the Source may be outdated. - // This is a "negative polarity" or "abnormal-true" type, and is only present on the resource if it is True. 
- DownloadFailedCondition string = "DownloadFailed" -) - // BucketSpec defines the desired state of an S3 compatible bucket type BucketSpec struct { // The S3 compatible storage provider name, default ('generic'). diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index e2621a927..c0b380429 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -122,12 +122,12 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res meta.ReadyCondition, conditions.WithConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), conditions.WithNegativePolarityConditions( sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, ), ) @@ -137,7 +137,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res patch.WithOwnedConditions{ Conditions: []string{ sourcev1.ArtifactOutdatedCondition, - sourcev1.DownloadFailedCondition, + sourcev1.FetchFailedCondition, sourcev1.ArtifactUnavailableCondition, meta.ReadyCondition, meta.ReconcilingCondition, @@ -259,7 +259,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret -// fails, it records v1beta1.DownloadFailedCondition=True and returns early. +// fails, it records v1beta1.FetchFailedCondition=True and returns early. // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { @@ -270,7 +270,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu Name: obj.Spec.SecretRef.Name, } if err := r.Get(ctx, secretName, &secret); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) @@ -292,8 +292,8 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // // The bucket contents are downloaded to the given dir using the defined configuration, while taking ignore rules into // account. In case of an error during the download process (including transient errors), it records -// v1beta1.DownloadFailedCondition=True and returns early. -// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to +// v1beta1.FetchFailedCondition=True and returns early. +// On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. 
// @@ -303,7 +303,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: %s", err.Error()) @@ -316,12 +316,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -332,7 +332,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, 
sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -341,7 +341,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -362,7 +362,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -415,7 +415,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Download 
from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -424,7 +424,7 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) @@ -446,7 +446,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 secret *corev1.Secret, dir string) (ctrl.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: %s", err.Error()) @@ -460,12 +460,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) return ctrl.Result{}, err } if !exists { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) r.Eventf(obj, corev1.EventTypeWarning, 
sourcev1.BucketOperationFailedReason, "Bucket '%s' does not exist", obj.Spec.BucketName) @@ -476,7 +476,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 path := filepath.Join(dir, sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -485,7 +485,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) @@ -508,7 +508,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) @@ -560,7 +560,7 @@ func (r 
*BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) @@ -569,7 +569,7 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } - conditions.Delete(obj, sourcev1.DownloadFailedCondition) + conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index b5c9debeb..c08f0d1db 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -307,7 +307,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -325,7 +325,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 
client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -336,7 +336,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), }, }, { @@ -347,7 +347,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.DownloadFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), }, }, { From cf7dca07711422b15e70553aa3b583d6707dbbfc Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Tue, 10 Aug 2021 03:48:11 +0530 Subject: [PATCH 49/53] BucketReconciler: Add reconcileArtifact tests Add `BucketReconciler.reconcileArtifact` tests based on `GitRepositoryReconciler.reconcileArtifact` test cases. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 145 +++++++++++++++++++------- 1 file changed, 105 insertions(+), 40 deletions(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index c08f0d1db..4c2060724 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -165,7 +165,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", - Checksum: "84a516841ba77a5b4648de2cd0dfcb30ea46dbb4", + Checksum: "2e7d2c03a9507ae265ecf5b5356885a53393a2029d241394997265a1a25aefc6", URL: testStorage.Hostname + "/reconcile-storage/c.txt", }, assertPaths: []string{ @@ -198,7 +198,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { obj.Status.Artifact = &sourcev1.Artifact{ Path: fmt.Sprintf("/reconcile-storage/hostname.txt"), Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: "http://outdated.com/reconcile-storage/hostname.txt", } if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil { @@ -215,7 +215,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", - Checksum: "971c419dd609331343dee105fffd0f4608dc0bf2", + Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80", URL: testStorage.Hostname + "/reconcile-storage/hostname.txt", }, }, @@ -444,57 +444,114 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } func TestBucketReconciler_reconcileArtifact(t *testing.T) { + // testChecksum is the checksum value of the artifacts created in this + // test. 
+ const testChecksum = "4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1" + tests := []struct { name string - artifact sourcev1.Artifact - beforeFunc func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want ctrl.Result wantErr bool assertConditions []metav1.Condition }{ { - name: "artifact revision up-to-date", - artifact: sourcev1.Artifact{ - Revision: "existing", + name: "Archiving artifact to storage makes Ready=True", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Up-to-date artifact should not update status", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + obj.Status.Artifact = artifact.DeepCopy() + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(obj.Status.URL).To(BeEmpty()) + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactUnavailableCondition after creating artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, 
meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Removes ArtifactOutdatedCondition after creating a new artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") + }, + want: ctrl.Result{RequeueAfter: interval}, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + }, + }, + { + name: "Creates latest symlink to the created artifact", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + obj.Spec.Interval = metav1.Duration{Duration: interval} }, - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Status.Artifact = &artifact + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + localPath := testStorage.LocalPath(*obj.GetArtifact()) + symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") + targetFile, err := os.Readlink(symlinkPath) + t.Expect(err).NotTo(HaveOccurred()) + t.Expect(localPath).To(Equal(targetFile)) }, + want: ctrl.Result{RequeueAfter: interval}, assertConditions: []metav1.Condition{ *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), }, }, { - name: "dir path deleted", - beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - _ = os.RemoveAll(dir) + name: "Dir path deleted", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + }, + wantErr: true, + }, + { + name: "Dir path is not a directory", + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { + // Remove the given directory and create a file for the same + // 
path. + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) + f, err := os.Create(dir) + defer f.Close() + t.Expect(err).ToNot(HaveOccurred()) + }, + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { + t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, wantErr: true, }, - //{ - // name: "dir path empty", - //}, - //{ - // name: "success", - // artifact: sourcev1.Artifact{ - // Revision: "existing", - // }, - // beforeFunc: func(obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - // obj.Status.Artifact = &artifact - // }, - // assertConditions: []metav1.Condition{ - // *conditions.TrueCondition(sourcev1.ArtifactAvailableCondition, meta.SucceededReason, "Compressed source to artifact with revision 'existing'"), - // }, - //}, - //{ - // name: "symlink", - //}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-artifact-") g.Expect(err).ToNot(HaveOccurred()) defer os.RemoveAll(tmpDir) @@ -504,30 +561,38 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ - Name: "test-bucket", + GenerateName: "test-bucket-", + Generation: 1, + Namespace: "default", }, Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } - if tt.beforeFunc != nil { - tt.beforeFunc(obj, tt.artifact, tmpDir) - } + artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz") + artifact.Checksum = testChecksum - r := &BucketReconciler{ - EventRecorder: record.NewFakeRecorder(32), - Storage: testStorage, + if tt.beforeFunc != nil { + tt.beforeFunc(g, obj, artifact, tmpDir) } dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, tt.artifact, tmpDir) + got, err := 
r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) - //g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + // On error, artifact is empty. Check artifacts only on successful + // reconcile. + if !tt.wantErr { + g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy())) + } g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + + if tt.afterFunc != nil { + tt.afterFunc(g, obj, tmpDir) + } }) } } From af946e71006a3bddafd88069278cd3b02b390bff Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Sun, 28 Nov 2021 00:50:26 +0530 Subject: [PATCH 50/53] Add more reconcileMinioSource test cases Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 9 +-- controllers/bucket_controller_test.go | 79 +++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 4 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index c0b380429..5d5f1e816 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -263,13 +263,14 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // // The caller should assume a failure if an error is returned, or the Result is zero. 
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { - var secret corev1.Secret + var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ Namespace: obj.GetNamespace(), Name: obj.Spec.SecretRef.Name, } - if err := r.Get(ctx, secretName, &secret); err != nil { + secret = &corev1.Secret{} + if err := r.Get(ctx, secretName, secret); err != nil { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '%s': %s", secretName.String(), err.Error()) r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, @@ -281,9 +282,9 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu switch obj.Spec.Provider { case sourcev1.GoogleBucketProvider: - return r.reconcileGCPSource(ctx, obj, artifact, &secret, dir) + return r.reconcileGCPSource(ctx, obj, artifact, secret, dir) default: - return r.reconcileMinioSource(ctx, obj, artifact, &secret, dir) + return r.reconcileMinioSource(ctx, obj, artifact, secret, dir) } } diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 4c2060724..587bdc116 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -382,6 +382,85 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*s3MockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "ignored/file.txt", 
+ Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + } + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*s3MockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + LastModified: time.Now(), + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", + Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", + }, + 
assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { From 8dd51abef3084860665598307b7e8aacd93dc7be Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Mon, 29 Nov 2021 17:42:41 +0530 Subject: [PATCH 51/53] Add bucket controller tests for reconcileGCPSource - Introduce mock GCP Server to test the gcp bucket client against mocked gcp server results. - Add tests for reconcileGCPSource(). - Patch GCPClient.BucketExists() to return no error when the bucket doesn't exist. This keeps the GCP client compatible with the minio client. Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller_test.go | 415 ++++++++++++++++++++++++++ pkg/gcp/gcp.go | 3 +- 2 files changed, 417 insertions(+), 1 deletion(-) diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 587bdc116..40fd9d0ca 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/md5" + "encoding/json" "fmt" "net/http" "net/http/httptest" @@ -32,6 +33,7 @@ import ( "github.com/go-logr/logr" . "github.com/onsi/gomega" + raw "google.golang.org/api/storage/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,6 +49,9 @@ import ( sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" ) +// Environment variable to set the GCP Storage host for the GCP client. 
+const ENV_GCP_STORAGE_HOST = "STORAGE_EMULATOR_HOST" + func TestBucketReconciler_Reconcile(t *testing.T) { g := NewWithT(t) @@ -522,6 +527,272 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { } } +func TestBucketReconciler_reconcileGCPSource(t *testing.T) { + tests := []struct { + name string + bucketName string + bucketObjects []*gcpMockObject + secret *corev1.Secret + beforeFunc func(obj *sourcev1.Bucket) + want ctrl.Result + wantErr bool + assertArtifact sourcev1.Artifact + assertConditions []metav1.Condition + }{ + { + name: "reconciles source", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + ContentType: "text/plain", + Content: []byte("test"), + }, + }, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + Data: map[string][]byte{ + "accesskey": []byte("key"), + "secretkey": []byte("secret"), + "serviceaccount": []byte("testsa"), + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + { + name: "observes non-existing secretRef", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + }, + }, + { + name: "observes invalid secretRef", + bucketName: "dummy", + secret: 
&corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dummy", + }, + }, + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.SecretRef = &meta.LocalObjectReference{ + Name: "dummy", + } + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + }, + }, + { + name: "observes non-existing bucket name", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.BucketName = "invalid" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + }, + }, + { + name: "transient bucket name API failure", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.Endpoint = "transient.example.com" + obj.Spec.BucketName = "unavailable" + }, + wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + }, + }, + { + name: ".sourceignore", + bucketName: "dummy", + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", + Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 
'7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + }, + }, + { + name: "spec.ignore overrides .sourceignore", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + ignore := "included/file.txt" + obj.Spec.Ignore = &ignore + }, + bucketObjects: []*gcpMockObject{ + { + Key: ".sourceignore", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "ignored/file.txt", + Content: []byte("ignored/file.txt"), + ContentType: "text/plain", + }, + { + Key: "included/file.txt", + Content: []byte("included/file.txt"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", + Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + }, + }, + { + name: "up-to-date artifact", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Status.Artifact = &sourcev1.Artifact{ + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + } + }, + bucketObjects: []*gcpMockObject{ + { + Key: "test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{}, + }, + { + name: "Removes FetchFailedCondition after reconciling source", + bucketName: "dummy", + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + }, + bucketObjects: []*gcpMockObject{ + { + Key: 
"test.txt", + Content: []byte("test"), + ContentType: "text/plain", + }, + }, + assertArtifact: sourcev1.Artifact{ + Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", + Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", + }, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + }, + }, + // TODO: Middleware for mock server to test authentication using secret. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme()) + if tt.secret != nil { + builder.WithObjects(tt.secret) + } + r := &BucketReconciler{ + EventRecorder: record.NewFakeRecorder(32), + Client: builder.Build(), + Storage: testStorage, + } + tmpDir, err := os.MkdirTemp("", "reconcile-bucket-source-") + g.Expect(err).ToNot(HaveOccurred()) + defer os.RemoveAll(tmpDir) + + // Test bucket object. + obj := &sourcev1.Bucket{ + TypeMeta: metav1.TypeMeta{ + Kind: sourcev1.BucketKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + }, + Spec: sourcev1.BucketSpec{ + BucketName: tt.bucketName, + Timeout: &metav1.Duration{Duration: timeout}, + Provider: sourcev1.GoogleBucketProvider, + }, + } + + // Set up the mock GCP bucket server. + server := newGCPServer(tt.bucketName) + server.Objects = tt.bucketObjects + server.Start() + defer server.Stop() + + g.Expect(server.HTTPAddress()).ToNot(BeEmpty()) + + obj.Spec.Endpoint = server.HTTPAddress() + obj.Spec.Insecure = true + + if tt.beforeFunc != nil { + tt.beforeFunc(obj) + } + + // Set the GCP storage host to be used by the GCP client. 
+ g.Expect(os.Setenv(ENV_GCP_STORAGE_HOST, obj.Spec.Endpoint)).ToNot(HaveOccurred()) + defer func() { + g.Expect(os.Unsetenv(ENV_GCP_STORAGE_HOST)).ToNot(HaveOccurred()) + }() + + artifact := &sourcev1.Artifact{} + got, err := r.reconcileSource(context.TODO(), obj, artifact, tmpDir) + g.Expect(err != nil).To(Equal(tt.wantErr)) + g.Expect(got).To(Equal(tt.want)) + + g.Expect(artifact).To(MatchArtifact(tt.assertArtifact.DeepCopy())) + g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions)) + }) + } +} + func TestBucketReconciler_reconcileArtifact(t *testing.T) { // testChecksum is the checksum value of the artifacts created in this // test. @@ -855,3 +1126,147 @@ func (s *s3MockServer) handler(w http.ResponseWriter, r *http.Request) { w.Write(found.Content) } } + +type gcpMockObject struct { + Key string + ContentType string + Content []byte +} + +type gcpMockServer struct { + srv *httptest.Server + mux *http.ServeMux + + BucketName string + Etag string + Objects []*gcpMockObject + Close func() +} + +func newGCPServer(bucketName string) *gcpMockServer { + s := &gcpMockServer{BucketName: bucketName} + s.mux = http.NewServeMux() + s.mux.Handle("/", http.HandlerFunc(s.handler)) + + s.srv = httptest.NewUnstartedServer(s.mux) + + return s +} + +func (gs *gcpMockServer) Start() { + gs.srv.Start() +} + +func (gs *gcpMockServer) Stop() { + gs.srv.Close() +} + +func (gs *gcpMockServer) HTTPAddress() string { + return gs.srv.URL +} + +func (gs *gcpMockServer) GetAllObjects() *raw.Objects { + objs := &raw.Objects{} + for _, o := range gs.Objects { + objs.Items = append(objs.Items, getGCPObject(gs.BucketName, *o)) + } + return objs +} + +func (gs *gcpMockServer) GetObjectFile(key string) ([]byte, error) { + for _, o := range gs.Objects { + if o.Key == key { + return o.Content, nil + } + } + return nil, fmt.Errorf("not found") +} + +func (gs *gcpMockServer) handler(w http.ResponseWriter, r *http.Request) { + if strings.HasPrefix(r.RequestURI, "/b/") { 
+ // Handle the bucket info related queries. + if r.RequestURI == fmt.Sprintf("/b/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName) { + // Return info about the bucket. + response := getGCPBucket(gs.BucketName, gs.Etag) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } else if strings.Contains(r.RequestURI, "/o/") { + // Return info about object in the bucket. + var obj *gcpMockObject + for _, o := range gs.Objects { + // The object key in the URI is escaped. + // e.g.: /b/dummy/o/included%2Ffile.txt?alt=json&prettyPrint=false&projection=full + if r.RequestURI == fmt.Sprintf("/b/%s/o/%s?alt=json&prettyPrint=false&projection=full", gs.BucketName, url.QueryEscape(o.Key)) { + obj = o + } + } + if obj != nil { + response := getGCPObject(gs.BucketName, *obj) + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else if strings.Contains(r.RequestURI, "/o?") { + // Return info about all the objects in the bucket. + response := gs.GetAllObjects() + jsonResponse, err := json.Marshal(response) + if err != nil { + w.WriteHeader(500) + return + } + w.WriteHeader(200) + w.Write(jsonResponse) + return + } + w.WriteHeader(404) + return + } else { + // Handle object file query. + bucketPrefix := fmt.Sprintf("/%s/", gs.BucketName) + if strings.HasPrefix(r.RequestURI, bucketPrefix) { + // The URL path is of the format /<bucket>/included/file.txt. + // Extract the object key by discarding the bucket prefix. + key := strings.TrimPrefix(r.URL.Path, bucketPrefix) + // Handle returning object file in a bucket. 
+ response, err := gs.GetObjectFile(key) + if err != nil { + w.WriteHeader(404) + return + } + w.WriteHeader(200) + w.Write(response) + return + } + w.WriteHeader(404) + return + } +} + +func getGCPObject(bucket string, obj gcpMockObject) *raw.Object { + return &raw.Object{ + Bucket: bucket, + Name: obj.Key, + ContentType: obj.ContentType, + } +} + +func getGCPBucket(name, eTag string) *raw.Bucket { + return &raw.Bucket{ + Name: name, + Location: "loc", + Etag: eTag, + } +} diff --git a/pkg/gcp/gcp.go b/pkg/gcp/gcp.go index 9127fcde3..f98e498c4 100644 --- a/pkg/gcp/gcp.go +++ b/pkg/gcp/gcp.go @@ -73,7 +73,8 @@ func ValidateSecret(secret map[string][]byte, name string) error { func (c *GCPClient) BucketExists(ctx context.Context, bucketName string) (bool, error) { _, err := c.Client.Bucket(bucketName).Attrs(ctx) if err == gcpstorage.ErrBucketNotExist { - return false, err + // Not returning error to be compatible with minio's API. + return false, nil } if err != nil { return false, err From 4a1818d5fe848edc089c6d6d368aabe24e0bbe00 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Thu, 9 Dec 2021 03:42:54 +0530 Subject: [PATCH 52/53] bucket: Ignore patch error not found on delete Ignore "not found" error while patching when the delete timestamp is set. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index 5d5f1e816..fac77e7db 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -35,6 +35,7 @@ import ( "golang.org/x/sync/semaphore" "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" @@ -167,6 +168,10 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Finally, patch the resource if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } retErr = kerrors.NewAggregate([]error{retErr, err}) } From a1e50676400e9e94c2da2127f3a91dce5cb40ea6 Mon Sep 17 00:00:00 2001 From: Sunny <darkowlzz@protonmail.com> Date: Tue, 21 Dec 2021 04:43:16 +0530 Subject: [PATCH 53/53] bucket: Add more reconciler design improvements - Remove ArtifactUnavailable condition and use Reconciling condition to convey the same. - Make Reconciling condition affect the ready condition. - Introduce summarizeAndPatch() to calculate the final status conditions and patch them. - Introduce reconcile() to iterate through the sub-reconcilers and execute them. 
Signed-off-by: Sunny <darkowlzz@protonmail.com> --- controllers/bucket_controller.go | 524 +++++++++++++++----------- controllers/bucket_controller_test.go | 124 +++--- go.mod | 2 + go.sum | 4 + 4 files changed, 381 insertions(+), 273 deletions(-) diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go index fac77e7db..a106382d6 100644 --- a/controllers/bucket_controller.go +++ b/controllers/bucket_controller.go @@ -19,6 +19,7 @@ package controllers import ( "context" "crypto/sha256" + "errors" "fmt" "os" "path/filepath" @@ -36,7 +37,7 @@ import ( "google.golang.org/api/option" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" kuberecorder "k8s.io/client-go/tools/record" @@ -53,9 +54,37 @@ import ( "github.com/fluxcd/pkg/runtime/predicates" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + serror "github.com/fluxcd/source-controller/internal/error" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" "github.com/fluxcd/source-controller/pkg/sourceignore" ) +// Status conditions owned by Bucket reconciler. +var bucketOwnedConditions = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, +} + +// Conditions that Ready condition is influenced by in descending order of their +// priority. +var bucketReadyDeps = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + +// Negative conditions that Ready condition is influenced by. 
+var bucketReadyDepsNegative = []string{ + sourcev1.ArtifactOutdatedCondition, + sourcev1.FetchFailedCondition, + meta.StalledCondition, + meta.ReconcilingCondition, +} + // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete @@ -74,6 +103,10 @@ type BucketReconcilerOptions struct { MaxConcurrentReconciles int } +// bucketReconcilerFunc is the function type for all the bucket reconciler +// functions. +type bucketReconcilerFunc func(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) + func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) } @@ -111,69 +144,14 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res return ctrl.Result{}, err } + var recResult sreconcile.Result + // Always attempt to patch the object and status after each reconciliation + // NOTE: This deferred block only modifies the named return error. The + // result from the reconciliation remains the same. Any requeue attributes + // set in the result will continue to be effective. 
defer func() { - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { - obj.Status.SetLastHandledReconcileRequest(v) - } - - // Summarize the Ready condition based on abnormalities that may have been observed - conditions.SetSummary(obj, - meta.ReadyCondition, - conditions.WithConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - conditions.WithNegativePolarityConditions( - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - ), - ) - - // Patch the object, ignoring conflicts on the conditions owned by this controller - patchOpts := []patch.Option{ - patch.WithOwnedConditions{ - Conditions: []string{ - sourcev1.ArtifactOutdatedCondition, - sourcev1.FetchFailedCondition, - sourcev1.ArtifactUnavailableCondition, - meta.ReadyCondition, - meta.ReconcilingCondition, - meta.StalledCondition, - }, - }, - } - - // Determine if the resource is still being reconciled, or if it has stalled, and record this observation - if retErr == nil && (result.IsZero() || !result.Requeue) { - // We are no longer reconciling - conditions.Delete(obj, meta.ReconcilingCondition) - - // We have now observed this generation - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) - - readyCondition := conditions.Get(obj, meta.ReadyCondition) - switch readyCondition.Status { - case metav1.ConditionFalse: - // As we are no longer reconciling and the end-state is not ready, the reconciliation has stalled - conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) - case metav1.ConditionTrue: - // As we are no longer reconciling and the end-state is ready, the reconciliation is no longer stalled - conditions.Delete(obj, meta.StalledCondition) - } - } - - // Finally, patch the resource - if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { - // 
Ignore patch error "not found" when the object is being deleted. - if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) - } - retErr = kerrors.NewAggregate([]error{retErr, err}) - } + retErr = r.summarizeAndPatch(ctx, obj, patchHelper, recResult, retErr) // Always record readiness and duration metrics r.Metrics.RecordReadiness(ctx, obj) @@ -183,60 +161,109 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // Add finalizer first if not exist to avoid the race condition between init and delete if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) + recResult = sreconcile.ResultRequeue return ctrl.Result{Requeue: true}, nil } // Examine if the object is under deletion if !obj.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, obj) + res, err := r.reconcileDelete(ctx, obj) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, res, err) } // Reconcile actual object - return r.reconcile(ctx, obj) + reconcilers := []bucketReconcilerFunc{ + r.reconcileStorage, + r.reconcileSource, + r.reconcileArtifact, + } + recResult, err = r.reconcile(ctx, obj, reconcilers) + return sreconcile.BuildRuntimeResult(ctx, r.EventRecorder, obj, recResult, err) } -// reconcile steps through the actual reconciliation tasks for the object, it returns early on the first step that -// produces an error. -func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { - // Mark the resource as under reconciliation - conditions.MarkReconciling(obj, meta.ProgressingReason, "") +// summarizeAndPatch analyzes the object conditions to create a summary of the +// status conditions and patches the object with the calculated summary. 
+func (r *BucketReconciler) summarizeAndPatch(ctx context.Context, obj *sourcev1.Bucket, patchHelper *patch.Helper, res sreconcile.Result, recErr error) error { + // Record the value of the reconciliation request if any. + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } + + // Compute the reconcile results, obtain patch options and reconcile error. + var patchOpts []patch.Option + patchOpts, recErr = sreconcile.ComputeReconcileResult(obj, res, recErr, bucketOwnedConditions) + + // Summarize the Ready condition based on abnormalities that may have been observed. + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + bucketReadyDeps..., + ), + conditions.WithNegativePolarityConditions( + bucketReadyDepsNegative..., + ), + ) + + // Finally, patch the resource. + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + // Ignore patch error "not found" when the object is being deleted. + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) }) + } + recErr = kerrors.NewAggregate([]error{recErr, err}) + } + + return recErr +} - // Reconcile the storage data - if result, err := r.reconcileStorage(ctx, obj); err != nil || result.IsZero() { - return result, err +// reconcile steps iterates through the actual reconciliation tasks for objec, +// it returns early on the first step that returns ResultRequeue or produces an +// error. 
+func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcilerFunc) (sreconcile.Result, error) { + if obj.Generation != obj.Status.ObservedGeneration { + conditions.MarkReconciling(obj, "NewGeneration", "Reconciling new generation %d", obj.Generation) } + var artifact sourcev1.Artifact + // Create temp working dir tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) if err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, "Failed to create temporary directory: %s", err) - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create temporary directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } defer os.RemoveAll(tmpDir) - // Reconcile the source from upstream - var artifact sourcev1.Artifact - if result, err := r.reconcileSource(ctx, obj, &artifact, tmpDir); err != nil || result.IsZero() { - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, err - } - - // Reconcile the artifact to storage - if result, err := r.reconcileArtifact(ctx, obj, artifact, tmpDir); err != nil || result.IsZero() { - return result, err + // Run the sub-reconcilers and build the result of reconciliation. + var res sreconcile.Result + var resErr error + for _, rec := range reconcilers { + recResult, err := rec(ctx, obj, &artifact, tmpDir) + // Exit immediately on ResultRequeue. + if recResult == sreconcile.ResultRequeue { + return sreconcile.ResultRequeue, nil + } + // If an error is received, prioritize the returned results because an + // error also means immediate requeue. + if err != nil { + resErr = err + res = recResult + break + } + // Prioritize requeue request in the result. 
+ res = sreconcile.LowestRequeuingResult(res, recResult) } - - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return res, resErr } // reconcileStorage ensures the current state of the storage matches the desired and previously observed state. // // All artifacts for the resource except for the current one are garbage collected from the storage. // If the artifact in the Status object of the resource disappeared from storage, it is removed from the object. -// If the object does not have an artifact in its Status object, a v1beta1.ArtifactUnavailableCondition is set. // If the hostname of the URLs on the object do not match the current storage server hostname, they are updated. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -248,26 +275,23 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.B // Record that we do not have an artifact if obj.GetArtifact() == nil { - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage") - return ctrl.Result{Requeue: true}, nil + conditions.MarkReconciling(obj, "NoArtifact", "No artifact for resource in storage") + return sreconcile.ResultSuccess, nil } - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) // Always update URLs to ensure hostname is up-to-date // TODO(hidde): we may want to send out an event only if we notice the URL has changed r.Storage.SetArtifactURL(obj.GetArtifact()) obj.Status.URL = r.Storage.SetHostname(obj.Status.URL) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return 
sreconcile.ResultSuccess, nil } // reconcileSource reconciles the upstream bucket with the client for the given object's Provider, and returns the // result. // If a SecretRef is defined, it attempts to fetch the Secret before calling the provider. If the fetch of the Secret // fails, it records v1beta1.FetchFailedCondition=True and returns early. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { var secret *corev1.Secret if obj.Spec.SecretRef != nil { secretName := types.NamespacedName{ @@ -276,12 +300,13 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu } secret = &corev1.Secret{} if err := r.Get(ctx, secretName, secret); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", secretName.String(), err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.AuthenticationFailedReason, - "Failed to get secret '%s': %s", secretName.String(), err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to get secret '%s': %w", secretName.String(), err), + Reason: sourcev1.AuthenticationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, e.Err.Error()) // Return error as the world as observed may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } } @@ -302,19 +327,18 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bu // On a successful download, it removes v1beta1.FetchFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records 
v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { // Build the client with the configuration from the object and secret s3Client, err := r.buildMinioClient(obj, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to construct S3 client: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to construct S3 client: %s", err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to construct S3 client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } // Confirm bucket exists @@ -322,36 +346,42 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source defer cancel() exists, err := s3Client.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, 
e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Look for file with ignore rules first path := filepath.Join(dir, sourceignore.IgnoreFile) if err := s3Client.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil { if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed 
to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -368,11 +398,12 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()), }) { if err = object.Err; err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Ignore directories and the .sourceignore file @@ -391,13 +422,17 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source revision, err := index.Revision() if err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + 
message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -421,20 +456,21 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, - "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) + r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, + "downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileGCPSource ensures the upstream Google Cloud Storage bucket can be reached and downloaded from using the @@ -446,18 +482,17 @@ func (r *BucketReconciler) reconcileMinioSource(ctx context.Context, obj *source 
// On a successful download, it removes v1beta1.DownloadFailedCondition, and compares the current revision of HEAD to // the artifact on the object, and records v1beta1.ArtifactOutdatedCondition if they differ. // If the download was successful, the given artifact pointer is set to a new artifact with the available metadata. -// -// The caller should assume a failure if an error is returned, or the Result is zero. func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, - secret *corev1.Secret, dir string) (ctrl.Result, error) { + secret *corev1.Secret, dir string) (sreconcile.Result, error) { gcpClient, err := r.buildGCPClient(ctx, secret) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to construct GCP client: %s", err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to construct GCP client: %s", err.Error()) + e := &serror.Event{ + Err: fmt.Errorf("failed to construct GCP client: %w", err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) // Return error as the contents of the secret may change - return ctrl.Result{}, err + return sreconcile.ResultEmpty, e } defer gcpClient.Close(ctrl.LoggerFrom(ctx)) @@ -466,36 +501,42 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 defer cancel() exists, err := gcpClient.BucketExists(ctxTimeout, obj.Spec.BucketName) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to verify existence of bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to verify existence of bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + 
} + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if !exists { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Bucket '%s' does not exist", obj.Spec.BucketName) - return ctrl.Result{}, fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName) + e := &serror.Event{ + Err: fmt.Errorf("bucket '%s' does not exist", obj.Spec.BucketName), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // Look for file with ignore rules first path := filepath.Join(dir, sourceignore.IgnoreFile) if err := gcpClient.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if err != gcpstorage.ErrObjectNotExist { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to get '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to get '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } } ps, err := sourceignore.ReadIgnoreFile(path, nil) if err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, 
sourcev1.BucketOperationFailedReason, - "Failed to read '%s' file: %s", sourceignore.IgnoreFile, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to read '%s' file: %w", sourceignore.IgnoreFile, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } // In-spec patterns take precedence if obj.Spec.Ignore != nil { @@ -514,11 +555,12 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 if err == gcp.IteratorDone { break } - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Failed to list objects from bucket '%s': %s", obj.Spec.BucketName, err.Error()) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("failed to list objects from bucket '%s': %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile { @@ -535,14 +577,17 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 // Calculate revision checksum from the collected index values revision, err := index.Revision() if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to calculate revision: %w", err), + Reason: meta.FailedReason, + } } if !obj.GetArtifact().HasRevision(revision) { // Mark observations about the revision on the object - conditions.MarkTrue(obj, 
sourcev1.ArtifactOutdatedCondition, "NewRevision", - "New upstream revision '%s'", revision) + message := fmt.Sprintf("new upstream revision '%s'", revision) + conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message) + conditions.MarkReconciling(obj, "NewRevision", message) // Download the files in parallel, but with a limited number of workers group, groupCtx := errgroup.WithContext(ctx) @@ -566,113 +611,121 @@ func (r *BucketReconciler) reconcileGCPSource(ctx context.Context, obj *sourcev1 return nil }) if err = group.Wait(); err != nil { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.BucketOperationFailedReason, - "Download from bucket '%s' failed: %s", obj.Spec.BucketName, err) - return ctrl.Result{}, err + e := &serror.Event{ + Err: fmt.Errorf("download from bucket '%s' failed: %w", obj.Spec.BucketName, err), + Reason: sourcev1.BucketOperationFailedReason, + } + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, e.Err.Error()) + return sreconcile.ResultEmpty, e } - r.Eventf(obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, - "Downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) + r.eventLogf(ctx, obj, corev1.EventTypeNormal, sourcev1.BucketOperationSucceedReason, + "downloaded %d files from bucket '%s' revision '%s'", len(index), obj.Spec.BucketName, revision) } conditions.Delete(obj, sourcev1.FetchFailedCondition) // Create potential new artifact *artifact = r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileArtifact archives a new artifact to the storage, if the current observation on the object does not match the // 
given data. // // The inspection of the given data to the object is differed, ensuring any stale observations as -// v1beta1.ArtifactUnavailableCondition and v1beta1.ArtifactOutdatedCondition are always deleted. // If the given artifact does not differ from the object's current, it returns early. // On a successful archive, the artifact in the status of the given object is set, and the symlink in the storage is // updated to its path. -// -// The caller should assume a failure if an error is returned, or the Result is zero. -func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, artifact *sourcev1.Artifact, dir string) (sreconcile.Result, error) { // Always restore the Ready condition in case it got removed due to a transient error defer func() { - if obj.GetArtifact() != nil { - conditions.Delete(obj, sourcev1.ArtifactUnavailableCondition) - } if obj.GetArtifact().HasRevision(artifact.Revision) { conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, - "Stored artifact for revision '%s'", artifact.Revision) + "stored artifact for revision '%s'", artifact.Revision) } }() // The artifact is up-to-date if obj.GetArtifact().HasRevision(artifact.Revision) { - ctrl.LoggerFrom(ctx).Info(fmt.Sprintf("Already up to date, current revision '%s'", artifact.Revision)) - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + r.eventLogf(ctx, obj, corev1.EventTypeNormal, meta.SucceededReason, "already up to date, current revision '%s'", artifact.Revision) + return sreconcile.ResultSuccess, nil } + // Mark reconciling because the artifact and remote source are different. + // and they have to be reconciled. 
+ conditions.MarkReconciling(obj, "NewRevision", "new upstream revision '%s'", artifact.Revision) + // Ensure target path exists and is a directory if f, err := os.Stat(dir); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to stat source path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to stat source path: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } else if !f.IsDir() { - err := fmt.Errorf("source path '%s' is not a directory", dir) - ctrl.LoggerFrom(ctx).Error(err, "invalid target path") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("source path '%s' is not a directory", dir), + Reason: sourcev1.StorageOperationFailedReason, + } } // Ensure artifact directory exists and acquire lock - if err := r.Storage.MkdirAll(artifact); err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to create artifact directory") - return ctrl.Result{}, err + if err := r.Storage.MkdirAll(*artifact); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to create artifact directory: %w", err), + Reason: sourcev1.StorageOperationFailedReason, + } } - unlock, err := r.Storage.Lock(artifact) + unlock, err := r.Storage.Lock(*artifact) if err != nil { - ctrl.LoggerFrom(ctx).Error(err, "failed to acquire lock for artifact") - return ctrl.Result{}, err + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("failed to acquire lock for artifact: %w", err), + Reason: meta.FailedReason, + } } defer unlock() // Archive directory to storage - if err := r.Storage.Archive(&artifact, dir, nil); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Unable to archive artifact to storage: %s", err) - return ctrl.Result{}, err + if err := r.Storage.Archive(artifact, dir, nil); err != nil { + return sreconcile.ResultEmpty, &serror.Event{ + Err: fmt.Errorf("unable to archive artifact to 
storage: %s", err), + Reason: sourcev1.StorageOperationFailedReason, + } } r.AnnotatedEventf(obj, map[string]string{ "revision": artifact.Revision, "checksum": artifact.Checksum, - }, corev1.EventTypeNormal, "NewArtifact", "Stored artifact for revision '%s'", artifact.Revision) + }, corev1.EventTypeNormal, "NewArtifact", "stored artifact for revision '%s'", artifact.Revision) // Record it on the object obj.Status.Artifact = artifact.DeepCopy() // Update symlink on a "best effort" basis - url, err := r.Storage.Symlink(artifact, "latest.tar.gz") + url, err := r.Storage.Symlink(*artifact, "latest.tar.gz") if err != nil { - r.Eventf(obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, - "Failed to update status URL symlink: %s", err) + r.eventLogf(ctx, obj, corev1.EventTypeWarning, sourcev1.StorageOperationFailedReason, + "failed to update status URL symlink: %s", err) } if url != "" { obj.Status.URL = url } - return ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil + return sreconcile.ResultSuccess, nil } // reconcileDelete handles the deletion of an object. It first garbage collects all artifacts for the object from the // artifact storage, if successful, the finalizer is removed from the object. 
-func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +// func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (ctrl.Result, error) { +func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection - return ctrl.Result{}, err + return sreconcile.ResultEmpty, err } // Remove our finalizer from the list controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer) // Stop reconciliation as the object is being deleted - return ctrl.Result{}, nil + return sreconcile.ResultEmpty, nil } // garbageCollect performs a garbage collection for the given v1beta1.Bucket. It removes all but the current @@ -681,23 +734,26 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { if !obj.DeletionTimestamp.IsZero() { if err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { - r.Eventf(obj, corev1.EventTypeWarning, "GarbageCollectionFailed", - "Garbage collection for deleted resource failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err), + Reason: "GarbageCollectionFailed", + } } obj.Status.Artifact = nil // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", - "Garbage collected artifacts for deleted resource") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", + "garbage collected artifacts for deleted resource") return nil } if obj.GetArtifact() != nil { if err := r.Storage.RemoveAllButCurrent(*obj.GetArtifact()); 
err != nil { - r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionFailed", "Garbage collection of old artifacts failed: %s", err) - return err + return &serror.Event{ + Err: fmt.Errorf("garbage collection of old artifacts failed: %s", err), + Reason: "GarbageCollectionFailed", + } } // TODO(hidde): we should only push this event if we actually garbage collected something - r.Eventf(obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "Garbage collected old artifacts") + r.eventLogf(ctx, obj, corev1.EventTypeNormal, "GarbageCollectionSucceeded", "garbage collected old artifacts") } return nil } @@ -771,3 +827,17 @@ func (i etagIndex) Revision() (string, error) { } return fmt.Sprintf("%x", sum.Sum(nil)), nil } + +// eventLog records event and logs at the same time. This log is different from +// the debug log in the event recorder in the sense that this is a simple log, +// the event recorder debug log contains complete details about the event. +func (r *BucketReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) { + msg := fmt.Sprintf(messageFmt, args...) + // Log and emit event. + if eventType == corev1.EventTypeWarning { + ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg) + } else { + ctrl.LoggerFrom(ctx).Info(msg) + } + r.Eventf(obj, eventType, reason, msg) +} diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go index 40fd9d0ca..431322ea1 100644 --- a/controllers/bucket_controller_test.go +++ b/controllers/bucket_controller_test.go @@ -31,6 +31,7 @@ import ( "testing" "time" + "github.com/darkowlzz/controller-check/status" "github.com/go-logr/logr" . 
"github.com/onsi/gomega" raw "google.golang.org/api/storage/v1" @@ -38,7 +39,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/log" @@ -47,6 +47,7 @@ import ( "github.com/fluxcd/pkg/runtime/conditions" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" + sreconcile "github.com/fluxcd/source-controller/internal/reconcile" ) // Environment variable to set the GCP Storage host for the GCP client. @@ -126,6 +127,11 @@ func TestBucketReconciler_Reconcile(t *testing.T) { obj.Generation == obj.Status.ObservedGeneration }, timeout).Should(BeTrue()) + // Check if the object status is valid. + condns := &status.Conditions{NegativePolarity: bucketReadyDepsNegative} + checker := status.NewChecker(testEnv.Client, testEnv.GetScheme(), condns) + checker.CheckErr(ctx, obj) + g.Expect(testEnv.Delete(ctx, obj)).To(Succeed()) // Wait for Bucket to be deleted @@ -141,7 +147,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact *sourcev1.Artifact assertConditions []metav1.Condition @@ -167,6 +173,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, + want: sreconcile.ResultSuccess, assertArtifact: &sourcev1.Artifact{ Path: "/reconcile-storage/c.txt", Revision: "c", @@ -189,12 +196,12 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { testStorage.SetArtifactURL(obj.Status.Artifact) return nil }, - want: ctrl.Result{Requeue: true}, + want: sreconcile.ResultSuccess, assertPaths: []string{ "!/reconcile-storage/invalid.txt", }, assertConditions: 
[]metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactUnavailableCondition, "NoArtifact", "No artifact for resource in storage"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "No artifact for resource in storage"), }, }, { @@ -214,6 +221,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { } return nil }, + want: sreconcile.ResultSuccess, assertPaths: []string{ "/reconcile-storage/hostname.txt", }, @@ -243,7 +251,9 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed()) } - got, err := r.reconcileStorage(context.TODO(), obj) + var artifact sourcev1.Artifact + + got, err := r.reconcileStorage(context.TODO(), obj, &artifact, "") g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) @@ -273,7 +283,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { middleware http.Handler secret *corev1.Secret beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -289,12 +299,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 
'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, // TODO(hidde): middleware for mock server @@ -312,7 +324,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -330,7 +342,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct S3 client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct S3 client: invalid 'dummy' secret data: required fields"), }, }, { @@ -341,7 +353,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), }, }, { @@ -352,7 +364,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { }, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), }, }, { @@ -379,12 +391,14 @@ func 
TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100.tar.gz", Revision: "94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '94992ae8fb8300723e970e304ea3414266cb414e364ba3f570bb09069f883100'"), }, }, { @@ -414,12 +428,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -438,6 +454,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ 
Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", @@ -448,7 +465,7 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*s3MockObject{ { @@ -458,12 +475,14 @@ func TestBucketReconciler_reconcileMinioSource(t *testing.T) { LastModified: time.Now(), }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8.tar.gz", Revision: "f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'f0467900d3cede8323f3e61a1467f7cd370d1c0d942ff990a1a7be1eb1a231e8'"), }, }, } @@ -534,7 +553,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { bucketObjects []*gcpMockObject secret *corev1.Secret beforeFunc func(obj *sourcev1.Bucket) - want ctrl.Result + want sreconcile.Result wantErr bool assertArtifact sourcev1.Artifact assertConditions []metav1.Condition @@ -564,12 +583,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: 
sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, { @@ -580,9 +601,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "Failed to get secret '/dummy': secrets \"dummy\" not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"), }, }, { @@ -598,9 +620,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { Name: "dummy", } }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to construct GCP client: invalid 'dummy' secret data: required fields"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to construct GCP client: invalid 'dummy' secret data: required fields"), }, }, { @@ -609,9 +632,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { beforeFunc: func(obj *sourcev1.Bucket) { 
obj.Spec.BucketName = "invalid" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Bucket 'invalid' does not exist"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' does not exist"), }, }, { @@ -620,9 +644,10 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" }, + want: sreconcile.ResultEmpty, wantErr: true, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to verify existence of bucket 'unavailable'"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to verify existence of bucket 'unavailable'"), }, }, { @@ -645,12 +670,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4.tar.gz", Revision: "7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '7556d9ebaa9bcf1b24f363a6d5543af84403acb340fe1eaaf31dcdb0a6e6b4d4'"), }, }, { @@ -677,12 +704,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: 
sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz", Revision: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"), }, }, { @@ -700,6 +729,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: "23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", @@ -710,7 +740,7 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", beforeFunc: func(obj *sourcev1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "Failed to read test file") + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*gcpMockObject{ { @@ -719,12 +749,14 @@ func TestBucketReconciler_reconcileGCPSource(t *testing.T) { ContentType: "text/plain", }, }, + want: sreconcile.ResultSuccess, assertArtifact: sourcev1.Artifact{ Path: "bucket/test-bucket/23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8.tar.gz", Revision: 
"23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8", }, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "New upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '23d97ef9557996c9d911df4359d6086eda7bec5af76e43651581d80f5bcad4b8'"), }, }, // TODO: Middleware for mock server to test authentication using secret. @@ -802,7 +834,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { name string beforeFunc func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) - want ctrl.Result + want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ @@ -811,9 +843,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -825,20 +858,9 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(obj.Status.URL).To(BeEmpty()) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, 
assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), - }, - }, - { - name: "Removes ArtifactUnavailableCondition after creating artifact", - beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { - obj.Spec.Interval = metav1.Duration{Duration: interval} - conditions.MarkTrue(obj, sourcev1.ArtifactUnavailableCondition, "Foo", "") - }, - want: ctrl.Result{RequeueAfter: interval}, - assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), }, }, { @@ -847,9 +869,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -864,9 +887,10 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { t.Expect(err).NotTo(HaveOccurred()) t.Expect(localPath).To(Equal(targetFile)) }, - want: ctrl.Result{RequeueAfter: interval}, + want: sreconcile.ResultSuccess, assertConditions: []metav1.Condition{ - *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Stored artifact for revision 'existing'"), + *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision 'existing'"), + 
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), }, }, { @@ -874,7 +898,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { beforeFunc: func(t *WithT, obj *sourcev1.Bucket, artifact sourcev1.Artifact, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, }, { name: "Dir path is not a directory", @@ -889,7 +917,11 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, + want: sreconcile.ResultEmpty, wantErr: true, + assertConditions: []metav1.Condition{ + *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'existing'"), + }, }, } @@ -929,7 +961,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { dlog := log.NewDelegatingLogSink(log.NullLogSink{}) nullLogger := logr.New(dlog) - got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, artifact, tmpDir) + got, err := r.reconcileArtifact(logr.NewContext(ctx, nullLogger), obj, &artifact, tmpDir) g.Expect(err != nil).To(Equal(tt.wantErr)) g.Expect(got).To(Equal(tt.want)) diff --git a/go.mod b/go.mod index 84e6cdfce..d89937d1a 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/Masterminds/semver/v3 v3.1.1 github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 github.com/cyphar/filepath-securejoin v0.2.2 + github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c github.com/elazarl/goproxy v0.0.0-20211114080932-d06c3be7c11b github.com/fluxcd/pkg/apis/meta v0.11.0-rc.3 github.com/fluxcd/pkg/gittestserver v0.5.0 @@ -114,6 +115,7 @@ require ( github.com/kevinburke/ssh_config 
v0.0.0-20201106050909-4977a11b4351 // indirect github.com/klauspost/compress v1.13.5 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect github.com/lib/pq v1.10.0 // indirect diff --git a/go.sum b/go.sum index cdfd60673..5401fbdb2 100644 --- a/go.sum +++ b/go.sum @@ -231,6 +231,8 @@ github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1S github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c h1:pyp/Dvd1gYP/D3z1zs46h0YhYzFp0hjxw0XVIO9+vh4= +github.com/darkowlzz/controller-check v0.0.0-20220119215126-648356cef22c/go.mod h1:haYO9UW76kUUKpIBbv3ydaU5wZ/7r0yqp61PGzVRSYU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -617,6 +619,8 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lann/builder 
v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=