From 5be0c537296ac6262b1fb54e4f3eb6a45c624591 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Fri, 23 Aug 2024 13:13:41 +0300 Subject: [PATCH 1/6] Promote Bucket API to v1 Signed-off-by: Stefan Prodan --- PROJECT | 3 + api/v1/bucket_types.go | 278 +++++ api/v1/sts_types.go | 26 + api/v1/zz_generated.deepcopy.go | 168 +++ api/v1beta1/bucket_types.go | 2 +- api/v1beta2/bucket_types.go | 31 +- .../source.toolkit.fluxcd.io_buckets.yaml | 407 +++++++- docs/api/v1/source.md | 960 +++++++++++++++--- internal/controller/bucket_controller.go | 2 +- .../bucket_controller_fetch_test.go | 2 +- internal/controller/bucket_controller_test.go | 251 +++-- internal/controller/helmchart_controller.go | 19 +- .../controller/helmchart_controller_test.go | 6 +- main.go | 2 +- pkg/azure/blob.go | 2 +- pkg/azure/blob_integration_test.go | 2 +- pkg/azure/blob_test.go | 2 +- pkg/minio/minio.go | 2 +- pkg/minio/minio_test.go | 2 +- 19 files changed, 1878 insertions(+), 289 deletions(-) create mode 100644 api/v1/bucket_types.go create mode 100644 api/v1/sts_types.go diff --git a/PROJECT b/PROJECT index 8af858a45..0c243993c 100644 --- a/PROJECT +++ b/PROJECT @@ -37,4 +37,7 @@ resources: - group: source kind: OCIRepository version: v1beta2 +- group: source + kind: Bucket + version: v1 version: "2" diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go new file mode 100644 index 000000000..939519eed --- /dev/null +++ b/api/v1/bucket_types.go @@ -0,0 +1,278 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/fluxcd/pkg/apis/acl" + "github.com/fluxcd/pkg/apis/meta" +) + +const ( + // BucketKind is the string representation of a Bucket. + BucketKind = "Bucket" +) + +const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = "generic" + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. + BucketProviderAmazon string = "aws" + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = "gcp" + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = "azure" +) + +// BucketSpec specifies the required configuration to produce an Artifact for +// an object storage bucket. 
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers" +// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider" +// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider" +// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider" +type BucketSpec struct { + // Provider of the object storage bucket. + // Defaults to 'generic', which expects an S3 (API) compatible object + // storage. + // +kubebuilder:validation:Enum=generic;aws;gcp;azure + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` + + // BucketName is the name of the object storage bucket. + // +required + BucketName string `json:"bucketName"` + + // Endpoint is the object storage address the BucketName is located at. + // +required + Endpoint string `json:"endpoint"` + + // STS specifies the required configuration to use a Security Token + // Service for fetching temporary credentials to authenticate in a + // Bucket provider. + // + // This field is only supported for the `aws` and `generic` providers. + // +optional + STS *BucketSTSSpec `json:"sts,omitempty"` + + // Insecure allows connecting to a non-TLS HTTP Endpoint. 
+ // +optional + Insecure bool `json:"insecure,omitempty"` + + // Region of the Endpoint where the BucketName is located in. + // +optional + Region string `json:"region,omitempty"` + + // Prefix to use for server-side filtering of files in the Bucket. + // +optional + Prefix string `json:"prefix,omitempty"` + + // SecretRef specifies the Secret containing authentication credentials + // for the Bucket. + // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // bucket. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `generic` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` + + // ProxySecretRef specifies the Secret containing the proxy configuration + // to use while communicating with the Bucket server. + // +optional + ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"` + + // Interval at which the Bucket Endpoint is checked for updates. + // This interval is approximate and may be subject to jitter to ensure + // efficient use of resources. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +required + Interval metav1.Duration `json:"interval"` + + // Timeout for fetch operations, defaults to 60s. 
+ // +kubebuilder:default="60s" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$" + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Ignore overrides the set of excluded patterns in the .sourceignore format + // (which is the same as .gitignore). If not provided, a default will be used, + // consult the documentation for your version to find out what those are. + // +optional + Ignore *string `json:"ignore,omitempty"` + + // Suspend tells the controller to suspend the reconciliation of this + // Bucket. + // +optional + Suspend bool `json:"suspend,omitempty"` + + // AccessFrom specifies an Access Control List for allowing cross-namespace + // references to this object. + // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + // +optional + AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` +} + +// BucketSTSSpec specifies the required configuration to use a Security Token +// Service for fetching temporary credentials to authenticate in a Bucket +// provider. +type BucketSTSSpec struct { + // Provider of the Security Token Service. + // +kubebuilder:validation:Enum=aws;ldap + // +required + Provider string `json:"provider"` + + // Endpoint is the HTTP/S endpoint of the Security Token Service from + // where temporary credentials will be fetched. + // +required + // +kubebuilder:validation:Pattern="^(http|https)://.*$" + Endpoint string `json:"endpoint"` + + // SecretRef specifies the Secret containing authentication credentials + // for the STS endpoint. This Secret must contain the fields `username` + // and `password` and is supported only for the `ldap` provider. 
+ // +optional + SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"` + + // CertSecretRef can be given the name of a Secret containing + // either or both of + // + // - a PEM-encoded client certificate (`tls.crt`) and private + // key (`tls.key`); + // - a PEM-encoded CA certificate (`ca.crt`) + // + // and whichever are supplied, will be used for connecting to the + // STS endpoint. The client cert and key are useful if you are + // authenticating with a certificate; the CA cert is useful if + // you are using a self-signed server certificate. The Secret must + // be of type `Opaque` or `kubernetes.io/tls`. + // + // This field is only supported for the `ldap` provider. + // +optional + CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"` +} + +// BucketStatus records the observed state of a Bucket. +type BucketStatus struct { + // ObservedGeneration is the last observed generation of the Bucket object. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // Conditions holds the conditions for the Bucket. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // URL is the dynamic fetch link for the latest Artifact. + // It is provided on a "best effort" basis, and using the precise + // BucketStatus.Artifact data is recommended. + // +optional + URL string `json:"url,omitempty"` + + // Artifact represents the last successful Bucket reconciliation. + // +optional + Artifact *Artifact `json:"artifact,omitempty"` + + // ObservedIgnore is the observed exclusion patterns used for constructing + // the source artifact. + // +optional + ObservedIgnore *string `json:"observedIgnore,omitempty"` + + meta.ReconcileRequestStatus `json:",inline"` +} + +const ( + // BucketOperationSucceededReason signals that the Bucket listing and fetch + // operations succeeded. 
+ BucketOperationSucceededReason string = "BucketOperationSucceeded" + + // BucketOperationFailedReason signals that the Bucket listing or fetch + // operations failed. + BucketOperationFailedReason string = "BucketOperationFailed" +) + +// GetConditions returns the status conditions of the object. +func (in *Bucket) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *Bucket) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + +// GetRequeueAfter returns the duration after which the source must be reconciled again. +func (in *Bucket) GetRequeueAfter() time.Duration { + return in.Spec.Interval.Duration +} + +// GetArtifact returns the latest artifact from the source if present in the status sub-resource. +func (in *Bucket) GetArtifact() *Artifact { + return in.Status.Artifact +} + +// +genclient +// +kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" + +// Bucket is the Schema for the buckets API. +type Bucket struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BucketSpec `json:"spec,omitempty"` + // +kubebuilder:default={"observedGeneration":-1} + Status BucketStatus `json:"status,omitempty"` +} + +// BucketList contains a list of Bucket objects. 
+// +kubebuilder:object:root=true +type BucketList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Bucket `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1/sts_types.go b/api/v1/sts_types.go new file mode 100644 index 000000000..4b1d05881 --- /dev/null +++ b/api/v1/sts_types.go @@ -0,0 +1,26 @@ +/* +Copyright 2024 The Flux authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // STSProviderAmazon represents the AWS provider for Security Token Service. + // Provides support for fetching temporary credentials from an AWS STS endpoint. + STSProviderAmazon string = "aws" + // STSProviderLDAP represents the LDAP provider for Security Token Service. + // Provides support for fetching temporary credentials from an LDAP endpoint. + STSProviderLDAP string = "ldap" +) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 0616741d4..6326ea211 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -55,6 +55,174 @@ func (in *Artifact) DeepCopy() *Artifact { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Bucket) DeepCopyInto(out *Bucket) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket. +func (in *Bucket) DeepCopy() *Bucket { + if in == nil { + return nil + } + out := new(Bucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Bucket) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketList) DeepCopyInto(out *BucketList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Bucket, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList. +func (in *BucketList) DeepCopy() *BucketList { + if in == nil { + return nil + } + out := new(BucketList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BucketList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec. +func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec { + if in == nil { + return nil + } + out := new(BucketSTSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { + *out = *in + if in.STS != nil { + in, out := &in.STS, &out.STS + *out = new(BucketSTSSpec) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.CertSecretRef != nil { + in, out := &in.CertSecretRef, &out.CertSecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + if in.ProxySecretRef != nil { + in, out := &in.ProxySecretRef, &out.ProxySecretRef + *out = new(meta.LocalObjectReference) + **out = **in + } + out.Interval = in.Interval + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Ignore != nil { + in, out := &in.Ignore, &out.Ignore + *out = new(string) + **out = **in + } + if in.AccessFrom != nil { + in, out := &in.AccessFrom, &out.AccessFrom + *out = new(acl.AccessFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. 
+func (in *BucketSpec) DeepCopy() *BucketSpec { + if in == nil { + return nil + } + out := new(BucketSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BucketStatus) DeepCopyInto(out *BucketStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Artifact != nil { + in, out := &in.Artifact, &out.Artifact + *out = new(Artifact) + (*in).DeepCopyInto(*out) + } + if in.ObservedIgnore != nil { + in, out := &in.ObservedIgnore, &out.ObservedIgnore + *out = new(string) + **out = **in + } + out.ReconcileRequestStatus = in.ReconcileRequestStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus. +func (in *BucketStatus) DeepCopy() *BucketStatus { + if in == nil { + return nil + } + out := new(BucketStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *GitRepository) DeepCopyInto(out *GitRepository) { *out = *in diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go index d69ff648b..639a0bbe0 100644 --- a/api/v1beta1/bucket_types.go +++ b/api/v1beta1/bucket_types.go @@ -195,7 +195,7 @@ func (in *Bucket) GetInterval() metav1.Duration { // +genclient // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1beta2" +// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="" diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index d8ce704e7..612c6db06 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -33,35 +33,22 @@ const ( ) const ( - // BucketProviderGeneric for any S3 API compatible storage Bucket. - BucketProviderGeneric string = "generic" - // BucketProviderAmazon for an AWS S3 object storage Bucket. - // Provides support for retrieving credentials from the AWS EC2 service. - BucketProviderAmazon string = "aws" - // BucketProviderGoogle for a Google Cloud Storage Bucket. - // Provides support for authentication using a workload identity. - BucketProviderGoogle string = "gcp" - // BucketProviderAzure for an Azure Blob Storage Bucket. - // Provides support for authentication using a Service Principal, - // Managed Identity or Shared Key. - BucketProviderAzure string = "azure" - // GenericBucketProvider for any S3 API compatible storage Bucket. - // Deprecated: use BucketProviderGeneric. - GenericBucketProvider string = "generic" + // Deprecated: use v1.BucketProviderGeneric. 
+ GenericBucketProvider string = apiv1.BucketProviderGeneric // AmazonBucketProvider for an AWS S3 object storage Bucket. // Provides support for retrieving credentials from the AWS EC2 service. - // Deprecated: use BucketProviderAmazon. - AmazonBucketProvider string = "aws" + // Deprecated: use v1.BucketProviderAmazon. + AmazonBucketProvider string = apiv1.BucketProviderAmazon // GoogleBucketProvider for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. - // Deprecated: use BucketProviderGoogle. - GoogleBucketProvider string = "gcp" + // Deprecated: use v1.BucketProviderGoogle. + GoogleBucketProvider string = apiv1.BucketProviderGoogle // AzureBucketProvider for an Azure Blob Storage Bucket. // Provides support for authentication using a Service Principal, // Managed Identity or Shared Key. - // Deprecated: use BucketProviderAzure. - AzureBucketProvider string = "azure" + // Deprecated: use v1.BucketProviderAzure. + AzureBucketProvider string = apiv1.BucketProviderAzure ) // BucketSpec specifies the required configuration to produce an Artifact for @@ -266,9 +253,9 @@ func (in *Bucket) GetArtifact() *apiv1.Artifact { } // +genclient -// +kubebuilder:storageversion // +kubebuilder:object:root=true // +kubebuilder:subresource:status +// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1" // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint` // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="" // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="" diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 7af0c9beb..2bb459d16 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -14,6 
+14,407 @@ spec: singular: bucket scope: Namespaced versions: + - additionalPrinterColumns: + - jsonPath: .spec.endpoint + name: Endpoint + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + BucketSpec specifies the required configuration to produce an Artifact for + an object storage bucket. + properties: + accessFrom: + description: |- + AccessFrom specifies an Access Control List for allowing cross-namespace + references to this object. + NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 + properties: + namespaceSelectors: + description: |- + NamespaceSelectors is the list of namespace selectors to which this ACL applies. + Items in this list are evaluated using a logical OR operation. + items: + description: |- + NamespaceSelector selects the namespaces to which this ACL applies. + An empty map of MatchLabels matches all namespaces in a cluster. 
+ properties: + matchLabels: + additionalProperties: + type: string + description: |- + MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: array + required: + - namespaceSelectors + type: object + bucketName: + description: BucketName is the name of the object storage bucket. + type: string + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + + and whichever are supplied, will be used for connecting to the + bucket. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + + This field is only supported for the `generic` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + endpoint: + description: Endpoint is the object storage address the BucketName + is located at. + type: string + ignore: + description: |- + Ignore overrides the set of excluded patterns in the .sourceignore format + (which is the same as .gitignore). If not provided, a default will be used, + consult the documentation for your version to find out what those are. + type: string + insecure: + description: Insecure allows connecting to a non-TLS HTTP Endpoint. + type: boolean + interval: + description: |- + Interval at which the Bucket Endpoint is checked for updates. + This interval is approximate and may be subject to jitter to ensure + efficient use of resources. 
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$ + type: string + prefix: + description: Prefix to use for server-side filtering of files in the + Bucket. + type: string + provider: + default: generic + description: |- + Provider of the object storage bucket. + Defaults to 'generic', which expects an S3 (API) compatible object + storage. + enum: + - generic + - aws + - gcp + - azure + type: string + proxySecretRef: + description: |- + ProxySecretRef specifies the Secret containing the proxy configuration + to use while communicating with the Bucket server. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + region: + description: Region of the Endpoint where the BucketName is located + in. + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the Bucket. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + sts: + description: |- + STS specifies the required configuration to use a Security Token + Service for fetching temporary credentials to authenticate in a + Bucket provider. + + + This field is only supported for the `aws` and `generic` providers. + properties: + certSecretRef: + description: |- + CertSecretRef can be given the name of a Secret containing + either or both of + + + - a PEM-encoded client certificate (`tls.crt`) and private + key (`tls.key`); + - a PEM-encoded CA certificate (`ca.crt`) + + + and whichever are supplied, will be used for connecting to the + STS endpoint. The client cert and key are useful if you are + authenticating with a certificate; the CA cert is useful if + you are using a self-signed server certificate. The Secret must + be of type `Opaque` or `kubernetes.io/tls`. + + + This field is only supported for the `ldap` provider. + properties: + name: + description: Name of the referent. 
+ type: string + required: + - name + type: object + endpoint: + description: |- + Endpoint is the HTTP/S endpoint of the Security Token Service from + where temporary credentials will be fetched. + pattern: ^(http|https)://.*$ + type: string + provider: + description: Provider of the Security Token Service. + enum: + - aws + - ldap + type: string + secretRef: + description: |- + SecretRef specifies the Secret containing authentication credentials + for the STS endpoint. This Secret must contain the fields `username` + and `password` and is supported only for the `ldap` provider. + properties: + name: + description: Name of the referent. + type: string + required: + - name + type: object + required: + - endpoint + - provider + type: object + suspend: + description: |- + Suspend tells the controller to suspend the reconciliation of this + Bucket. + type: boolean + timeout: + default: 60s + description: Timeout for fetch operations, defaults to 60s. + pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$ + type: string + required: + - bucketName + - endpoint + - interval + type: object + x-kubernetes-validations: + - message: STS configuration is only supported for the 'aws' and 'generic' + Bucket providers + rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts) + - message: '''aws'' is the only supported STS provider for the ''aws'' + Bucket provider' + rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider + == 'aws' + - message: '''ldap'' is the only supported STS provider for the ''generic'' + Bucket provider' + rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider + == 'ldap' + - message: spec.sts.secretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)' + - message: spec.sts.certSecretRef is not required for the 'aws' STS provider + rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)' + status: + default: + 
observedGeneration: -1 + description: BucketStatus records the observed state of a Bucket. + properties: + artifact: + description: Artifact represents the last successful Bucket reconciliation. + properties: + digest: + description: Digest is the digest of the file in the form of ':'. + pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$ + type: string + lastUpdateTime: + description: |- + LastUpdateTime is the timestamp corresponding to the last update of the + Artifact. + format: date-time + type: string + metadata: + additionalProperties: + type: string + description: Metadata holds upstream information such as OCI annotations. + type: object + path: + description: |- + Path is the relative file path of the Artifact. It can be used to locate + the file in the root of the Artifact storage on the local file system of + the controller managing the Source. + type: string + revision: + description: |- + Revision is a human-readable identifier traceable in the origin source + system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. + type: string + size: + description: Size is the number of bytes in the file. + format: int64 + type: integer + url: + description: |- + URL is the HTTP address of the Artifact as exposed by the controller + managing the Source. It can be used to retrieve the Artifact for + consumption, e.g. by another controller applying the Artifact contents. + type: string + required: + - lastUpdateTime + - path + - revision + - url + type: object + conditions: + description: Conditions holds the conditions for the Bucket. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource.\n---\nThis struct is intended for + direct use as an array at the field path .status.conditions. 
For + example,\n\n\n\ttype FooStatus struct{\n\t // Represents the + observations of a foo's current state.\n\t // Known .status.conditions.type + are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // + +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t + \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t + \ // other fields\n\t}" + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict is important. + The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastHandledReconcileAt: + description: |- + LastHandledReconcileAt holds the value of the most recent + reconcile request value, so a change of the annotation value + can be detected. + type: string + observedGeneration: + description: ObservedGeneration is the last observed generation of + the Bucket object. + format: int64 + type: integer + observedIgnore: + description: |- + ObservedIgnore is the observed exclusion patterns used for constructing + the source artifact. + type: string + url: + description: |- + URL is the dynamic fetch link for the latest Artifact. + It is provided on a "best effort" basis, and using the precise + BucketStatus.Artifact data is recommended. 
+ type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.endpoint name: Endpoint @@ -28,7 +429,7 @@ spec: name: Age type: date deprecated: true - deprecationWarning: v1beta1 Bucket is deprecated, upgrade to v1beta2 + deprecationWarning: v1beta1 Bucket is deprecated, upgrade to v1 name: v1beta1 schema: openAPIV3Schema: @@ -271,6 +672,8 @@ spec: - jsonPath: .status.conditions[?(@.type=="Ready")].message name: Status type: string + deprecated: true + deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1 name: v1beta2 schema: openAPIV3Schema: @@ -656,6 +1059,6 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md index 96c42bdfa..2fcce0d63 100644 --- a/docs/api/v1/source.md +++ b/docs/api/v1/source.md @@ -9,15 +9,17 @@

Package v1 contains API Schema definitions for the source v1 API group

Resource Types: -

GitRepository +

Bucket

-

GitRepository is the Schema for the gitrepositories API.

+

Bucket is the Schema for the buckets API.

@@ -42,7 +44,7 @@ string string @@ -63,8 +65,8 @@ Refer to the Kubernetes API documentation for the fields of the @@ -74,91 +76,132 @@ GitRepositorySpec
-GitRepository +Bucket
spec
- -GitRepositorySpec + +BucketSpec
+ + + + + + + + + + + + @@ -173,62 +216,79 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + + +
-url
+provider
string
-

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

-secretRef
+bucketName
- -github.com/fluxcd/pkg/apis/meta.LocalObjectReference +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec
(Optional) -

SecretRef specifies the Secret containing authentication credentials for -the GitRepository. -For HTTPS repositories the Secret must contain ‘username’ and ‘password’ -fields for basic auth or ‘bearerToken’ field for token auth. -For SSH repositories the Secret must contain ‘identity’ -and ‘known_hosts’ fields.

+

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

-interval
+insecure
- -Kubernetes meta/v1.Duration - +bool
-

Interval at which the GitRepository URL is checked for updates. -This interval is approximate and may be subject to jitter to ensure -efficient use of resources.

+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

-timeout
+region
- -Kubernetes meta/v1.Duration - +string
(Optional) -

Timeout for Git operations like cloning, defaults to 60s.

+

Region of the Endpoint where the BucketName is located in.

-ref
+prefix
- -GitRepositoryRef +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

Reference specifies the Git reference to resolve and monitor for -changes, defaults to the ‘master’ branch.

+

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

-verify
+certSecretRef
- -GitRepositoryVerification + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference
(Optional) -

Verification specifies the configuration to verify the Git commit -signature(s).

+

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

(Optional)

ProxySecretRef specifies the Secret containing the proxy configuration -to use while communicating with the Git server.

+to use while communicating with the Bucket server.

-ignore
+interval
-string + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration +
(Optional) -

Ignore overrides the set of excluded patterns in the .sourceignore format -(which is the same as .gitignore). If not provided, a default will be used, -consult the documentation for your version to find out what those are.

+

Timeout for fetch operations, defaults to 60s.

-suspend
+ignore
-bool +string
(Optional) -

Suspend tells the controller to suspend the reconciliation of this -GitRepository.

+

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

-recurseSubmodules
+suspend
bool
(Optional) -

RecurseSubmodules enables the initialization of all submodules within -the GitRepository as cloned from the URL, using their default settings.

+

Suspend tells the controller to suspend the reconciliation of this +Bucket.

-include
+accessFrom
- -[]GitRepositoryInclude + +github.com/fluxcd/pkg/apis/acl.AccessFrom
(Optional) -

Include specifies a list of GitRepository resources which Artifacts -should be included in the Artifact produced for this GitRepository.

+

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

@@ -238,8 +298,8 @@ should be included in the Artifact produced for this GitRepository.

status
- -GitRepositoryStatus + +BucketStatus @@ -250,9 +310,9 @@ GitRepositoryStatus
-

HelmChart +

GitRepository

-

HelmChart is the Schema for the helmcharts API.

+

GitRepository is the Schema for the gitrepositories API.

@@ -277,7 +337,7 @@ string string @@ -298,8 +358,8 @@ Refer to the Kubernetes API documentation for the fields of the @@ -309,45 +369,52 @@ HelmChartSpec
-HelmChart +GitRepository
spec
- -HelmChartSpec + +GitRepositorySpec
- -
-chart
+url
string
-

Chart is the name or path the Helm chart is available at in the -SourceRef.

+

URL specifies the Git repository URL, it can be an HTTP/S or SSH address.

-version
+secretRef
-string + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference +
(Optional) -

Version is the chart version semver expression, ignored for charts from -GitRepository and Bucket sources. Defaults to latest when omitted.

+

SecretRef specifies the Secret containing authentication credentials for +the GitRepository. +For HTTPS repositories the Secret must contain ‘username’ and ‘password’ +fields for basic auth or ‘bearerToken’ field for token auth. +For SSH repositories the Secret must contain ‘identity’ +and ‘known_hosts’ fields.

-sourceRef
+interval
- -LocalHelmChartSourceReference + +Kubernetes meta/v1.Duration
-

SourceRef is the reference to the Source the chart is available at.

+

Interval at which the GitRepository URL is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

-interval
+timeout
Kubernetes meta/v1.Duration @@ -355,101 +422,329 @@ Kubernetes meta/v1.Duration
-

Interval at which the HelmChart SourceRef is checked for updates. -This interval is approximate and may be subject to jitter to ensure -efficient use of resources.

+(Optional) +

Timeout for Git operations like cloning, defaults to 60s.

-reconcileStrategy
+ref
-string + +GitRepositoryRef +
(Optional) -

ReconcileStrategy determines what enables the creation of a new artifact. -Valid values are (‘ChartVersion’, ‘Revision’). -See the documentation of the values for an explanation on their behavior. -Defaults to ChartVersion when omitted.

+

Reference specifies the Git reference to resolve and monitor for +changes, defaults to the ‘master’ branch.

-valuesFiles
+verify
-[]string + +GitRepositoryVerification +
(Optional) -

ValuesFiles is an alternative list of values files to use as the chart -values (values.yaml is not included by default), expected to be a -relative path in the SourceRef. -Values files are merged in the order of this list with the last file -overriding the first. Ignored when omitted.

+

Verification specifies the configuration to verify the Git commit +signature(s).

-ignoreMissingValuesFiles
+proxySecretRef
-bool + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference +
(Optional) -

IgnoreMissingValuesFiles controls whether to silently ignore missing values -files rather than failing.

+

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Git server.

-suspend
+ignore
-bool +string
(Optional) -

Suspend tells the controller to suspend the reconciliation of this -source.

+

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

-verify
+suspend
- -OCIRepositoryVerification - +bool
(Optional) -

Verify contains the secret name containing the trusted public keys -used to verify the signature and specifies which provider to use to check -whether OCI image is authentic. -This field is only supported when using HelmRepository source with spec.type ‘oci’. -Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

-
+

Suspend tells the controller to suspend the reconciliation of this +GitRepository.

-status
+recurseSubmodules
- -HelmChartStatus - +bool - - +(Optional) +

RecurseSubmodules enables the initialization of all submodules within +the GitRepository as cloned from the URL, using their default settings.

+ + + + +include
+ + +[]GitRepositoryInclude + + + + +(Optional) +

Include specifies a list of GitRepository resources which Artifacts +should be included in the Artifact produced for this GitRepository.

+ + + + + + + +status
+ + +GitRepositoryStatus + + + + + + + + +
+
+

HelmChart +

+

HelmChart is the Schema for the helmcharts API.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+source.toolkit.fluxcd.io/v1 +
+kind
+string +
+HelmChart +
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +HelmChartSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+chart
+ +string + +
+

Chart is the name or path the Helm chart is available at in the +SourceRef.

+
+version
+ +string + +
+(Optional) +

Version is the chart version semver expression, ignored for charts from +GitRepository and Bucket sources. Defaults to latest when omitted.

+
+sourceRef
+ + +LocalHelmChartSourceReference + + +
+

SourceRef is the reference to the Source the chart is available at.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the HelmChart SourceRef is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+reconcileStrategy
+ +string + +
+(Optional) +

ReconcileStrategy determines what enables the creation of a new artifact. +Valid values are (‘ChartVersion’, ‘Revision’). +See the documentation of the values for an explanation on their behavior. +Defaults to ChartVersion when omitted.

+
+valuesFiles
+ +[]string + +
+(Optional) +

ValuesFiles is an alternative list of values files to use as the chart +values (values.yaml is not included by default), expected to be a +relative path in the SourceRef. +Values files are merged in the order of this list with the last file +overriding the first. Ignored when omitted.

+
+ignoreMissingValuesFiles
+ +bool + +
+(Optional) +

IgnoreMissingValuesFiles controls whether to silently ignore missing values +files rather than failing.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +source.

+
+verify
+ + +OCIRepositoryVerification + + +
+(Optional) +

Verify contains the secret name containing the trusted public keys +used to verify the signature and specifies which provider to use to check +whether OCI image is authentic. +This field is only supported when using HelmRepository source with spec.type ‘oci’. +Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.

+
+
+status
+ + +HelmChartStatus + + +
+
@@ -711,6 +1006,7 @@ HelmRepositoryStatus

(Appears on: +BucketStatus, GitRepositoryStatus, HelmChartStatus, HelmRepositoryStatus) @@ -818,6 +1114,436 @@ map[string]string

+

BucketSTSSpec +

+

+(Appears on: +BucketSpec) +

+

BucketSTSSpec specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a Bucket +provider.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+

Provider of the Security Token Service.

+
+endpoint
+ +string + +
+

Endpoint is the HTTP/S endpoint of the Security Token Service from +where temporary credentials will be fetched.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the STS endpoint. This Secret must contain the fields username +and password and is supported only for the ldap provider.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +STS endpoint. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the ldap provider.

+
+
+
+

BucketSpec +

+

+(Appears on: +Bucket) +

+

BucketSpec specifies the required configuration to produce an Artifact for +an object storage bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+provider
+ +string + +
+(Optional) +

Provider of the object storage bucket. +Defaults to ‘generic’, which expects an S3 (API) compatible object +storage.

+
+bucketName
+ +string + +
+

BucketName is the name of the object storage bucket.

+
+endpoint
+ +string + +
+

Endpoint is the object storage address the BucketName is located at.

+
+sts
+ + +BucketSTSSpec + + +
+(Optional) +

STS specifies the required configuration to use a Security Token +Service for fetching temporary credentials to authenticate in a +Bucket provider.

+

This field is only supported for the aws and generic providers.

+
+insecure
+ +bool + +
+(Optional) +

Insecure allows connecting to a non-TLS HTTP Endpoint.

+
+region
+ +string + +
+(Optional) +

Region of the Endpoint where the BucketName is located in.

+
+prefix
+ +string + +
+(Optional) +

Prefix to use for server-side filtering of files in the Bucket.

+
+secretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

SecretRef specifies the Secret containing authentication credentials +for the Bucket.

+
+certSecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

CertSecretRef can be given the name of a Secret containing +either or both of

+
    +
  • a PEM-encoded client certificate (tls.crt) and private +key (tls.key);
  • +
  • a PEM-encoded CA certificate (ca.crt)
  • +
+

and whichever are supplied, will be used for connecting to the +bucket. The client cert and key are useful if you are +authenticating with a certificate; the CA cert is useful if +you are using a self-signed server certificate. The Secret must +be of type Opaque or kubernetes.io/tls.

+

This field is only supported for the generic provider.

+
+proxySecretRef
+ + +github.com/fluxcd/pkg/apis/meta.LocalObjectReference + + +
+(Optional) +

ProxySecretRef specifies the Secret containing the proxy configuration +to use while communicating with the Bucket server.

+
+interval
+ + +Kubernetes meta/v1.Duration + + +
+

Interval at which the Bucket Endpoint is checked for updates. +This interval is approximate and may be subject to jitter to ensure +efficient use of resources.

+
+timeout
+ + +Kubernetes meta/v1.Duration + + +
+(Optional) +

Timeout for fetch operations, defaults to 60s.

+
+ignore
+ +string + +
+(Optional) +

Ignore overrides the set of excluded patterns in the .sourceignore format +(which is the same as .gitignore). If not provided, a default will be used, +consult the documentation for your version to find out what those are.

+
+suspend
+ +bool + +
+(Optional) +

Suspend tells the controller to suspend the reconciliation of this +Bucket.

+
+accessFrom
+ + +github.com/fluxcd/pkg/apis/acl.AccessFrom + + +
+(Optional) +

AccessFrom specifies an Access Control List for allowing cross-namespace +references to this object. +NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

+
+
+
+

BucketStatus +

+

+(Appears on: +Bucket) +

+

BucketStatus records the observed state of a Bucket.

+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+observedGeneration
+ +int64 + +
+(Optional) +

ObservedGeneration is the last observed generation of the Bucket object.

+
+conditions
+ + +[]Kubernetes meta/v1.Condition + + +
+(Optional) +

Conditions holds the conditions for the Bucket.

+
+url
+ +string + +
+(Optional) +

URL is the dynamic fetch link for the latest Artifact. +It is provided on a “best effort” basis, and using the precise +BucketStatus.Artifact data is recommended.

+
+artifact
+ + +Artifact + + +
+(Optional) +

Artifact represents the last successful Bucket reconciliation.

+
+observedIgnore
+ +string + +
+(Optional) +

ObservedIgnore is the observed exclusion patterns used for constructing +the source artifact.

+
+ReconcileRequestStatus
+ + +github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus + + +
+

+(Members of ReconcileRequestStatus are embedded into this type.) +

+
+
+

GitRepositoryInclude

diff --git a/internal/controller/bucket_controller.go b/internal/controller/bucket_controller.go index 4a677a6d4..9a347c70d 100644 --- a/internal/controller/bucket_controller.go +++ b/internal/controller/bucket_controller.go @@ -52,8 +52,8 @@ import ( rreconcile "github.com/fluxcd/pkg/runtime/reconcile" "github.com/fluxcd/pkg/sourceignore" + bucketv1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1" - bucketv1 "github.com/fluxcd/source-controller/api/v1beta2" intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/index" diff --git a/internal/controller/bucket_controller_fetch_test.go b/internal/controller/bucket_controller_fetch_test.go index b31568ff8..ead96fb99 100644 --- a/internal/controller/bucket_controller_fetch_test.go +++ b/internal/controller/bucket_controller_fetch_test.go @@ -27,7 +27,7 @@ import ( "gotest.tools/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" "github.com/fluxcd/source-controller/internal/index" ) diff --git a/internal/controller/bucket_controller_test.go b/internal/controller/bucket_controller_test.go index 84835a533..7563d6e99 100644 --- a/internal/controller/bucket_controller_test.go +++ b/internal/controller/bucket_controller_test.go @@ -44,7 +44,6 @@ import ( "github.com/fluxcd/pkg/runtime/patch" sourcev1 "github.com/fluxcd/source-controller/api/v1" - bucketv1 "github.com/fluxcd/source-controller/api/v1beta2" intdigest "github.com/fluxcd/source-controller/internal/digest" "github.com/fluxcd/source-controller/internal/index" gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs" @@ -68,10 +67,10 @@ func TestBucketReconciler_deleteBeforeFinalizer(t *testing.T) { g.Expect(k8sClient.Delete(ctx, 
namespace)).NotTo(HaveOccurred()) }) - bucket := &bucketv1.Bucket{} + bucket := &sourcev1.Bucket{} bucket.Name = "test-bucket" bucket.Namespace = namespaceName - bucket.Spec = bucketv1.BucketSpec{ + bucket.Spec = sourcev1.BucketSpec{ Interval: metav1.Duration{Duration: interval}, BucketName: "foo", Endpoint: "bar", @@ -124,12 +123,12 @@ func TestBucketReconciler_Reconcile(t *testing.T) { g.Expect(testEnv.Create(ctx, secret)).To(Succeed()) defer testEnv.Delete(ctx, secret) - origObj := &bucketv1.Bucket{ + origObj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "bucket-reconcile-", Namespace: "default", }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Provider: "generic", BucketName: s3Server.BucketName, Endpoint: u.Host, @@ -197,7 +196,7 @@ func TestBucketReconciler_Reconcile(t *testing.T) { func TestBucketReconciler_reconcileStorage(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *bucketv1.Bucket, storage *Storage) error + beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error want sreconcile.Result wantErr bool assertArtifact *sourcev1.Artifact @@ -206,7 +205,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }{ { name: "garbage collects", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { revisions := []string{"a", "b", "c", "d"} for n := range revisions { v := revisions[n] @@ -256,7 +255,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices missing artifact in storage", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { obj.Status.Artifact = &sourcev1.Artifact{ Path: "/reconcile-storage/invalid.txt", Revision: "d", @@ -275,7 +274,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices empty artifact digest", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error 
{ + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { f := "empty-digest.txt" obj.Status.Artifact = &sourcev1.Artifact{ @@ -306,7 +305,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "notices artifact digest mismatch", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { f := "digest-mismatch.txt" obj.Status.Artifact = &sourcev1.Artifact{ @@ -337,7 +336,7 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { }, { name: "updates hostname on diff from current", - beforeFunc: func(obj *bucketv1.Bucket, storage *Storage) error { + beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error { obj.Status.Artifact = &sourcev1.Artifact{ Path: "/reconcile-storage/hostname.txt", Revision: "f", @@ -380,14 +379,14 @@ func TestBucketReconciler_reconcileStorage(t *testing.T) { r := &BucketReconciler{ Client: fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). - WithStatusSubresource(&bucketv1.Bucket{}). + WithStatusSubresource(&sourcev1.Bucket{}). 
Build(), EventRecorder: record.NewFakeRecorder(32), Storage: testStorage, patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-", Generation: 1, @@ -438,7 +437,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { bucketObjects []*s3mock.Object middleware http.Handler secret *corev1.Secret - beforeFunc func(obj *bucketv1.Bucket) + beforeFunc func(obj *sourcev1.Bucket) want sreconcile.Result wantErr bool assertIndex *index.Digester @@ -472,7 +471,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing secretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -495,7 +494,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -513,7 +512,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing certSecretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -536,7 +535,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.CertSecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -554,7 +553,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing proxySecretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ 
Name: "dummy", } @@ -577,7 +576,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -595,8 +594,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing sts.secretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ SecretRef: &meta.LocalObjectReference{Name: "dummy"}, } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -618,9 +617,9 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Provider = "generic" - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + obj.Spec.STS = &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "https://something", SecretRef: &meta.LocalObjectReference{Name: "dummy"}, @@ -639,8 +638,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing sts.certSecretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + beforeFunc: func(obj *sourcev1.Bucket) { + obj.Spec.STS = &sourcev1.BucketSTSSpec{ CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -662,9 +661,9 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Provider = "generic" - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + obj.Spec.STS = &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "https://something", CertSecretRef: &meta.LocalObjectReference{Name: "dummy"}, 
@@ -683,7 +682,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes non-existing bucket name", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -691,7 +690,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -699,9 +698,9 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes incompatible sts.provider", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Provider = "generic" - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + obj.Spec.STS = &sourcev1.BucketSTSSpec{ Provider: "aws", } conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -718,9 +717,9 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Observes invalid sts.endpoint", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Provider = "generic" - obj.Spec.STS = &bucketv1.BucketSTSSpec{ + obj.Spec.STS = &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "something\t", } @@ -737,7 +736,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { }, { name: "Transient bucket name API failure", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: 
func(obj *sourcev1.Bucket) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -746,7 +745,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -786,7 +785,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "spec.ignore overrides .sourceignore", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { ignore := "!ignored/file.txt" obj.Spec.Ignore = &ignore }, @@ -823,7 +822,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Up-to-date artifact", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } @@ -850,8 +849,8 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file") + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*s3mock.Object{ { 
@@ -881,7 +880,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { LastModified: time.Now(), }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{ Path: "some-path", Revision: "some-rev", @@ -904,7 +903,7 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). - WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret) @@ -918,12 +917,12 @@ func TestBucketReconciler_reconcileSource_generic(t *testing.T) { } tmpDir := t.TempDir() - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } @@ -976,7 +975,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { bucketName string bucketObjects []*gcsmock.Object secret *corev1.Secret - beforeFunc func(obj *bucketv1.Bucket) + beforeFunc func(obj *sourcev1.Bucket) want sreconcile.Result wantErr bool assertIndex *index.Digester @@ -1003,7 +1002,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { "serviceaccount": []byte("testsa"), }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -1020,7 +1019,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Observes non-existing secretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -1044,7 +1043,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj 
*sourcev1.Bucket) { obj.Spec.SecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -1063,7 +1062,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Observes non-existing proxySecretRef", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -1087,7 +1086,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { Name: "dummy", }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.ProxySecretRef = &meta.LocalObjectReference{ Name: "dummy", } @@ -1106,7 +1105,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Observes non-existing bucket name", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.BucketName = "invalid" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -1115,14 +1114,14 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "bucket 'invalid' not found"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, }, { name: "Transient bucket name API failure", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Spec.Endpoint = "transient.example.com" obj.Spec.BucketName = "unavailable" conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -1132,7 +1131,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { 
wantErr: true, assertIndex: index.NewDigester(), assertConditions: []metav1.Condition{ - *conditions.TrueCondition(sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), + *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"), *conditions.TrueCondition(meta.ReconcilingCondition, meta.ProgressingReason, "foo"), *conditions.UnknownCondition(meta.ReadyCondition, "foo", "bar"), }, @@ -1172,7 +1171,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "spec.ignore overrides .sourceignore", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { ignore := "!ignored/file.txt" obj.Spec.Ignore = &ignore }, @@ -1209,7 +1208,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Up-to-date artifact", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{ Revision: "sha256:b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479", } @@ -1236,8 +1235,8 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { { name: "Removes FetchFailedCondition after reconciling source", bucketName: "dummy", - beforeFunc: func(obj *bucketv1.Bucket) { - conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, bucketv1.BucketOperationFailedReason, "failed to read test file") + beforeFunc: func(obj *sourcev1.Bucket) { + conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file") }, bucketObjects: []*gcsmock.Object{ { @@ -1267,7 +1266,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { Generation: 3, }, }, - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{ Path: "some-path", Revision: 
"some-rev", @@ -1291,7 +1290,7 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). - WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) if tt.secret != nil { clientBuilder.WithObjects(tt.secret) @@ -1306,12 +1305,12 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { tmpDir := t.TempDir() // Test bucket object. - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ BucketName: tt.bucketName, Timeout: &metav1.Duration{Duration: timeout}, Provider: "gcp", @@ -1368,15 +1367,15 @@ func TestBucketReconciler_reconcileSource_gcs(t *testing.T) { func TestBucketReconciler_reconcileArtifact(t *testing.T) { tests := []struct { name string - beforeFunc func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) - afterFunc func(t *WithT, obj *bucketv1.Bucket, dir string) + beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) + afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string) want sreconcile.Result wantErr bool assertConditions []metav1.Condition }{ { name: "Archiving artifact to storage makes ArtifactInStorage=True", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -1390,7 +1389,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Up-to-date artifact should not persist and update status", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, 
index *index.Digester, dir string) { revision := index.Digest(intdigest.Canonical) obj.Spec.Interval = metav1.Duration{Duration: interval} // Incomplete artifact @@ -1398,7 +1397,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { // Still incomplete t.Expect(obj.Status.URL).To(BeEmpty()) }, @@ -1411,7 +1410,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Removes ArtifactOutdatedCondition after creating a new artifact", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "") conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") @@ -1426,12 +1425,12 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Creates latest symlink to the created artifact", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { obj.Spec.Interval = metav1.Duration{Duration: interval} conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { localPath := testStorage.LocalPath(*obj.GetArtifact()) symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz") targetFile, err := os.Readlink(symlinkPath) @@ -1447,7 +1446,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Dir path 
deleted", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") @@ -1462,7 +1461,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { }, { name: "Dir path is not a directory", - beforeFunc: func(t *WithT, obj *bucketv1.Bucket, index *index.Digester, dir string) { + beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *index.Digester, dir string) { // Remove the given directory and create a file for the same // path. t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) @@ -1472,7 +1471,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { conditions.MarkReconciling(obj, meta.ProgressingReason, "foo") conditions.MarkUnknown(obj, meta.ReadyCondition, "foo", "bar") }, - afterFunc: func(t *WithT, obj *bucketv1.Bucket, dir string) { + afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) { t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred()) }, want: sreconcile.ResultEmpty, @@ -1491,7 +1490,7 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { clientBuilder := fakeclient.NewClientBuilder(). WithScheme(testEnv.GetScheme()). 
- WithStatusSubresource(&bucketv1.Bucket{}) + WithStatusSubresource(&sourcev1.Bucket{}) r := &BucketReconciler{ Client: clientBuilder.Build(), @@ -1500,13 +1499,13 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { patchOptions: getPatchOptions(bucketReadyCondition.Owned, "sc"), } - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test-bucket-", Generation: 1, Namespace: "default", }, - Spec: bucketv1.BucketSpec{ + Spec: sourcev1.BucketSpec{ Timeout: &metav1.Duration{Duration: timeout}, }, } @@ -1547,13 +1546,13 @@ func TestBucketReconciler_reconcileArtifact(t *testing.T) { func TestBucketReconciler_statusConditions(t *testing.T) { tests := []struct { name string - beforeFunc func(obj *bucketv1.Bucket) + beforeFunc func(obj *sourcev1.Bucket) assertConditions []metav1.Condition wantErr bool }{ { name: "positive conditions only", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision") }, assertConditions: []metav1.Condition{ @@ -1563,7 +1562,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { }, { name: "multiple failures", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory") conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error") @@ -1578,7 +1577,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { }, { name: "mixed positive and negative conditions", - beforeFunc: func(obj *bucketv1.Bucket) { + beforeFunc: func(obj *sourcev1.Bucket) { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored 
artifact for revision") conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret") }, @@ -1595,10 +1594,10 @@ func TestBucketReconciler_statusConditions(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ TypeMeta: metav1.TypeMeta{ - APIVersion: bucketv1.GroupVersion.String(), - Kind: bucketv1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), + Kind: sourcev1.BucketKind, }, ObjectMeta: metav1.ObjectMeta{ Name: "test-bucket", @@ -1609,7 +1608,7 @@ func TestBucketReconciler_statusConditions(t *testing.T) { c := fakeclient.NewClientBuilder(). WithScheme(testEnv.Scheme()). WithObjects(obj). - WithStatusSubresource(&bucketv1.Bucket{}). + WithStatusSubresource(&sourcev1.Bucket{}). Build() serialPatcher := patch.NewSerialPatcher(obj, c) @@ -1644,8 +1643,8 @@ func TestBucketReconciler_notify(t *testing.T) { name string res sreconcile.Result resErr error - oldObjBeforeFunc func(obj *bucketv1.Bucket) - newObjBeforeFunc func(obj *bucketv1.Bucket) + oldObjBeforeFunc func(obj *sourcev1.Bucket) + newObjBeforeFunc func(obj *sourcev1.Bucket) wantEvent string }{ { @@ -1657,7 +1656,7 @@ func TestBucketReconciler_notify(t *testing.T) { name: "new artifact", res: sreconcile.ResultSuccess, resErr: nil, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { + newObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} }, wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from", @@ -1666,12 +1665,12 @@ func TestBucketReconciler_notify(t *testing.T) { name: "recovery from failure", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, 
"fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { + newObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, @@ -1681,12 +1680,12 @@ func TestBucketReconciler_notify(t *testing.T) { name: "recovery and new artifact", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail") conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { + newObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Digest: "bbb"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, @@ -1696,11 +1695,11 @@ func TestBucketReconciler_notify(t *testing.T) { name: "no updates", res: sreconcile.ResultSuccess, resErr: nil, - oldObjBeforeFunc: func(obj *bucketv1.Bucket) { + oldObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, - newObjBeforeFunc: func(obj *bucketv1.Bucket) { + newObjBeforeFunc: func(obj *sourcev1.Bucket) { obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Digest: "yyy"} conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready") }, @@ -1713,8 +1712,8 @@ func TestBucketReconciler_notify(t *testing.T) { recorder := record.NewFakeRecorder(32) - oldObj := &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + oldObj := &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ BucketName: "test-bucket", }, } @@ -1755,23 
+1754,23 @@ func TestBucketReconciler_notify(t *testing.T) { func TestBucketReconciler_getProxyURL(t *testing.T) { tests := []struct { name string - bucket *bucketv1.Bucket + bucket *sourcev1.Bucket objects []client.Object expectedURL string expectedErr string }{ { name: "empty proxySecretRef", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: nil, }, }, }, { name: "non-existing proxySecretRef", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "non-existing", }, @@ -1781,8 +1780,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "missing address in proxySecretRef", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1800,8 +1799,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "invalid address in proxySecretRef", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1821,8 +1820,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "no user, no password", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1842,8 +1841,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "user, no password", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1864,8 +1863,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "no user, password", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + 
bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1886,8 +1885,8 @@ func TestBucketReconciler_getProxyURL(t *testing.T) { }, { name: "user, password", - bucket: &bucketv1.Bucket{ - Spec: bucketv1.BucketSpec{ + bucket: &sourcev1.Bucket{ + Spec: sourcev1.BucketSpec{ ProxySecretRef: &meta.LocalObjectReference{ Name: "dummy", }, @@ -1941,13 +1940,13 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { tests := []struct { name string bucketProvider string - stsConfig *bucketv1.BucketSTSSpec + stsConfig *sourcev1.BucketSTSSpec err string }{ { name: "gcp unsupported", bucketProvider: "gcp", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", }, @@ -1956,7 +1955,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "azure unsupported", bucketProvider: "azure", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", }, @@ -1965,7 +1964,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "aws supported", bucketProvider: "aws", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", }, @@ -1973,7 +1972,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "invalid endpoint", bucketProvider: "aws", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "test", }, @@ -1998,7 +1997,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "ldap unsupported for aws", bucketProvider: "aws", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "http://test", }, @@ -2007,7 +2006,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "aws unsupported for generic", bucketProvider: "generic", - 
stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", }, @@ -2016,7 +2015,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "aws does not require a secret", bucketProvider: "aws", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", SecretRef: &meta.LocalObjectReference{}, @@ -2026,7 +2025,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "aws does not require a cert secret", bucketProvider: "aws", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "aws", Endpoint: "http://test", CertSecretRef: &meta.LocalObjectReference{}, @@ -2036,7 +2035,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "ldap may use a secret", bucketProvider: "generic", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "http://test", SecretRef: &meta.LocalObjectReference{}, @@ -2045,7 +2044,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "ldap may use a cert secret", bucketProvider: "generic", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "http://test", CertSecretRef: &meta.LocalObjectReference{}, @@ -2054,7 +2053,7 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { { name: "ldap may not use a secret or cert secret", bucketProvider: "generic", - stsConfig: &bucketv1.BucketSTSSpec{ + stsConfig: &sourcev1.BucketSTSSpec{ Provider: "ldap", Endpoint: "http://test", }, @@ -2065,12 +2064,12 @@ func TestBucketReconciler_APIServerValidation_STS(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - obj := &bucketv1.Bucket{ + obj := &sourcev1.Bucket{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "bucket-reconcile-", Namespace: "default", }, - Spec: bucketv1.BucketSpec{ + Spec: 
sourcev1.BucketSpec{ Provider: tt.bucketProvider, BucketName: "test", Endpoint: "test", diff --git a/internal/controller/helmchart_controller.go b/internal/controller/helmchart_controller.go index 4467ec801..5e9e4bdb0 100644 --- a/internal/controller/helmchart_controller.go +++ b/internal/controller/helmchart_controller.go @@ -65,7 +65,6 @@ import ( "github.com/fluxcd/pkg/tar" sourcev1 "github.com/fluxcd/source-controller/api/v1" - sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2" "github.com/fluxcd/source-controller/internal/cache" serror "github.com/fluxcd/source-controller/internal/error" "github.com/fluxcd/source-controller/internal/helm/chart" @@ -191,7 +190,7 @@ func (r *HelmChartReconciler) SetupWithManagerAndOptions(ctx context.Context, mg builder.WithPredicates(SourceRevisionChangePredicate{}), ). Watches( - &sourcev1beta2.Bucket{}, + &sourcev1.Bucket{}, handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange), builder.WithPredicates(SourceRevisionChangePredicate{}), ). 
@@ -502,7 +501,7 @@ func (r *HelmChartReconciler) reconcileSource(ctx context.Context, sp *patch.Ser switch typedSource := s.(type) { case *sourcev1.HelmRepository: return r.buildFromHelmRepository(ctx, obj, typedSource, build) - case *sourcev1.GitRepository, *sourcev1beta2.Bucket: + case *sourcev1.GitRepository, *sourcev1.Bucket: return r.buildFromTarballArtifact(ctx, obj, *typedSource.GetArtifact(), build) default: // Ending up here should generally not be possible @@ -777,12 +776,12 @@ func (r *HelmChartReconciler) buildFromTarballArtifact(ctx context.Context, obj if obj.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind { rev = git.ExtractHashFromRevision(rev).String() } - if obj.Spec.SourceRef.Kind == sourcev1beta2.BucketKind { + if obj.Spec.SourceRef.Kind == sourcev1.BucketKind { if dig := digest.Digest(rev); dig.Validate() == nil { rev = dig.Encoded() } } - if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1beta2.BucketKind { + if kind := obj.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind { // The SemVer from the metadata is at times used in e.g. the label metadata for a resource // in a chart, which has a limited length of 63 characters. 
// To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and @@ -928,15 +927,15 @@ func (r *HelmChartReconciler) getSource(ctx context.Context, obj *sourcev1.HelmC return nil, err } s = &repo - case sourcev1beta2.BucketKind: - var bucket sourcev1beta2.Bucket + case sourcev1.BucketKind: + var bucket sourcev1.Bucket if err := r.Client.Get(ctx, namespacedName, &bucket); err != nil { return nil, err } s = &bucket default: return nil, fmt.Errorf("unsupported source kind '%s', must be one of: %v", obj.Spec.SourceRef.Kind, []string{ - sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1beta2.BucketKind}) + sourcev1.HelmRepositoryKind, sourcev1.GitRepositoryKind, sourcev1.BucketKind}) } return s, nil } @@ -1196,7 +1195,7 @@ func (r *HelmChartReconciler) requestsForGitRepositoryChange(ctx context.Context } func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o client.Object) []reconcile.Request { - bucket, ok := o.(*sourcev1beta2.Bucket) + bucket, ok := o.(*sourcev1.Bucket) if !ok { ctrl.LoggerFrom(ctx).Error(fmt.Errorf("expected a Bucket, got %T", o), "failed to get reconcile requests for Bucket change") @@ -1210,7 +1209,7 @@ func (r *HelmChartReconciler) requestsForBucketChange(ctx context.Context, o cli var list sourcev1.HelmChartList if err := r.List(ctx, &list, client.MatchingFields{ - sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1beta2.BucketKind, bucket.Name), + sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name), }); err != nil { ctrl.LoggerFrom(ctx).Error(err, "failed to list HelmCharts for Bucket change") return nil diff --git a/internal/controller/helmchart_controller_test.go b/internal/controller/helmchart_controller_test.go index 39f9991f1..6bc1e890b 100644 --- a/internal/controller/helmchart_controller_test.go +++ b/internal/controller/helmchart_controller_test.go @@ -1855,10 +1855,10 @@ func TestHelmChartReconciler_getSource(t *testing.T) { Namespace: "foo", 
}, }, - &sourcev1beta2.Bucket{ + &sourcev1.Bucket{ TypeMeta: metav1.TypeMeta{ - Kind: sourcev1beta2.BucketKind, - APIVersion: sourcev1beta2.GroupVersion.String(), + Kind: sourcev1.BucketKind, + APIVersion: sourcev1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: "bucket", diff --git a/main.go b/main.go index a0abb7c8c..72ba918c9 100644 --- a/main.go +++ b/main.go @@ -247,7 +247,7 @@ func main() { }).SetupWithManagerAndOptions(mgr, controller.BucketReconcilerOptions{ RateLimiter: helper.GetRateLimiter(rateLimiterOptions), }); err != nil { - setupLog.Error(err, "unable to create controller", "controller", v1beta2.BucketKind) + setupLog.Error(err, "unable to create controller", "controller", v1.BucketKind) os.Exit(1) } diff --git a/pkg/azure/blob.go b/pkg/azure/blob.go index c95c9754d..24f778a85 100644 --- a/pkg/azure/blob.go +++ b/pkg/azure/blob.go @@ -39,7 +39,7 @@ import ( "github.com/fluxcd/pkg/masktoken" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( diff --git a/pkg/azure/blob_integration_test.go b/pkg/azure/blob_integration_test.go index 1d1040adb..704b4c0c3 100644 --- a/pkg/azure/blob_integration_test.go +++ b/pkg/azure/blob_integration_test.go @@ -44,7 +44,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) var ( diff --git a/pkg/azure/blob_test.go b/pkg/azure/blob_test.go index 240376f2b..6c77cd13d 100644 --- a/pkg/azure/blob_test.go +++ b/pkg/azure/blob_test.go @@ -37,7 +37,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" testlistener "github.com/fluxcd/source-controller/tests/listener" testproxy "github.com/fluxcd/source-controller/tests/proxy" ) diff --git a/pkg/minio/minio.go b/pkg/minio/minio.go index 7d2e8ae60..6c7da9727 100644 --- a/pkg/minio/minio.go +++ b/pkg/minio/minio.go @@ -30,7 +30,7 @@ import ( "github.com/minio/minio-go/v7/pkg/s3utils" corev1 "k8s.io/api/core/v1" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" ) // MinioClient is a minimal Minio client for fetching files from S3 compatible diff --git a/pkg/minio/minio_test.go b/pkg/minio/minio_test.go index db0ecfe9c..9a31d49b5 100644 --- a/pkg/minio/minio_test.go +++ b/pkg/minio/minio_test.go @@ -45,7 +45,7 @@ import ( "github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/sourceignore" - sourcev1 "github.com/fluxcd/source-controller/api/v1beta2" + sourcev1 "github.com/fluxcd/source-controller/api/v1" testlistener "github.com/fluxcd/source-controller/tests/listener" testproxy "github.com/fluxcd/source-controller/tests/proxy" ) From cd48373d6c96397a35a3a7c50c28f0d7f8bd3d30 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Fri, 23 Aug 2024 13:42:28 +0300 Subject: [PATCH 2/6] Update controller-gen to v0.16.1 Signed-off-by: Stefan Prodan --- Makefile | 2 +- .../source.toolkit.fluxcd.io_buckets.yaml | 74 ++--------- ...rce.toolkit.fluxcd.io_gitrepositories.yaml | 67 ++-------- .../source.toolkit.fluxcd.io_helmcharts.yaml | 60 ++------- ...ce.toolkit.fluxcd.io_helmrepositories.yaml | 66 ++-------- ...rce.toolkit.fluxcd.io_ocirepositories.yaml | 25 +--- config/rbac/role.yaml | 124 ++---------------- 7 files changed, 58 insertions(+), 360 deletions(-) diff --git a/Makefile b/Makefile index e36900987..4b1f9f7c5 100644 --- a/Makefile +++ b/Makefile 
@@ -38,7 +38,7 @@ FUZZ_TIME ?= 1m GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))' # API (doc) generation utilities -CONTROLLER_GEN_VERSION ?= v0.15.0 +CONTROLLER_GEN_VERSION ?= v0.16.1 GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113 # If gobin not set, create one on ./build and add to path. diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 2bb459d16..969aaaa02 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.1 name: buckets.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -90,19 +90,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the bucket. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - This field is only supported for the `generic` provider. properties: name: @@ -179,7 +176,6 @@ spec: Service for fetching temporary credentials to authenticate in a Bucket provider. - This field is only supported for the `aws` and `generic` providers. 
properties: certSecretRef: @@ -187,19 +183,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the STS endpoint. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - This field is only supported for the `ldap` provider. properties: name: @@ -320,16 +313,8 @@ spec: conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -370,12 +355,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. 
- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -566,22 +546,15 @@ spec: description: URL is the HTTP address of this artifact. type: string required: + - lastUpdateTime - path - url type: object conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -622,12 +595,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -737,19 +705,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the bucket. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - This field is only supported for the `generic` provider. properties: name: @@ -826,7 +791,6 @@ spec: Service for fetching temporary credentials to authenticate in a Bucket provider. - This field is only supported for the `aws` and `generic` providers. properties: certSecretRef: @@ -834,19 +798,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the STS endpoint. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - This field is only supported for the `ldap` provider. properties: name: @@ -967,16 +928,8 @@ spec: conditions: description: Conditions holds the conditions for the Bucket. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1017,12 +970,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml index f68757a0a..f12533ad6 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.1 name: gitrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -132,7 +132,6 @@ spec: description: |- Commit SHA to check out, takes precedence over all reference fields. - This can be combined with Branch to shallow clone the branch, in which the commit is expected to exist. type: string @@ -140,7 +139,6 @@ spec: description: |- Name of the reference to check out; takes precedence over Branch, Tag and SemVer. - It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string @@ -193,7 +191,6 @@ spec: description: |- Mode specifies which Git object(s) should be verified. - The variants "head" and "HEAD" both imply the same thing, i.e. verify the commit that the HEAD of the Git repository points to. The variant "head" solely exists to ensure backwards compatibility. @@ -275,16 +272,8 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -325,12 +314,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -683,22 +667,15 @@ spec: description: URL is the HTTP address of this artifact. type: string required: + - lastUpdateTime - path - url type: object conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -739,12 +716,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -784,6 +756,7 @@ spec: description: URL is the HTTP address of this artifact. type: string required: + - lastUpdateTime - path - url type: object @@ -952,7 +925,6 @@ spec: description: |- Commit SHA to check out, takes precedence over all reference fields. - This can be combined with Branch to shallow clone the branch, in which the commit is expected to exist. type: string @@ -960,7 +932,6 @@ spec: description: |- Name of the reference to check out; takes precedence over Branch, Tag and SemVer. 
- It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head" type: string @@ -1087,16 +1058,8 @@ spec: conditions: description: Conditions holds the conditions for the GitRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1137,12 +1100,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -1166,7 +1124,6 @@ spec: changed. It has the format of `:`, for example: `sha256:`. - Deprecated: Replaced with explicit fields for observed artifact content config in the status. 
type: string diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml index c07b6ade7..26e5a7e97 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.1 name: helmcharts.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -252,16 +252,8 @@ spec: conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -302,12 +294,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. 
- The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -538,22 +525,15 @@ spec: description: URL is the HTTP address of this artifact. type: string required: + - lastUpdateTime - path - url type: object conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -594,12 +574,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -903,16 +878,8 @@ spec: conditions: description: Conditions holds the conditions for the HelmChart. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -953,12 +920,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml index f199fcd20..a42f54fa6 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.1 name: helmrepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -89,19 +89,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the registry. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - It takes precedence over the values specified in the Secret referred to by `.spec.secretRef`. properties: @@ -243,16 +240,8 @@ spec: conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -293,12 +282,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -474,22 +458,15 @@ spec: description: URL is the HTTP address of this artifact. type: string required: + - lastUpdateTime - path - url type: object conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -530,12 +507,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -641,19 +613,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the registry. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - It takes precedence over the values specified in the Secret referred to by `.spec.secretRef`. 
properties: @@ -795,16 +764,8 @@ spec: conditions: description: Conditions holds the conditions for the HelmRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -845,12 +806,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml index a6098b72a..a60b7b416 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.15.0 + controller-gen.kubebuilder.io/version: v0.16.1 name: ocirepositories.source.toolkit.fluxcd.io spec: group: source.toolkit.fluxcd.io @@ -59,19 +59,16 @@ spec: CertSecretRef can be given the name of a Secret containing either or both of - - a PEM-encoded client certificate (`tls.crt`) and private key (`tls.key`); - a PEM-encoded CA certificate (`ca.crt`) - and whichever are supplied, will be used for connecting to the registry. The client cert and key are useful if you are authenticating with a certificate; the CA cert is useful if you are using a self-signed server certificate. The Secret must be of type `Opaque` or `kubernetes.io/tls`. - Note: Support for the `caFile`, `certFile` and `keyFile` keys have been deprecated. properties: @@ -313,16 +310,8 @@ spec: conditions: description: Conditions holds the conditions for the OCIRepository. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -363,12 +352,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -391,7 +375,6 @@ spec: artifact needs to be rebuilt. It has the format of `:`, for example: `sha256:`. - Deprecated: Replaced with explicit fields for observed artifact content config in the status. 
type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8bd710bef..65bd29831 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -23,125 +23,9 @@ rules: - source.toolkit.fluxcd.io resources: - buckets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - buckets/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - gitrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - gitrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmcharts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmcharts/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - helmrepositories - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/finalizers - verbs: - - create - - delete - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - - helmrepositories/status - verbs: - - get - - patch - - update -- apiGroups: - - source.toolkit.fluxcd.io - resources: - ocirepositories verbs: - create @@ -154,6 +38,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - 
buckets/finalizers + - gitrepositories/finalizers + - helmcharts/finalizers + - helmrepositories/finalizers - ocirepositories/finalizers verbs: - create @@ -164,6 +52,10 @@ rules: - apiGroups: - source.toolkit.fluxcd.io resources: + - buckets/status + - gitrepositories/status + - helmcharts/status + - helmrepositories/status - ocirepositories/status verbs: - get From 5acef7b169528488cae4620385439d44663e2343 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Fri, 23 Aug 2024 13:57:38 +0300 Subject: [PATCH 3/6] Add API docs for Bucket v1 Signed-off-by: Stefan Prodan --- README.md | 2 +- docs/spec/v1/README.md | 1 + docs/spec/v1/buckets.md | 1382 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 1384 insertions(+), 1 deletion(-) create mode 100644 docs/spec/v1/buckets.md diff --git a/README.md b/README.md index 393d35169..1838328d2 100644 --- a/README.md +++ b/README.md @@ -22,7 +22,7 @@ and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/component | [OCIRepository](docs/spec/v1beta2/ocirepositories.md) | `source.toolkit.fluxcd.io/v1beta2` | | [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` | | [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` | -| [Bucket](docs/spec/v1beta2/buckets.md) | `source.toolkit.fluxcd.io/v1beta2` | +| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` | ## Features diff --git a/docs/spec/v1/README.md b/docs/spec/v1/README.md index a87051a52..3a382959f 100644 --- a/docs/spec/v1/README.md +++ b/docs/spec/v1/README.md @@ -8,6 +8,7 @@ This is the v1 API specification for defining the desired state sources of Kuber + [GitRepository](gitrepositories.md) + [HelmRepository](helmrepositories.md) + [HelmChart](helmcharts.md) + + [Bucket](buckets.md) ## Implementation diff --git a/docs/spec/v1/buckets.md b/docs/spec/v1/buckets.md new file mode 100644 index 000000000..980a4b998 --- /dev/null +++ b/docs/spec/v1/buckets.md @@ -0,0 +1,1382 @@ +# Buckets + + + 
+The `Bucket` API defines a Source to produce an Artifact for objects from storage +solutions like Amazon S3, Google Cloud Storage buckets, or any other solution +with a S3 compatible API such as Minio, Alibaba Cloud OSS and others. + +## Example + +The following is an example of a Bucket. It creates a tarball (`.tar.gz`) +Artifact with the fetched objects from an object storage with an S3 +compatible API (e.g. [Minio](https://min.io)): + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: minio-bucket + namespace: default +spec: + interval: 5m0s + endpoint: minio.example.com + insecure: true + secretRef: + name: minio-bucket-secret + bucketName: example +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-bucket-secret + namespace: default +type: Opaque +stringData: + accesskey: + secretkey: +``` + +In the above example: + +- A Bucket named `minio-bucket` is created, indicated by the + `.metadata.name` field. +- The source-controller checks the object storage bucket every five minutes, + indicated by the `.spec.interval` field. +- It authenticates to the `minio.example.com` endpoint with + the static credentials from the `minio-secret` Secret data, indicated by + the `.spec.endpoint` and `.spec.secretRef.name` fields. +- A list of object keys and their [etags](https://en.wikipedia.org/wiki/HTTP_ETag) + in the `.spec.bucketName` bucket is compiled, while filtering the keys using + [default ignore rules](#default-exclusions). +- The digest (algorithm defaults to SHA256) of the list is used as Artifact + revision, reported in-cluster in the `.status.artifact.revision` field. +- When the current Bucket revision differs from the latest calculated revision, + all objects are fetched and archived. +- The new Artifact is reported in the `.status.artifact` field. + +You can run this example by saving the manifest into `bucket.yaml`, and +changing the Bucket and Secret values to target a Minio instance you have +control over. 
+ +**Note:** For more advanced examples targeting e.g. Amazon S3 or GCP, see +[Provider](#provider). + +1. Apply the resource on the cluster: + + ```sh + kubectl apply -f bucket.yaml + ``` + +2. Run `kubectl get buckets` to see the Bucket: + + ```console + NAME ENDPOINT AGE READY STATUS + minio-bucket minio.example.com 34s True stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + ``` + +3. Run `kubectl describe bucket minio-bucket` to see the [Artifact](#artifact) + and [Conditions](#conditions) in the Bucket's Status: + + ```console + ... + Status: + Artifact: + Digest: sha256:72aa638abb455ca5f9ef4825b949fd2de4d4be0a74895bf7ed2338622cd12686 + Last Update Time: 2024-02-01T23:43:38Z + Path: bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Revision: sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + Size: 38099 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.tar.gz + Conditions: + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: Ready + Last Transition Time: 2024-02-01T23:43:38Z + Message: stored artifact for revision 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + Observed Generation: 1 + Reason: Succeeded + Status: True + Type: ArtifactInStorage + Observed Generation: 1 + URL: http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz + Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal NewArtifact 82s source-controller stored artifact with 16 fetched files from 'example' bucket + ``` + +## Writing a Bucket spec + +As with all other Kubernetes config, a Bucket 
needs `apiVersion`, `kind`, and +`metadata` fields. The name of a Bucket object must be a valid +[DNS subdomain name](https://kubernetes.io/docs/concepts/overview/working-with-objects/names#dns-subdomain-names). + +A Bucket also needs a +[`.spec` section](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status). + +### Provider + +The `.spec.provider` field allows for specifying a Provider to enable provider +specific configurations, for example to communicate with a non-S3 compatible +API endpoint, or to change the authentication method. + +Supported options are: + +- [Generic](#generic) +- [AWS](#aws) +- [Azure](#azure) +- [GCP](#gcp) + +If you do not specify `.spec.provider`, it defaults to `generic`. + +#### Generic + +When a Bucket's `spec.provider` is set to `generic`, the controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go), which can communicate +with any Amazon S3 compatible object storage (including +[GCS](https://cloud.google.com/storage/docs/interoperability), +[Wasabi](https://wasabi-support.zendesk.com/hc/en-us/articles/360002079671-How-do-I-use-Minio-Client-with-Wasabi-), +and many others). + +The `generic` Provider _requires_ a [Secret reference](#secret-reference) to a +Secret with `.data.accesskey` and `.data.secretkey` values, used to +authenticate with static credentials. + +The Provider allows for specifying a region the bucket is in using the +[`.spec.region` field](#region), if required by the [Endpoint](#endpoint). 
+ +##### Generic example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: generic-insecure + namespace: default +spec: + provider: generic + interval: 5m0s + bucketName: podinfo + endpoint: minio.minio.svc.cluster.local:9000 + timeout: 60s + insecure: true + secretRef: + name: minio-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### AWS + +When a Bucket's `.spec.provider` field is set to `aws`, the source-controller +will attempt to communicate with the specified [Endpoint](#endpoint) using the +[Minio Client SDK](https://github.com/minio/minio-go). + +Without a [Secret reference](#secret-reference), authorization using +credentials retrieved from the AWS EC2 service is attempted by default. When +a reference is specified, it expects a Secret with `.data.accesskey` and +`.data.secretkey` values, used to authenticate with static credentials. + +The Provider allows for specifying the +[Amazon AWS Region](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions) +using the [`.spec.region` field](#region). + +##### AWS EC2 example + +**Note:** On EKS you have to create an [IAM role](#aws-iam-role-example) for +the source-controller service account that grants access to the bucket. + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + timeout: 30s +``` + +##### AWS IAM role example + +Replace `` with the specified `.spec.bucketName`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:GetObject", + "Resource": "arn:aws:s3:::/*" + }, + { + "Sid": "", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::" + } + ] +} +``` + +##### AWS static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: aws + namespace: default +spec: + interval: 5m0s + provider: aws + bucketName: podinfo + endpoint: s3.amazonaws.com + region: us-east-1 + secretRef: + name: aws-credentials +--- +apiVersion: v1 +kind: Secret +metadata: + name: aws-credentials + namespace: default +type: Opaque +data: + accesskey: + secretkey: +``` + +#### Azure + +When a Bucket's `.spec.provider` is set to `azure`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Azure Blob Storage SDK for Go](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob). + +Without a [Secret reference](#secret-reference), authentication using a chain +with: + +- [Environment credentials](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential) +- [Workload Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.3.0-beta.4#WorkloadIdentityCredential) +- [Managed Identity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) + with the `AZURE_CLIENT_ID` +- Managed Identity with a system-assigned identity + +is attempted by default. If no chain can be established, the bucket +is assumed to be publicly reachable. + +When a reference is specified, it expects a Secret with one of the following +sets of `.data` fields: + +- `tenantId`, `clientId` and `clientSecret` for authenticating a Service + Principal with a secret. 
+- `tenantId`, `clientId` and `clientCertificate` (plus optionally + `clientCertificatePassword` and/or `clientCertificateSendChain`) for + authenticating a Service Principal with a certificate. +- `clientId` for authenticating using a Managed Identity. +- `accountKey` for authenticating using a + [Shared Key](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob#SharedKeyCredential). +- `sasKey` for authenticating using a [SAS Token](https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview) + +For any Managed Identity and/or Azure Active Directory authentication method, +the base URL can be configured using `.data.authorityHost`. If not supplied, +[`AzurePublicCloud` is assumed](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AuthorityHost). + +##### Azure example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-public + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: podinfo + endpoint: https://podinfoaccount.blob.core.windows.net + timeout: 30s +``` + +##### Azure Service Principal Secret example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-secret + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default +type: Opaque +data: + tenantId: + clientId: + clientSecret: +``` + +##### Azure Service Principal Certificate example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-service-principal-cert + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-sp-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-sp-auth + namespace: default 
+type: Opaque +data: + tenantId: + clientId: + clientCertificate: + # Plus optionally + clientCertificatePassword: + clientCertificateSendChain: # either "1" or "true" +``` + +##### Azure Managed Identity with Client ID example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-managed-identity + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-smi-auth +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-smi-auth + namespace: default +type: Opaque +data: + clientId: +``` + +##### Azure Blob Shared Key example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-shared-key + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + accountKey: +``` + +##### Workload Identity + +If you have [Workload Identity](https://azure.github.io/azure-workload-identity/docs/installation/managed-clusters.html) +set up on your cluster, you need to create an Azure Identity and give it +access to Azure Blob Storage. + +```shell +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups//providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" +``` + +Establish a federated identity between the Identity and the source-controller +ServiceAccount. 
+ +```shell +export SERVICE_ACCOUNT_ISSUER="$(az aks show --resource-group --name --query "oidcIssuerProfile.issuerUrl" -otsv)" + +az identity federated-credential create \ + --name "kubernetes-federated-credential" \ + --identity-name "${IDENTITY_NAME}" \ + --resource-group "${RESOURCE_GROUP}" \ + --issuer "${SERVICE_ACCOUNT_ISSUER}" \ + --subject "system:serviceaccount:flux-system:source-controller" +``` + +Add a patch to label and annotate the source-controller Deployment and ServiceAccount +correctly so that it can match an identity binding: + +```yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - gotk-components.yaml + - gotk-sync.yaml +patches: + - patch: |- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: source-controller + namespace: flux-system + annotations: + azure.workload.identity/client-id: + labels: + azure.workload.identity/use: "true" + - patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: source-controller + namespace: flux-system + labels: + azure.workload.identity/use: "true" + spec: + template: + metadata: + labels: + azure.workload.identity/use: "true" +``` + +If you have set up Workload Identity correctly and labeled the source-controller +Deployment and ServiceAccount, then you don't need to reference a Secret. For more information, +please see [documentation](https://azure.github.io/azure-workload-identity/docs/quick-start.html). + +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testsas + endpoint: https://testfluxsas.blob.core.windows.net +``` + +##### Deprecated: Managed Identity with AAD Pod Identity + +If you are using [aad pod identity](https://azure.github.io/aad-pod-identity/docs), +You need to create an Azure Identity and give it access to Azure Blob Storage. 
+ +```sh +export IDENTITY_NAME="blob-access" + +az role assignment create --role "Storage Blob Data Reader" \ +--assignee-object-id "$(az identity show -n $IDENTITY_NAME -o tsv --query principalId -g $RESOURCE_GROUP)" \ +--scope "/subscriptions//resourceGroups/$RESOURCE_GROUP/providers/Microsoft.Storage/storageAccounts//blobServices/default/containers/" + +export IDENTITY_CLIENT_ID="$(az identity show -n ${IDENTITY_NAME} -g ${RESOURCE_GROUP} -otsv --query clientId)" +export IDENTITY_RESOURCE_ID="$(az identity show -n ${IDENTITY_NAME} -otsv --query id)" +``` + +Create an AzureIdentity object that references the identity created above: + +```yaml +--- +apiVersion: aadpodidentity.k8s.io/v1 +kind: AzureIdentity +metadata: + name: # source-controller label will match this name + namespace: flux-system +spec: + clientID: + resourceID: + type: 0 # user-managed identity +``` + +Create an AzureIdentityBinding object that binds Pods with a specific selector +with the AzureIdentity created: + +```yaml +apiVersion: "aadpodidentity.k8s.io/v1" +kind: AzureIdentityBinding +metadata: + name: ${IDENTITY_NAME}-binding +spec: + azureIdentity: ${IDENTITY_NAME} + selector: ${IDENTITY_NAME} +``` + +Label the source-controller Deployment correctly so that it can match an identity binding: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kustomize-controller + namespace: flux-system +spec: + template: + metadata: + labels: + aadpodidbinding: ${IDENTITY_NAME} # match the AzureIdentity name +``` + +If you have set up aad-pod-identity correctly and labeled the source-controller +Deployment, then you don't need to reference a Secret. 
+ +```yaml +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-bucket + namespace: flux-system +spec: + interval: 5m0s + provider: azure + bucketName: testsas + endpoint: https://testfluxsas.blob.core.windows.net +``` + +##### Azure Blob SAS Token example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: azure-sas-token + namespace: default +spec: + interval: 5m0s + provider: azure + bucketName: + endpoint: https://.blob.core.windows.net + secretRef: + name: azure-key +--- +apiVersion: v1 +kind: Secret +metadata: + name: azure-key + namespace: default +type: Opaque +data: + sasKey: +``` + +The `sasKey` only contains the SAS token e.g +`?sv=2020-08-0&ss=bfqt&srt=co&sp=rwdlacupitfx&se=2022-05-26T21:55:35Z&st=2022-05...`. +The leading question mark (`?`) is optional. The query values from the `sasKey` +data field in the Secrets gets merged with the ones in the `.spec.endpoint` of +the Bucket. If the same key is present in the both of them, the value in the +`sasKey` takes precedence. + +**Note:** The SAS token has an expiry date, and it must be updated before it +expires to allow Flux to continue to access Azure Storage. It is allowed to use +an account-level or container-level SAS token. + +The minimum permissions for an account-level SAS token are: + +- Allowed services: `Blob` +- Allowed resource types: `Container`, `Object` +- Allowed permissions: `Read`, `List` + +The minimum permissions for a container-level SAS token are: + +- Allowed permissions: `Read`, `List` + +Refer to the [Azure documentation](https://learn.microsoft.com/en-us/rest/api/storageservices/create-account-sas#blob-service) for a full overview on permissions. + +#### GCP + +When a Bucket's `.spec.provider` is set to `gcp`, the source-controller will +attempt to communicate with the specified [Endpoint](#endpoint) using the +[Google Client SDK](https://github.com/googleapis/google-api-go-client). 
+ +Without a [Secret reference](#secret-reference), authorization using a +workload identity is attempted by default. The workload identity is obtained +using the `GOOGLE_APPLICATION_CREDENTIALS` environment variable, falling back +to the Google Application Credential file in the config directory. +When a reference is specified, it expects a Secret with a `.data.serviceaccount` +value with a GCP service account JSON file. + +The Provider allows for specifying the +[Bucket location](https://cloud.google.com/storage/docs/locations) using the +[`.spec.region` field](#region). + +##### GCP example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-workload-identity + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: podinfo + endpoint: storage.googleapis.com + region: us-east-1 + timeout: 30s +``` + +##### GCP static auth example + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: gcp-secret + namespace: default +spec: + interval: 5m0s + provider: gcp + bucketName: + endpoint: storage.googleapis.com + region: + secretRef: + name: gcp-service-account +--- +apiVersion: v1 +kind: Secret +metadata: + name: gcp-service-account + namespace: default +type: Opaque +data: + serviceaccount: +``` + +Where the (base64 decoded) value of `.data.serviceaccount` looks like this: + +```json +{ + "type": "service_account", + "project_id": "example", + "private_key_id": "28qwgh3gdf5hj3gb5fj3gsu5yfgh34f45324568hy2", + "private_key": "-----BEGIN PRIVATE 
KEY-----\nHwethgy123hugghhhbdcu6356dgyjhsvgvGFDHYgcdjbvcdhbsx63c\n76tgycfehuhVGTFYfw6t7ydgyVgydheyhuggycuhejwy6t35fthyuhegvcetf\nTFUHGTygghubhxe65ygt6tgyedgy326hucyvsuhbhcvcsjhcsjhcsvgdtHFCGi\nHcye6tyyg3gfyuhchcsbhygcijdbhyyTF66tuhcevuhdcbhuhhvftcuhbh3uh7t6y\nggvftUHbh6t5rfthhuGVRtfjhbfcrd5r67yuhuvgFTYjgvtfyghbfcdrhyjhbfctfdfyhvfg\ntgvggtfyghvft6tugvTF5r66tujhgvfrtyhhgfct6y7ytfr5ctvghbhhvtghhjvcttfycf\nffxfghjbvgcgyt67ujbgvctfyhVC7uhvgcyjvhhjvyujc\ncgghgvgcfhgg765454tcfthhgftyhhvvyvvffgfryyu77reredswfthhgfcftycfdrttfhf/\n-----END PRIVATE KEY-----\n", + "client_email": "test@example.iam.gserviceaccount.com", + "client_id": "32657634678762536746", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "token_uri": "https://oauth2.googleapis.com/token", + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%40podinfo.iam.gserviceaccount.com" +} +``` + +### Interval + +`.spec.interval` is a required field that specifies the interval which the +object storage bucket must be consulted at. + +After successfully reconciling a Bucket object, the source-controller requeues +the object for inspection after the specified interval. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `10m0s` to look at the object storage bucket every 10 minutes. + +If the `.metadata.generation` of a resource changes (due to e.g. the apply of a +change to the spec), this is handled instantly outside the interval window. + +**Note:** The controller can be configured to apply a jitter to the interval in +order to distribute the load more evenly when multiple Bucket objects are set up +with the same interval. For more information, please refer to the +[source-controller configuration options](https://fluxcd.io/flux/components/source/options/). 
+ +### Endpoint + +`.spec.endpoint` is a required field that specifies the HTTP/S object storage +endpoint to connect to and fetch objects from. Connecting to an (insecure) +HTTP endpoint requires enabling [`.spec.insecure`](#insecure). + +Some endpoints require the specification of a [`.spec.region`](#region), +see [Provider](#provider) for more (provider specific) examples. + +### STS + +`.spec.sts` is an optional field for specifying the Security Token Service +configuration. A Security Token Service (STS) is a web service that issues +temporary security credentials. By adding this field, one may specify the +STS endpoint from where temporary credentials will be fetched. + +This field is only supported for the `aws` and `generic` bucket [providers](#provider). + +If using `.spec.sts`, the following fields are required: + +- `.spec.sts.provider`, the Security Token Service provider. The only supported + option for the `generic` bucket provider is `ldap`. The only supported option + for the `aws` bucket provider is `aws`. +- `.spec.sts.endpoint`, the HTTP/S endpoint of the Security Token Service. In + the case of `aws` this can be `https://sts.amazonaws.com`, or a Regional STS + Endpoint, or an Interface Endpoint created inside a VPC. In the case of + `ldap` this must be the LDAP server endpoint. + +When using the `ldap` provider, the following fields may also be specified: + +- `.spec.sts.secretRef.name`, the name of the Secret containing the LDAP + credentials. The Secret must contain the following keys: + - `username`, the username to authenticate with. + - `password`, the password to authenticate with. +- `.spec.sts.certSecretRef.name`, the name of the Secret containing the + TLS configuration for communicating with the STS endpoint. The contents + of this Secret must follow the same structure of + [`.spec.certSecretRef.name`](#cert-secret-reference). 
+ +If [`.spec.proxySecretRef.name`](#proxy-secret-reference) is specified, +the proxy configuration will be used for commucating with the STS endpoint. + +Example for the `ldap` provider: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + sts: + provider: ldap + endpoint: https://ldap.example.com + secretRef: + name: ldap-credentials + certSecretRef: + name: ldap-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-credentials + namespace: example +type: Opaque +stringData: + username: + password: +--- +apiVersion: v1 +kind: Secret +metadata: + name: ldap-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Bucket name + +`.spec.bucketName` is a required field that specifies which object storage +bucket on the [Endpoint](#endpoint) objects should be fetched from. + +See [Provider](#provider) for more (provider specific) examples. + +### Region + +`.spec.region` is an optional field to specify the region a +[`.spec.bucketName`](#bucket-name) is located in. + +See [Provider](#provider) for more (provider specific) examples. + +### Cert secret reference + +`.spec.certSecretRef.name` is an optional field to specify a secret containing +TLS certificate data. The secret can contain the following keys: + +* `tls.crt` and `tls.key`, to specify the client certificate and private key used +for TLS client authentication. These must be used in conjunction, i.e. +specifying one without the other will lead to an error. +* `ca.crt`, to specify the CA certificate used to verify the server, which is +required if the server is using a self-signed certificate. + +If the server is using a self-signed certificate and has TLS client +authentication enabled, all three values are required. + +The Secret should be of type `Opaque` or `kubernetes.io/tls`. 
All the files in +the Secret are expected to be [PEM-encoded][pem-encoding]. Assuming you have +three files; `client.key`, `client.crt` and `ca.crt` for the client private key, +client certificate and the CA certificate respectively, you can generate the +required Secret using the `flux create secret tls` command: + +```sh +flux create secret tls minio-tls --tls-key-file=client.key --tls-crt-file=client.crt --ca-crt-file=ca.crt +``` + +If TLS client authentication is not required, you can generate the secret with: + +```sh +flux create secret tls minio-tls --ca-crt-file=ca.crt +``` + +This API is only supported for the `generic` [provider](#provider). + +Example usage: + +```yaml +--- +apiVersion: source.toolkit.fluxcd.io/v1 +kind: Bucket +metadata: + name: example + namespace: example +spec: + interval: 5m + bucketName: example + provider: generic + endpoint: minio.example.com + certSecretRef: + name: minio-tls +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-tls + namespace: example +type: kubernetes.io/tls # or Opaque +stringData: + tls.crt: + tls.key: + ca.crt: +``` + +### Proxy secret reference + +`.spec.proxySecretRef.name` is an optional field used to specify the name of a +Secret that contains the proxy settings for the object. These settings are used +for all the remote operations related to the Bucket. +The Secret can contain three keys: + +- `address`, to specify the address of the proxy server. This is a required key. +- `username`, to specify the username to use if the proxy server is protected by + basic authentication. This is an optional key. +- `password`, to specify the password to use if the proxy server is protected by + basic authentication. This is an optional key. 
+ +Example: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: http-proxy +type: Opaque +stringData: + address: http://proxy.com + username: mandalorian + password: grogu +``` + +Proxying can also be configured in the source-controller Deployment directly by +using the standard environment variables such as `HTTPS_PROXY`, `ALL_PROXY`, etc. + +`.spec.proxySecretRef.name` takes precedence over all environment variables. + +### Insecure + +`.spec.insecure` is an optional field to allow connecting to an insecure (HTTP) +[endpoint](#endpoint), if set to `true`. The default value is `false`, +denying insecure (HTTP) connections. + +### Timeout + +`.spec.timeout` is an optional field to specify a timeout for object storage +fetch operations. The value must be in a +[Go recognized duration string format](https://pkg.go.dev/time#ParseDuration), +e.g. `1m30s` for a timeout of one minute and thirty seconds. +The default value is `60s`. + +### Secret reference + +`.spec.secretRef.name` is an optional field to specify a name reference to a +Secret in the same namespace as the Bucket, containing authentication +credentials for the object storage. For some `.spec.provider` implementations +the presence of the field is required, see [Provider](#provider) for more +details and examples. + +### Prefix + +`.spec.prefix` is an optional field to enable server-side filtering +of files in the Bucket. + +**Note:** The server-side filtering works only with the `generic`, `aws` +and `gcp` [provider](#provider) and is preferred over [`.spec.ignore`](#ignore) +as a more efficient way of excluding files. + +### Ignore + +`.spec.ignore` is an optional field to specify rules in [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format). Storage +objects which keys match the defined rules are excluded while fetching. 
+ +When specified, `.spec.ignore` overrides the [default exclusion +list](#default-exclusions), and may overrule the [`.sourceignore` file +exclusions](#sourceignore-file). See [excluding files](#excluding-files) +for more information. + +### Suspend + +`.spec.suspend` is an optional field to suspend the reconciliation of a Bucket. +When set to `true`, the controller will stop reconciling the Bucket, and changes +to the resource or in the object storage bucket will not result in a new +Artifact. When the field is set to `false` or removed, it will resume. + +For practical information, see +[suspending and resuming](#suspending-and-resuming). + +## Working with Buckets + +### Excluding files + +By default, storage bucket objects which match the [default exclusion +rules](#default-exclusions) are excluded while fetching. It is possible to +overwrite and/or overrule the default exclusions using a file in the bucket +and/or an in-spec set of rules. + +#### `.sourceignore` file + +Excluding files is possible by adding a `.sourceignore` file in the root of the +object storage bucket. The `.sourceignore` file follows [the `.gitignore` +pattern format](https://git-scm.com/docs/gitignore#_pattern_format), and +pattern entries may overrule [default exclusions](#default-exclusions). + +#### Ignore spec + +Another option is to define the exclusions within the Bucket spec, using the +[`.spec.ignore` field](#ignore). Specified rules override the +[default exclusion list](#default-exclusions), and may overrule `.sourceignore` +file exclusions. 
+

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: <bucket-name>
spec:
  ignore: |
    # exclude all
    /*
    # include deploy dir
    !/deploy
    # exclude file extensions from deploy dir
    /deploy/**/*.md
    /deploy/**/*.txt
```

### Triggering a reconcile

To manually tell the source-controller to reconcile a Bucket outside the
[specified interval window](#interval), a Bucket can be annotated with
`reconcile.fluxcd.io/requestedAt: <arbitrary value>`. Annotating the resource
queues the Bucket for reconciliation if the `<arbitrary value>` differs from
the last value the controller acted on, as reported in
[`.status.lastHandledReconcileAt`](#last-handled-reconcile-at).

Using `kubectl`:

```sh
kubectl annotate --field-manager=flux-client-side-apply --overwrite bucket/<bucket-name> reconcile.fluxcd.io/requestedAt="$(date +%s)"
```

Using `flux`:

```sh
flux reconcile source bucket <bucket-name>
```

### Waiting for `Ready`

When a change is applied, it is possible to wait for the Bucket to reach a
[ready state](#ready-bucket) using `kubectl`:

```sh
kubectl wait bucket/<bucket-name> --for=condition=ready --timeout=1m
```

### Suspending and resuming

When you find yourself in a situation where you temporarily want to pause the
reconciliation of a Bucket, you can suspend it using the [`.spec.suspend`
field](#suspend).

#### Suspend a Bucket

In your YAML declaration:

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: <bucket-name>
spec:
  suspend: true
```

Using `kubectl`:

```sh
kubectl patch bucket <bucket-name> --field-manager=flux-client-side-apply -p '{"spec": {"suspend" : true }}'
```

Using `flux`:

```sh
flux suspend source bucket <bucket-name>
```

**Note:** When a Bucket has an Artifact and is suspended, and this Artifact
later disappears from the storage due to e.g. the source-controller Pod being
evicted from a Node, this will not be reflected in the Bucket's Status until it
is resumed. 
+

#### Resume a Bucket

In your YAML declaration, comment out (or remove) the field:

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: <bucket-name>
spec:
  # suspend: true
```

**Note:** Setting the field value to `false` has the same effect as removing
it, but does not allow for "hot patching" using e.g. `kubectl` while practicing
GitOps; as the manually applied patch would be overwritten by the declared
state in Git.

Using `kubectl`:

```sh
kubectl patch bucket <bucket-name> --field-manager=flux-client-side-apply -p '{"spec" : {"suspend" : false }}'
```

Using `flux`:

```sh
flux resume source bucket <bucket-name>
```

### Debugging a Bucket

There are several ways to gather information about a Bucket for debugging
purposes.

#### Describe the Bucket

Describing a Bucket using `kubectl describe bucket <bucket-name>` displays the
latest recorded information for the resource in the `Status` and `Events`
sections:

```console
...
Status:
...
  Conditions:
    Last Transition Time:  2024-02-02T13:26:55Z
    Message:               processing object: new generation 1 -> 2
    Observed Generation:   2
    Reason:                ProgressingWithRetry
    Status:                True
    Type:                  Reconciling
    Last Transition Time:  2024-02-02T13:26:55Z
    Message:               bucket 'my-new-bucket' does not exist
    Observed Generation:   2
    Reason:                BucketOperationFailed
    Status:                False
    Type:                  Ready
    Last Transition Time:  2024-02-02T13:26:55Z
    Message:               bucket 'my-new-bucket' does not exist
    Observed Generation:   2
    Reason:                BucketOperationFailed
    Status:                True
    Type:                  FetchFailed
  Observed Generation:     1
  URL:                     http://source-controller.source-system.svc.cluster.local./bucket/default/minio-bucket/latest.tar.gz
Events:
  Type     Reason                 Age                 From               Message
  ----     ------                 ----                ----               -------
  Warning  BucketOperationFailed  37s (x11 over 42s)  source-controller  bucket 'my-new-bucket' does not exist
```

#### Trace emitted Events

To view events for specific Bucket(s), `kubectl events` can be used in
combination with `--for` 
to list the Events for specific objects. For example,
running

```sh
kubectl events --for Bucket/<bucket-name>
```

lists

```console
LAST SEEN   TYPE      REASON                 OBJECT                MESSAGE
2m30s       Normal    NewArtifact            bucket/<bucket-name>  fetched 16 files with revision from 'my-new-bucket'
36s         Normal    ArtifactUpToDate       bucket/<bucket-name>  artifact up-to-date with remote revision: 'sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
18s         Warning   BucketOperationFailed  bucket/<bucket-name>  bucket 'my-new-bucket' does not exist
```

Besides being reported in Events, the reconciliation errors are also logged by
the controller. The Flux CLI offers commands for filtering the logs for a
specific Bucket, e.g. `flux logs --level=error --kind=Bucket --name=<bucket-name>`.

## Bucket Status

### Artifact

The Bucket reports the latest synchronized state from the object storage
bucket as an Artifact object in the `.status.artifact` of the resource.

The Artifact file is a gzip compressed TAR archive
(`<calculated revision>.tar.gz`), and can be retrieved in-cluster from the
`.status.artifact.url` HTTP address. 
+

#### Artifact example

```yaml
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: <bucket-name>
status:
  artifact:
    digest: sha256:cbec34947cc2f36dee8adcdd12ee62ca6a8a36699fc6e56f6220385ad5bd421a
    lastUpdateTime: "2024-01-28T10:30:30Z"
    path: bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
    revision: sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2
    size: 38099
    url: http://source-controller.<namespace>.svc.cluster.local./bucket/<namespace>/<bucket-name>/c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2.tar.gz
```

#### Default exclusions

The following files and extensions are excluded from the Artifact by
default:

- Git files (`.git/, .gitignore, .gitmodules, .gitattributes`)
- File extensions (`.jpg, .jpeg, .gif, .png, .wmv, .flv, .tar.gz, .zip`)
- CI configs (`.github/, .circleci/, .travis.yml, .gitlab-ci.yml, appveyor.yml, .drone.yml, cloudbuild.yaml, codeship-services.yml, codeship-steps.yml`)
- CLI configs (`.goreleaser.yml, .sops.yaml`)
- Flux v1 config (`.flux.yaml`)

To define your own exclusion rules, see [excluding files](#excluding-files).

### Conditions

A Bucket enters various states during its lifecycle, reflected as
[Kubernetes Conditions][typical-status-properties].
It can be [reconciling](#reconciling-bucket) while fetching storage objects,
it can be [ready](#ready-bucket), or it can [fail during
reconciliation](#failed-bucket).

The Bucket API is compatible with the [kstatus specification][kstatus-spec],
and reports `Reconciling` and `Stalled` conditions where applicable to
provide better (timeout) support to solutions polling the Bucket to become
`Ready`.

#### Reconciling Bucket

The source-controller marks a Bucket as _reconciling_ when one of the following
is true:

- There is no current Artifact for the Bucket, or the reported Artifact is
  determined to have disappeared from the storage. 
+- The generation of the Bucket is newer than the [Observed Generation](#observed-generation). +- The newly calculated Artifact revision differs from the current Artifact. + +When the Bucket is "reconciling", the `Ready` Condition status becomes +`Unknown` when the controller detects drift, and the controller adds a Condition +with the following attributes to the Bucket's `.status.conditions`: + +- `type: Reconciling` +- `status: "True"` +- `reason: Progressing` | `reason: ProgressingWithRetry` + +If the reconciling state is due to a new revision, an additional Condition is +added with the following attributes: + +- `type: ArtifactOutdated` +- `status: "True"` +- `reason: NewRevision` + +Both Conditions have a ["negative polarity"][typical-status-properties], +and are only present on the Bucket while their status value is `"True"`. + +#### Ready Bucket + +The source-controller marks a Bucket as _ready_ when it has the following +characteristics: + +- The Bucket reports an [Artifact](#artifact). +- The reported Artifact exists in the controller's Artifact storage. +- The Bucket was able to communicate with the Bucket's object storage endpoint + using the current spec. +- The revision of the reported Artifact is up-to-date with the latest + calculated revision of the object storage bucket. + +When the Bucket is "ready", the controller sets a Condition with the following +attributes in the Bucket's `.status.conditions`: + +- `type: Ready` +- `status: "True"` +- `reason: Succeeded` + +This `Ready` Condition will retain a status value of `"True"` until the Bucket +is marked as [reconciling](#reconciling-bucket), or e.g. a +[transient error](#failed-bucket) occurs due to a temporary network issue. 
+ +When the Bucket Artifact is archived in the controller's Artifact +storage, the controller sets a Condition with the following attributes in the +Bucket's `.status.conditions`: + +- `type: ArtifactInStorage` +- `status: "True"` +- `reason: Succeeded` + +This `ArtifactInStorage` Condition will retain a status value of `"True"` until +the Artifact in the storage no longer exists. + +#### Failed Bucket + +The source-controller may get stuck trying to produce an Artifact for a Bucket +without completing. This can occur due to some of the following factors: + +- The object storage [Endpoint](#endpoint) is temporarily unavailable. +- The specified object storage bucket does not exist. +- The [Secret reference](#secret-reference) contains a reference to a + non-existing Secret. +- The credentials in the referenced Secret are invalid. +- The Bucket spec contains a generic misconfiguration. +- A storage related failure when storing the artifact. + +When this happens, the controller sets the `Ready` Condition status to `False`, +and adds a Condition with the following attributes to the Bucket's +`.status.conditions`: + +- `type: FetchFailed` | `type: StorageOperationFailed` +- `status: "True"` +- `reason: AuthenticationFailed` | `reason: BucketOperationFailed` + +This condition has a ["negative polarity"][typical-status-properties], +and is only present on the Bucket while the status value is `"True"`. +There may be more arbitrary values for the `reason` field to provide accurate +reason for a condition. + +While the Bucket has this Condition, the controller will continue to attempt +to produce an Artifact for the resource with an exponential backoff, until +it succeeds and the Bucket is marked as [ready](#ready-bucket). + +Note that a Bucket can be [reconciling](#reconciling-bucket) while failing at +the same time, for example due to a newly introduced configuration issue in the +Bucket spec. 
When a reconciliation fails, the `Reconciling` Condition reason
would be `ProgressingWithRetry`. When the reconciliation is performed again
after the failure, the reason is updated to `Progressing`.

### Observed Ignore

The source-controller reports an observed ignore in the Bucket's
`.status.observedIgnore`. The observed ignore is the latest `.spec.ignore` value
which resulted in a [ready state](#ready-bucket), or stalled due to an error
it cannot recover from without human intervention. The value is the same as the
[ignore in spec](#ignore). It indicates the ignore rules used in building the
current artifact in storage.

Example:
```yaml
status:
  ...
  observedIgnore: |
    hpa.yaml
    build
  ...
```

### Observed Generation

The source-controller reports an
[observed generation][typical-status-properties]
in the Bucket's `.status.observedGeneration`. The observed generation is the
latest `.metadata.generation` which resulted in either a [ready state](#ready-bucket),
or stalled due to an error it cannot recover from without human
intervention.

### Last Handled Reconcile At

The source-controller reports the last `reconcile.fluxcd.io/requestedAt`
annotation value it acted on in the `.status.lastHandledReconcileAt` field.

For practical information about this field, see [triggering a
reconcile](#triggering-a-reconcile). 
+ +[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties +[kstatus-spec]: https://github.com/kubernetes-sigs/cli-utils/tree/master/pkg/kstatus From 297b5f19414f0d8ed8de7ad63ec31829a58e183c Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Fri, 23 Aug 2024 14:34:27 +0300 Subject: [PATCH 4/6] Update samples to Bucket v1 Signed-off-by: Stefan Prodan --- .../{source_v1beta2_bucket.yaml => source_v1_bucket.yaml} | 2 +- config/testdata/bucket/source.yaml | 2 +- config/testdata/helmchart-from-bucket/source.yaml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename config/samples/{source_v1beta2_bucket.yaml => source_v1_bucket.yaml} (81%) diff --git a/config/samples/source_v1beta2_bucket.yaml b/config/samples/source_v1_bucket.yaml similarity index 81% rename from config/samples/source_v1beta2_bucket.yaml rename to config/samples/source_v1_bucket.yaml index cbc211aa6..f09cbe213 100644 --- a/config/samples/source_v1beta2_bucket.yaml +++ b/config/samples/source_v1_bucket.yaml @@ -1,4 +1,4 @@ -apiVersion: source.toolkit.fluxcd.io/v1beta2 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: bucket-sample diff --git a/config/testdata/bucket/source.yaml b/config/testdata/bucket/source.yaml index 459e7400a..bd3097ee2 100644 --- a/config/testdata/bucket/source.yaml +++ b/config/testdata/bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: podinfo diff --git a/config/testdata/helmchart-from-bucket/source.yaml b/config/testdata/helmchart-from-bucket/source.yaml index 0609cf541..814305d13 100644 --- a/config/testdata/helmchart-from-bucket/source.yaml +++ b/config/testdata/helmchart-from-bucket/source.yaml @@ -1,5 +1,5 @@ --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: Bucket metadata: name: 
charts @@ -13,7 +13,7 @@ spec: secretRef: name: minio-credentials --- -apiVersion: source.toolkit.fluxcd.io/v1beta1 +apiVersion: source.toolkit.fluxcd.io/v1 kind: HelmChart metadata: name: helmchart-bucket From 2fa8c58d9f810f39ab0cfafdb08ee8991063072b Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Sat, 24 Aug 2024 11:43:54 +0300 Subject: [PATCH 5/6] Remove unused `accessFrom` field from Bucket v1 Signed-off-by: Stefan Prodan --- api/v1/bucket_types.go | 7 ---- api/v1/zz_generated.deepcopy.go | 5 --- api/v1beta2/bucket_types.go | 23 +++++++++++-- .../source.toolkit.fluxcd.io_buckets.yaml | 28 ---------------- docs/api/v1/source.md | 32 ------------------- 5 files changed, 20 insertions(+), 75 deletions(-) diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go index 939519eed..2c733a6cc 100644 --- a/api/v1/bucket_types.go +++ b/api/v1/bucket_types.go @@ -21,7 +21,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/meta" ) @@ -141,12 +140,6 @@ type BucketSpec struct { // Bucket. // +optional Suspend bool `json:"suspend,omitempty"` - - // AccessFrom specifies an Access Control List for allowing cross-namespace - // references to this object. 
- // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 - // +optional - AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"` } // BucketSTSSpec specifies the required configuration to use a Security Token diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index 6326ea211..12e537fae 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -173,11 +173,6 @@ func (in *BucketSpec) DeepCopyInto(out *BucketSpec) { *out = new(string) **out = **in } - if in.AccessFrom != nil { - in, out := &in.AccessFrom, &out.AccessFrom - *out = new(acl.AccessFrom) - (*in).DeepCopyInto(*out) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec. diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index 612c6db06..a7641c750 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -33,21 +33,38 @@ const ( ) const ( + // BucketProviderGeneric for any S3 API compatible storage Bucket. + BucketProviderGeneric string = "generic" + // BucketProviderAmazon for an AWS S3 object storage Bucket. + // Provides support for retrieving credentials from the AWS EC2 service. + BucketProviderAmazon string = "aws" + // BucketProviderGoogle for a Google Cloud Storage Bucket. + // Provides support for authentication using a workload identity. + BucketProviderGoogle string = "gcp" + // BucketProviderAzure for an Azure Blob Storage Bucket. + // Provides support for authentication using a Service Principal, + // Managed Identity or Shared Key. + BucketProviderAzure string = "azure" + // GenericBucketProvider for any S3 API compatible storage Bucket. - // Deprecated: use v1.BucketProviderGeneric. + // + // Deprecated: use BucketProviderGeneric. GenericBucketProvider string = apiv1.BucketProviderGeneric // AmazonBucketProvider for an AWS S3 object storage Bucket. 
// Provides support for retrieving credentials from the AWS EC2 service. + // // Deprecated: use v1.BucketProviderAmazon. AmazonBucketProvider string = apiv1.BucketProviderAmazon // GoogleBucketProvider for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. - // Deprecated: use v1.BucketProviderGoogle. + // + // Deprecated: use BucketProviderGoogle. GoogleBucketProvider string = apiv1.BucketProviderGoogle // AzureBucketProvider for an Azure Blob Storage Bucket. // Provides support for authentication using a Service Principal, // Managed Identity or Shared Key. - // Deprecated: use v1.BucketProviderAzure. + // + // Deprecated: use BucketProviderAzure. AzureBucketProvider string = apiv1.BucketProviderAzure ) diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml index 969aaaa02..3d8f812cc 100644 --- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml +++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml @@ -54,34 +54,6 @@ spec: BucketSpec specifies the required configuration to produce an Artifact for an object storage bucket. properties: - accessFrom: - description: |- - AccessFrom specifies an Access Control List for allowing cross-namespace - references to this object. - NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092 - properties: - namespaceSelectors: - description: |- - NamespaceSelectors is the list of namespace selectors to which this ACL applies. - Items in this list are evaluated using a logical OR operation. - items: - description: |- - NamespaceSelector selects the namespaces to which this ACL applies. - An empty map of MatchLabels matches all namespaces in a cluster. - properties: - matchLabels: - additionalProperties: - type: string - description: |- - MatchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. - type: object - type: object - type: array - required: - - namespaceSelectors - type: object bucketName: description: BucketName is the name of the object storage bucket. type: string diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md index 2fcce0d63..1424cdecc 100644 --- a/docs/api/v1/source.md +++ b/docs/api/v1/source.md @@ -275,22 +275,6 @@ bool Bucket.

- - -accessFrom
- - -github.com/fluxcd/pkg/apis/acl.AccessFrom - - - - -(Optional) -

AccessFrom specifies an Access Control List for allowing cross-namespace -references to this object. -NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

- - @@ -1421,22 +1405,6 @@ bool Bucket.

- - -accessFrom
- - -github.com/fluxcd/pkg/apis/acl.AccessFrom - - - - -(Optional) -

AccessFrom specifies an Access Control List for allowing cross-namespace -references to this object. -NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092

- - From 36a4889ea2b4573760b19d2eadeb3dcea386571b Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Mon, 26 Aug 2024 18:56:22 +0300 Subject: [PATCH 6/6] Alias Bucket providers from v1beta2 to v1 Signed-off-by: Stefan Prodan --- api/v1beta2/bucket_types.go | 10 +++--- internal/controller/bucket_controller.go | 39 ++++++++++++------------ main.go | 2 +- 3 files changed, 25 insertions(+), 26 deletions(-) diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go index a7641c750..d18fc76f7 100644 --- a/api/v1beta2/bucket_types.go +++ b/api/v1beta2/bucket_types.go @@ -34,17 +34,17 @@ const ( const ( // BucketProviderGeneric for any S3 API compatible storage Bucket. - BucketProviderGeneric string = "generic" + BucketProviderGeneric string = apiv1.BucketProviderGeneric // BucketProviderAmazon for an AWS S3 object storage Bucket. // Provides support for retrieving credentials from the AWS EC2 service. - BucketProviderAmazon string = "aws" + BucketProviderAmazon string = apiv1.BucketProviderAmazon // BucketProviderGoogle for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. - BucketProviderGoogle string = "gcp" + BucketProviderGoogle string = apiv1.BucketProviderGoogle // BucketProviderAzure for an Azure Blob Storage Bucket. // Provides support for authentication using a Service Principal, // Managed Identity or Shared Key. - BucketProviderAzure string = "azure" + BucketProviderAzure string = apiv1.BucketProviderAzure // GenericBucketProvider for any S3 API compatible storage Bucket. // @@ -53,7 +53,7 @@ const ( // AmazonBucketProvider for an AWS S3 object storage Bucket. // Provides support for retrieving credentials from the AWS EC2 service. // - // Deprecated: use v1.BucketProviderAmazon. + // Deprecated: use BucketProviderAmazon. AmazonBucketProvider string = apiv1.BucketProviderAmazon // GoogleBucketProvider for a Google Cloud Storage Bucket. // Provides support for authentication using a workload identity. 
diff --git a/internal/controller/bucket_controller.go b/internal/controller/bucket_controller.go index 9a347c70d..0675b4aae 100644 --- a/internal/controller/bucket_controller.go +++ b/internal/controller/bucket_controller.go @@ -52,7 +52,6 @@ import ( rreconcile "github.com/fluxcd/pkg/runtime/reconcile" "github.com/fluxcd/pkg/sourceignore" - bucketv1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1" intdigest "github.com/fluxcd/source-controller/internal/digest" serror "github.com/fluxcd/source-controller/internal/error" @@ -159,7 +158,7 @@ type BucketProvider interface { // bucketReconcileFunc is the function type for all the v1beta2.Bucket // (sub)reconcile functions. The type implementations are grouped and // executed serially to perform the complete reconcile of the object. -type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) +type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) @@ -169,7 +168,7 @@ func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts Buc r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName) return ctrl.NewControllerManagedBy(mgr). - For(&bucketv1.Bucket{}). + For(&sourcev1.Bucket{}). WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). 
WithOptions(controller.Options{ RateLimiter: opts.RateLimiter, @@ -182,7 +181,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res log := ctrl.LoggerFrom(ctx) // Fetch the Bucket - obj := &bucketv1.Bucket{} + obj := &sourcev1.Bucket{} if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } @@ -255,7 +254,7 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res // reconcile iterates through the bucketReconcileFunc tasks for the // object. It returns early on the first call that returns // reconcile.ResultRequeue, or produces an error. -func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { oldObj := obj.DeepCopy() rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") @@ -326,7 +325,7 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche } // notify emits notification related to the reconciliation. -func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *bucketv1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { +func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { // Notify successful reconciliation for new artifact and recovery from any // failure. if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { @@ -364,7 +363,7 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *bucketv1. // condition is added. 
// The hostname of any URL in the Status of the object are updated, to ensure // they match the Storage server hostname of current runtime. -func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { // Garbage collect previous advertised artifact(s) from storage _ = r.garbageCollect(ctx, obj) @@ -423,7 +422,7 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria // When a SecretRef is defined, it attempts to fetch the Secret before calling // the provider. If this fails, it records v1beta2.FetchFailedCondition=True on // the object and returns early. -func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { secret, err := r.getSecret(ctx, obj.Spec.SecretRef, obj.GetNamespace()) if err != nil { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) @@ -441,7 +440,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial // Construct provider client var provider BucketProvider switch obj.Spec.Provider { - case bucketv1.BucketProviderGoogle: + case sourcev1.BucketProviderGoogle: if err = gcp.ValidateSecret(secret); err != nil { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) @@ -459,7 +458,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", 
e) return sreconcile.ResultEmpty, e } - case bucketv1.BucketProviderAzure: + case sourcev1.BucketProviderAzure: if err = azure.ValidateSecret(secret); err != nil { e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) @@ -545,7 +544,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial // Fetch etag index if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { - e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason) + e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -577,7 +576,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial }() if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { - e := serror.NewGeneric(err, bucketv1.BucketOperationFailedReason) + e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) return sreconcile.ResultEmpty, e } @@ -596,7 +595,7 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial // early. // On a successful archive, the Artifact in the Status of the object is set, // and the symlink in the Storage is updated to its path. -func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *bucketv1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { // Calculate revision revision := index.Digest(intdigest.Canonical) @@ -689,7 +688,7 @@ func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.Seri // reconcileDelete handles the deletion of the object. 
// It first garbage collects all Artifacts for the object from the Storage. // Removing the finalizer from the object if successful. -func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bucket) (sreconcile.Result, error) { +func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bucket) (sreconcile.Result, error) { // Garbage collect the resource's artifacts if err := r.garbageCollect(ctx, obj); err != nil { // Return the error so we retry the failed garbage collection @@ -708,7 +707,7 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *bucketv1.Bu // It removes all but the current Artifact from the Storage, unless the // deletion timestamp on the object is set. Which will result in the // removal of all Artifacts for the objects. -func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *bucketv1.Bucket) error { +func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { if !obj.DeletionTimestamp.IsZero() { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { return serror.NewGeneric( @@ -776,7 +775,7 @@ func (r *BucketReconciler) getTLSConfig(ctx context.Context, // getProxyURL attempts to fetch a proxy URL from the object's proxy secret // reference. -func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *bucketv1.Bucket) (*url.URL, error) { +func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *sourcev1.Bucket) (*url.URL, error) { namespace := obj.GetNamespace() proxySecret, err := r.getSecret(ctx, obj.Spec.ProxySecretRef, namespace) if err != nil || proxySecret == nil { @@ -802,7 +801,7 @@ func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *bucketv1.Bucket // getSTSSecret attempts to fetch the secret from the object's STS secret // reference. 
-func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *bucketv1.Bucket) (*corev1.Secret, error) { +func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) { if obj.Spec.STS == nil { return nil, nil } @@ -811,7 +810,7 @@ func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *bucketv1.Bucke // getSTSTLSConfig attempts to fetch the certificate secret from the object's // STS configuration. -func (r *BucketReconciler) getSTSTLSConfig(ctx context.Context, obj *bucketv1.Bucket) (*stdtls.Config, error) { +func (r *BucketReconciler) getSTSTLSConfig(ctx context.Context, obj *sourcev1.Bucket) (*stdtls.Config, error) { if obj.Spec.STS == nil { return nil, nil } @@ -848,7 +847,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context, // bucket using the given provider, while filtering them using .sourceignore // rules. After fetching an object, the etag value in the index is updated to // the current value to ensure accuracy. -func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error { +func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() @@ -902,7 +901,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *bucketv1. // using the given provider, and stores them into tempDir. It downloads in // parallel, but limited to the maxConcurrentBucketFetches. // Given an index is provided, the bucket is assumed to exist. 
-func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *bucketv1.Bucket, index *index.Digester, tempDir string) error { +func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) defer cancel() diff --git a/main.go b/main.go index 72ba918c9..42e2f81de 100644 --- a/main.go +++ b/main.go @@ -351,7 +351,7 @@ func mustSetupManager(metricsAddr, healthAddr string, maxConcurrent int, &v1.GitRepository{}: {Label: watchSelector}, &v1.HelmRepository{}: {Label: watchSelector}, &v1.HelmChart{}: {Label: watchSelector}, - &v1beta2.Bucket{}: {Label: watchSelector}, + &v1.Bucket{}: {Label: watchSelector}, &v1beta2.OCIRepository{}: {Label: watchSelector}, }, },