diff --git a/api/v1/agentaction_types.go b/api/v1/agentaction_types.go
index ff196319..b760141e 100644
--- a/api/v1/agentaction_types.go
+++ b/api/v1/agentaction_types.go
@@ -78,6 +78,17 @@ func (a *AgentAction) GetRetryLabelValue() string {
 	return getRetryLabelValue(a.Annotations)
 }
 
+// CreatedByAgentConfig checks whether an AgentAction was created on behalf of an AgentConfig.
+func (a *AgentAction) CreatedByAgentConfig() bool {
+	for _, ref := range a.GetOwnerReferences() {
+		if ref.Kind == KindAgentConfig {
+			return true
+		}
+	}
+
+	return false
+}
+
 // SetRetryAnnotation flags the resource to retry its last operation.
 func (a *AgentAction) SetRetryAnnotation(retry string) {
 	if a.Annotations == nil {
diff --git a/api/v1/agentconfig_types.go b/api/v1/agentconfig_types.go
index ebf40dc4..e258b8f6 100644
--- a/api/v1/agentconfig_types.go
+++ b/api/v1/agentconfig_types.go
@@ -1,7 +1,13 @@
 package v1
 
 import (
+	"crypto/md5"
+	"encoding/hex"
 	"fmt"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
 
 	"github.com/mitchellh/mapstructure"
 	"github.com/opencontainers/go-digest"
@@ -10,6 +16,19 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+const (
+	// AnnotationAgentCfgPluginsHash is the annotation used to store the hash of the plugins defined in an AgentConfig.
+	AnnotationAgentCfgPluginsHash = "agent-config-plugins-hash"
+
+	// KindAgentConfig represents the AgentConfig kind value.
+	KindAgentConfig = "AgentConfig"
+)
+
+// DefaultPlugins is the set of default plugins that will be used by the operator.
+var DefaultPlugins = map[string]Plugin{
+	"kubernetes": {},
+}
+
 // AgentConfigSpec defines the configuration for the Porter agent.
 //
 // SERIALIZATION NOTE:
@@ -19,14 +38,17 @@ type AgentConfigSpec struct {
 	// PorterRepository is the repository for the Porter Agent image.
 	// Defaults to ghcr.io/getporter/porter-agent
+	// +optional
 	PorterRepository string `json:"porterRepository,omitempty" mapstructure:"porterRepository,omitempty"`
 
 	// PorterVersion is the tag for the Porter Agent image.
 	// Defaults to a well-known version of the agent that has been tested with the operator.
 	// Users SHOULD override this to use more recent versions.
+	// +optional
 	PorterVersion string `json:"porterVersion,omitempty" mapstructure:"porterVersion,omitempty"`
 
 	// ServiceAccount is the service account to run the Porter Agent under.
+	// +optional
 	ServiceAccount string `json:"serviceAccount,omitempty" mapstructure:"serviceAccount,omitempty"`
 
 	// VolumeSize is the size of the persistent volume that Porter will
@@ -34,28 +56,194 @@ type AgentConfigSpec struct {
 	// between the Porter Agent and the bundle invocation image. It must
 	// be large enough to store any files used by the bundle including credentials,
 	// parameters and outputs.
+	// +optional
 	VolumeSize string `json:"volumeSize,omitempty" mapstructure:"volumeSize,omitempty"`
 
 	// PullPolicy specifies when to pull the Porter Agent image. The default
 	// is to use PullAlways when the tag is canary or latest, and PullIfNotPresent
 	// otherwise.
+	// +optional
 	PullPolicy v1.PullPolicy `json:"pullPolicy,omitempty" mapstructure:"pullPolicy,omitempty"`
 
 	// InstallationServiceAccount specifies a service account to run the Kubernetes pod/job for the installation image.
 	// The default is to run without a service account.
 	// This can be useful for a bundle which is targeting the kubernetes cluster that the operator is installed in.
+	// +optional
 	InstallationServiceAccount string `json:"installationServiceAccount,omitempty" mapstructure:"installationServiceAccount,omitempty"`
+	// +optional
+	Plugins map[string]Plugin `json:"plugins,omitempty" mapstructure:"plugins,omitempty"`
+}
+
+// MergeConfig from another AgentConfigSpec. The values from the override are applied
+// only when they are not empty.
+func (c AgentConfigSpec) MergeConfig(overrides ...AgentConfigSpec) (AgentConfigSpec, error) {
+	final := c
+	var targetRaw map[string]interface{}
+	if err := mapstructure.Decode(c, &targetRaw); err != nil {
+		return AgentConfigSpec{}, err
+	}
+
+	for _, override := range overrides {
+		var overrideRaw map[string]interface{}
+		if err := mapstructure.Decode(override, &overrideRaw); err != nil {
+			return AgentConfigSpec{}, err
+		}
+
+		targetRaw = MergeMap(targetRaw, overrideRaw)
+	}
+
+	if err := mapstructure.Decode(targetRaw, &final); err != nil {
+		return AgentConfigSpec{}, err
+	}
+
+	return final, nil
+}
+
+// AgentConfigStatus defines the observed state of AgentConfig
+type AgentConfigStatus struct {
+	PorterResourceStatus `json:",inline"`
+	// The current status of whether the AgentConfig is ready to be used for an AgentAction.
+	// +kubebuilder:default:=false
+	// +kubebuilder:validation:Type=boolean
+	Ready bool `json:"ready"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// AgentConfig is the Schema for the agentconfigs API
+type AgentConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   AgentConfigSpec   `json:"spec,omitempty"`
+	Status AgentConfigStatus `json:"status,omitempty"`
+}
+
+func (ac *AgentConfig) GetStatus() PorterResourceStatus {
+	return ac.Status.PorterResourceStatus
+}
+
+func (ac *AgentConfig) SetStatus(value PorterResourceStatus) {
+	ac.Status.PorterResourceStatus = value
+}
+
+// MergeReadyConfigs applies the override AgentConfigs in sequential order and returns
+// the merged config only when it is ready to be used for an AgentAction.
+func (ac AgentConfig) MergeReadyConfigs(overrides ...AgentConfig) (AgentConfig, error) {
+	specs := []AgentConfigSpec{ac.Spec}
+	cfg := ac
+	for _, override := range overrides {
+		specs = append(specs, override.Spec)
+		// only consider the agent config if it exists
+		if override.Name != "" {
+			cfg = override
+		}
+	}
+	base := AgentConfigSpec{}
+	cfgSpec, err := base.MergeConfig(specs...)
+	if err != nil {
+		return AgentConfig{}, err
+	}
+	cfg.Spec = cfgSpec
+
+	if !cfg.Status.Ready {
+		return AgentConfig{}, nil
+	}
+
+	return cfg, nil
+}
+
+// AgentConfigAdapter is a wrapper of the AgentConfig schema. It processes the input data so that
+// the controller can easily work with the input.
+type AgentConfigAdapter struct {
+	AgentConfig
+	Spec AgentConfigSpecAdapter
+}
+
+// NewAgentConfigAdapter creates a new instance of the adapter from an AgentConfig.
+func NewAgentConfigAdapter(agentCfg AgentConfig) *AgentConfigAdapter {
+	return &AgentConfigAdapter{
+		AgentConfig: agentCfg,
+		Spec:        NewAgentConfigSpecAdapter(agentCfg.Spec),
+	}
+}
+
+// GetRetryLabelValue returns a value that is safe to use
+// as a label value and represents the retry annotation used
+// to trigger reconciliation.
+func (ac *AgentConfigAdapter) GetRetryLabelValue() string {
+	return getRetryLabelValue(ac.Annotations)
+}
+
+// SetRetryAnnotation flags the resource to retry its last operation.
+func (ac *AgentConfigAdapter) SetRetryAnnotation(retry string) {
+	if ac.Annotations == nil {
+		ac.Annotations = make(map[string]string, 1)
+	}
+	ac.Annotations[AnnotationRetry] = retry
+}
+
+// GetPluginsPVCName returns the name of the plugins PVC, derived from a hash of
+// the plugins spec and the AgentConfig's namespace.
+func (ac *AgentConfigAdapter) GetPluginsPVCName() string {
+	return ac.Spec.Plugins.GetPVCName(ac.Namespace)
+}
+
+// GetPluginsPVCNameAnnotation returns an annotation map containing the plugins PVC name,
+// derived from a hash of the plugins spec and the AgentConfig's namespace.
+func (ac *AgentConfigAdapter) GetPluginsPVCNameAnnotation() map[string]string {
+	return ac.Spec.Plugins.GetPVCNameAnnotation(ac.Namespace)
+}
+
+// +kubebuilder:object:root=true
+
+// AgentConfigList contains a list of AgentConfig values.
+type AgentConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []AgentConfig `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AgentConfig{}, &AgentConfigList{})
+}
+
+// Plugin represents the plugin configuration.
+type Plugin struct {
+	FeedURL string `json:"feedURL,omitempty" mapstructure:"feedURL,omitempty"`
+	URL     string `json:"url,omitempty" mapstructure:"url,omitempty"`
+	Mirror  string `json:"mirror,omitempty" mapstructure:"mirror,omitempty"`
+	Version string `json:"version,omitempty" mapstructure:"version,omitempty"`
+}
+
+// AgentConfigSpecAdapter is a wrapper of AgentConfigSpec with a list representation of the plugins configuration.
+type AgentConfigSpecAdapter struct {
+	original AgentConfigSpec
+
+	Plugins PluginsConfigList
+}
+
+// NewAgentConfigSpecAdapter creates a new instance of the AgentConfigSpecAdapter from an AgentConfigSpec.
+func NewAgentConfigSpecAdapter(spec AgentConfigSpec) AgentConfigSpecAdapter {
+	return AgentConfigSpecAdapter{
+		original: spec,
+		Plugins:  NewPluginsList(spec.Plugins),
+	}
+}
+
+// GetPluginsPVCName returns the name used for this agent config's plugins persistent volume claim.
+func (c AgentConfigSpecAdapter) GetPluginsPVCName(namespace string) string {
+	return c.Plugins.GetPVCName(namespace)
 }
 
 // GetPorterImage returns the fully qualified image name of the Porter Agent
 // image. Defaults the repository and tag when not set.
-func (c AgentConfigSpec) GetPorterImage() string {
-	version := c.PorterVersion
+func (c AgentConfigSpecAdapter) GetPorterImage() string {
+	version := c.original.PorterVersion
 	if version == "" {
 		// We don't use a mutable tag like latest, or canary because it's a bad practice that we don't want to encourage.
 		version = DefaultPorterAgentVersion
 	}
-	repo := c.PorterRepository
+	repo := c.original.PorterRepository
 	if repo == "" {
 		repo = DefaultPorterAgentRepository
 	}
@@ -70,12 +258,12 @@ func (c AgentConfigSpec) GetPorterImage() string {
 // GetPullPolicy returns the PullPolicy that should be used for the Porter Agent
 // (not the bundle). Defaults to PullAlways for latest and canary,
 // PullIfNotPresent otherwise.
-func (c AgentConfigSpec) GetPullPolicy() v1.PullPolicy {
-	if c.PullPolicy != "" {
-		return c.PullPolicy
+func (c AgentConfigSpecAdapter) GetPullPolicy() v1.PullPolicy {
+	if c.original.PullPolicy != "" {
+		return c.original.PullPolicy
 	}
 
-	if c.PorterVersion == "latest" || c.PorterVersion == "canary" || c.PorterVersion == "dev" {
+	if c.original.PorterVersion == "latest" || c.original.PorterVersion == "canary" || c.original.PorterVersion == "dev" {
 		return v1.PullAlways
 	}
 	return v1.PullIfNotPresent
@@ -83,59 +271,170 @@ func (c AgentConfigSpec) GetPullPolicy() v1.PullPolicy {
 
 // GetVolumeSize returns the size of the shared volume to mount between the
 // Porter Agent and the bundle's invocation image. Defaults to 64Mi.
-func (c AgentConfigSpec) GetVolumeSize() resource.Quantity {
-	q, err := resource.ParseQuantity(c.VolumeSize)
+func (c AgentConfigSpecAdapter) GetVolumeSize() resource.Quantity {
+	q, err := resource.ParseQuantity(c.original.VolumeSize)
 	if err != nil || q.IsZero() {
 		return resource.MustParse("64Mi")
 	}
 	return q
 }
 
-// MergeConfig from another AgentConfigSpec. The values from the override are applied
-// only when they are not empty.
-func (c AgentConfigSpec) MergeConfig(overrides ...AgentConfigSpec) (AgentConfigSpec, error) {
-	final := c
-	var targetRaw map[string]interface{}
-	if err := mapstructure.Decode(c, &targetRaw); err != nil {
-		return AgentConfigSpec{}, err
+// GetPorterRepository returns the config value of the Porter repository.
+func (c AgentConfigSpecAdapter) GetPorterRepository() string {
+	return c.original.PorterRepository
+}
+
+// GetPorterVersion returns the config value of the Porter version.
+func (c AgentConfigSpecAdapter) GetPorterVersion() string {
+	return c.original.PorterVersion
+}
+
+// GetServiceAccount returns the config value of the service account.
+func (c AgentConfigSpecAdapter) GetServiceAccount() string {
+	return c.original.ServiceAccount
+}
+
+// GetInstallationServiceAccount returns the config value of the installation service account.
+func (c AgentConfigSpecAdapter) GetInstallationServiceAccount() string {
+	return c.original.InstallationServiceAccount
+}
+
+// PluginsConfigList is the list implementation of the Plugins map.
+// The list is sorted by plugin name, alphabetically.
+type PluginsConfigList struct {
+	data map[string]Plugin
+	keys []string
+}
+
+// NewPluginsList creates a new instance of PluginsConfigList.
+func NewPluginsList(ps map[string]Plugin) PluginsConfigList {
+	keys := make([]string, 0, len(ps))
+	for key := range ps {
+		keys = append(keys, key)
 	}
 
-	for _, override := range overrides {
-		var overrideRaw map[string]interface{}
-		if err := mapstructure.Decode(override, &overrideRaw); err != nil {
-			return AgentConfigSpec{}, err
-		}
+	sort.SliceStable(keys, func(i, j int) bool {
+		return keys[i] < keys[j]
+	})
 
-		targetRaw = MergeMap(targetRaw, overrideRaw)
+	return PluginsConfigList{
+		data: ps,
+		keys: keys,
 	}
+}
 
-	if err := mapstructure.Decode(targetRaw, &final); err != nil {
-		return AgentConfigSpec{}, err
+// IsZero checks whether the list is empty.
+func (op PluginsConfigList) IsZero() bool {
+	return len(op.keys) == 0
+}
+
+// Add adds a new item into the list.
+func (op *PluginsConfigList) Add(name string, p Plugin) {
+	op.data[name] = p
+	op.keys = append(op.keys, name)
+	sort.SliceStable(op.keys, func(i, j int) bool {
+		return op.keys[i] < op.keys[j]
+	})
+}
+
+// GetNames returns an array of the plugin names in the list, sorted alphabetically.
+func (op PluginsConfigList) GetNames() []string {
+	return op.keys
+}
+
+// GetByName returns a plugin based on its name and true if the plugin is found.
+// If a plugin is not found in the list, the function returns an empty plugin and false.
+func (op PluginsConfigList) GetByName(name string) (Plugin, bool) {
+	p, ok := op.data[name]
+	return p, ok
+}
+
+// GetPVCName returns a name derived from a hash of the plugin configs and the namespace.
+// If no plugins are defined, it returns an empty string.
+func (op PluginsConfigList) GetPVCName(namespace string) string {
+	if len(op.data) == 0 {
+		return ""
 	}
 
-	return final, nil
+	input := op.label() + namespace
+
+	return "porter-" + hashString(input)
 }
 
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
+// GetLabels returns a hash of all plugin configs that is safe to use
+// as a label value and represents the plugin configuration used
+// to trigger reconciliation.
+// Label values are restricted to alphanumeric characters and ".", "-", "_",
+// and can contain at most 63 characters.
+// Therefore all URLs are sanitized before they are used as part of
+// the label.
+func (op PluginsConfigList) GetLabels() map[string]string {
+	if len(op.data) == 0 {
+		return nil
+	}
 
-// AgentConfig is the Schema for the agentconfigs API
-type AgentConfig struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	return map[string]string{
+		LabelManaged:     "true",
+		LabelPluginsHash: hashString(op.label()),
+	}
+}
+
+func (op PluginsConfigList) label() string {
+	var plugins []string
+	var i int
+	for _, k := range op.keys {
+		p := op.data[k]
+
+		format := "%s"
+		if i > 0 {
+			format = "_%s"
+		}
+		plugins = append(plugins, fmt.Sprintf(format, k))
 
-	Spec AgentConfigSpec `json:"spec,omitempty"`
+		if p.FeedURL != "" {
+			plugins = append(plugins, fmt.Sprintf("_%s", cleanURL(p.FeedURL)))
+		}
+		if p.URL != "" {
+			plugins = append(plugins, fmt.Sprintf("_%s", cleanURL(p.URL)))
+		}
+		if p.Mirror != "" {
+			plugins = append(plugins, fmt.Sprintf("_%s", cleanURL(p.Mirror)))
+		}
+		if p.Version != "" {
+			plugins = append(plugins, fmt.Sprintf("_%s", p.Version))
+		}
+		i++
+	}
+
+	return strings.Join(plugins, "")
 }
 
-// +kubebuilder:object:root=true
+// GetPVCNameAnnotation returns an annotation map containing the plugins PVC name,
+// derived from a hash of the plugins spec and the AgentConfig's namespace.
+func (op PluginsConfigList) GetPVCNameAnnotation(namespace string) map[string]string {
+	return map[string]string{AnnotationAgentCfgPluginsHash: op.GetPVCName(namespace)}
+}
 
-// AgentConfigList contains a list of AgentConfig values.
-type AgentConfigList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []AgentConfig `json:"items"`
+func cleanURL(inputURL string) string {
+	var cleanURL string
+	u, err := url.Parse(inputURL)
+	if err == nil {
+		// Remove the scheme (e.g.
"http", "https") from the URL + cleanURL = strings.Replace(u.String(), u.Scheme+"://", "", -1) + } + + // Replace all non-alphanumeric, non-._- characters with an underscore + reg, err := regexp.Compile("[^a-zA-Z0-9._-]+") + if err != nil { + return "" + } + cleanURL = reg.ReplaceAllString(cleanURL, "_") + + return cleanURL } -func init() { - SchemeBuilder.Register(&AgentConfig{}, &AgentConfigList{}) +func hashString(input string) string { + hash := md5.Sum([]byte(input)) + + return hex.EncodeToString(hash[:]) } diff --git a/api/v1/agentconfig_types_test.go b/api/v1/agentconfig_types_test.go index 443e473d..0e09cc24 100644 --- a/api/v1/agentconfig_types_test.go +++ b/api/v1/agentconfig_types_test.go @@ -9,37 +9,42 @@ import ( "k8s.io/apimachinery/pkg/api/resource" ) -func TestAgentConfigSpec_GetPorterImage(t *testing.T) { +func TestAgentConfigSpecAdapter_GetPorterImage(t *testing.T) { t.Run("default", func(t *testing.T) { - c := AgentConfigSpec{} + c := AgentConfigSpecAdapter{} assert.Equal(t, DefaultPorterAgentRepository+":"+DefaultPorterAgentVersion, c.GetPorterImage()) }) t.Run("porter version set", func(t *testing.T) { c := AgentConfigSpec{PorterVersion: "canary"} - assert.Equal(t, DefaultPorterAgentRepository+":canary", c.GetPorterImage()) + cl := NewAgentConfigSpecAdapter(c) + + assert.Equal(t, DefaultPorterAgentRepository+":canary", cl.GetPorterImage()) }) t.Run("porter repository set", func(t *testing.T) { // Test if someone has mirrored porter's agent to another registry c := AgentConfigSpec{PorterRepository: "localhost:5000/myporter"} - assert.Equal(t, "localhost:5000/myporter:"+DefaultPorterAgentVersion, c.GetPorterImage()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, "localhost:5000/myporter:"+DefaultPorterAgentVersion, cl.GetPorterImage()) }) t.Run("porter repository and version set", func(t *testing.T) { c := AgentConfigSpec{PorterRepository: "localhost:5000/myporter", PorterVersion: "v1.2.3"} - assert.Equal(t, "localhost:5000/myporter:v1.2.3", c.GetPorterImage()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, "localhost:5000/myporter:v1.2.3", cl.GetPorterImage()) }) t.Run("digest set", func(t *testing.T) { c := AgentConfigSpec{ PorterVersion: "sha256:ea7d328dc6b65e4b62a971ba8436f89d5857c2878c211312aaa5e2db2e47a2da", } - assert.Equal(t, DefaultPorterAgentRepository+"@sha256:ea7d328dc6b65e4b62a971ba8436f89d5857c2878c211312aaa5e2db2e47a2da", c.GetPorterImage()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, DefaultPorterAgentRepository+"@sha256:ea7d328dc6b65e4b62a971ba8436f89d5857c2878c211312aaa5e2db2e47a2da", cl.GetPorterImage()) }) } -func TestAgentConfigSpec_GetPullPolicy(t *testing.T) { +func TestAgentConfigSpecAdapter_GetPullPolicy(t *testing.T) { testcases := map[string]v1.PullPolicy{ "": v1.PullIfNotPresent, "latest": v1.PullAlways, @@ -50,22 +55,106 @@ func TestAgentConfigSpec_GetPullPolicy(t *testing.T) { for version, wantPullPolicy := range testcases { t.Run("version "+version, func(t *testing.T) { c := AgentConfigSpec{PorterVersion: version} - assert.Equal(t, wantPullPolicy, c.GetPullPolicy()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, wantPullPolicy, cl.GetPullPolicy()) }) } } -func TestAgentConfigSpec_GetVolumeSize(t *testing.T) { +func TestAgentConfigSpecAdapter_GetVolumeSize(t *testing.T) { t.Run("default", func(t *testing.T) { c := AgentConfigSpec{} - assert.Equal(t, resource.MustParse("64Mi"), c.GetVolumeSize()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, resource.MustParse("64Mi"), cl.GetVolumeSize()) }) 
t.Run("quantity set", func(t *testing.T) { qty := resource.MustParse("128Mi") c := AgentConfigSpec{VolumeSize: "128Mi"} - assert.Equal(t, qty, c.GetVolumeSize()) + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, qty, cl.GetVolumeSize()) + }) +} + +func TestAgentConfigSpecAdapter_GetPVCName(t *testing.T) { + t.Run("no plugins defined", func(t *testing.T) { + c := AgentConfigSpec{} + cl := NewAgentConfigSpecAdapter(c) + assert.Empty(t, cl.GetPluginsPVCName("default")) + }) + + t.Run("one plugins defined", func(t *testing.T) { + c := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "kubernetes": {Version: "v1.0.0", FeedURL: "https://test"}, + }, + } + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, "porter-04ddd41f06d1720a7467dadc464d8077", cl.GetPluginsPVCName("default")) + }) + + t.Run("multiple plugins defined", func(t *testing.T) { + c := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "kubernetes": {Version: "v1.0.0", FeedURL: "https://test"}, + "azure": {Version: "v1.0.0", URL: "https://test"}, + "hashicorp": {Version: "v1.0.0", Mirror: "https://test"}, + }, + } + cl := NewAgentConfigSpecAdapter(c) + assert.Equal(t, "porter-a5bc533e0e249e10c7cf442be42d6ae2", cl.GetPluginsPVCName("default")) + + // change the order of the plugins should not affect the name output. + c2 := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "azure": {Version: "v1.0.0", FeedURL: "https://test"}, + "hashicorp": {Version: "v1.0.0", URL: "https://test"}, + "kubernetes": {Version: "v1.0.0", Mirror: "https://test"}, + }, + } + cl2 := NewAgentConfigSpecAdapter(c2) + assert.Equal(t, "porter-a5bc533e0e249e10c7cf442be42d6ae2", cl2.GetPluginsPVCName("default")) + }) +} + +func TestAgentConfigSpecAdapter_GetPluginsLabels(t *testing.T) { + t.Run("no plugins defined", func(t *testing.T) { + c := AgentConfigSpec{} + cl := NewAgentConfigSpecAdapter(c) + assert.Nil(t, cl.Plugins.GetLabels()) + }) + + t.Run("one plugin defined", func(t *testing.T) { + onePluginCfg := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "kubernetes": {Version: "v1.0.0", FeedURL: "https://test"}, + }, + } + cl := NewAgentConfigSpecAdapter(onePluginCfg) + assert.Equal(t, map[string]string{LabelManaged: "true", LabelPluginsHash: "b1c683cd14c4e4a242c43ccd2f57a696"}, cl.Plugins.GetLabels()) + }) + + t.Run("multiple plugins defined", func(t *testing.T) { + multiplePluginsCfg := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "kubernetes": {Version: "v1.0.0", FeedURL: "https://test"}, + "azure": {Version: "v1.2.0", URL: "https://test1"}, + "hashicorp": {Version: "v1.0.0", FeedURL: "https://test"}, + }, + } + mcl := NewAgentConfigSpecAdapter(multiplePluginsCfg) + assert.Equal(t, map[string]string{LabelManaged: "true", LabelPluginsHash: "d8dbdcb6a9de4e60ef7886f90cbe73f4"}, mcl.Plugins.GetLabels()) + + multiplePluginsCfgWithDifferentOrder := AgentConfigSpec{ + Plugins: map[string]Plugin{ + "hashicorp": {Version: "v1.0.0", FeedURL: "https://test"}, + "azure": {Version: "v1.2.0", URL: "https://test1"}, + "kubernetes": {Version: "v1.0.0", FeedURL: "https://test"}, + }, + } + mclWithDifferentOrder := NewAgentConfigSpecAdapter(multiplePluginsCfgWithDifferentOrder) + assert.Equal(t, map[string]string{LabelManaged: "true", LabelPluginsHash: "d8dbdcb6a9de4e60ef7886f90cbe73f4"}, mclWithDifferentOrder.Plugins.GetLabels()) }) } @@ -93,6 +182,7 @@ func TestAgentConfigSpec_MergeConfig(t *testing.T) { VolumeSize: "1Mi", PullPolicy: v1.PullIfNotPresent, InstallationServiceAccount: "base", + Plugins: map[string]Plugin{"test-plugin": {FeedURL: "localhost:5000"}, 
"kubernetes": {}}, } instConfig := AgentConfigSpec{ @@ -102,6 +192,7 @@ func TestAgentConfigSpec_MergeConfig(t *testing.T) { VolumeSize: "2Mi", PullPolicy: v1.PullAlways, InstallationServiceAccount: "override", + Plugins: map[string]Plugin{"azure": {FeedURL: "localhost:6000"}}, } config, err := systemConfig.MergeConfig(nsConfig, instConfig) @@ -112,5 +203,6 @@ func TestAgentConfigSpec_MergeConfig(t *testing.T) { assert.Equal(t, "2Mi", config.VolumeSize) assert.Equal(t, v1.PullAlways, config.PullPolicy) assert.Equal(t, "override", config.InstallationServiceAccount) + assert.Equal(t, map[string]Plugin{"azure": {FeedURL: "localhost:6000"}}, config.Plugins) }) } diff --git a/api/v1/const.go b/api/v1/const.go index beff6747..1083d763 100644 --- a/api/v1/const.go +++ b/api/v1/const.go @@ -40,6 +40,8 @@ const ( // Operator. LabelManaged = Prefix + "managed" + LabelPluginsHash = Prefix + "plugins-hash" + // LabelResourceKind is a label applied to resources created by the Porter // Operator, representing the kind of owning resource. It is used to help the // operator determine if a resource has already been created. @@ -97,4 +99,11 @@ const ( // VolumeImagePullSecretPath is the mount path of the volume containing for docker // auth for image pull secrets. VolumeImgPullSecretPath = "/home/nonroot" + // VolumePorterSharedName is the name of the volume shared between the porter + // agent and the invocation image. + VolumePorterPluginsName = "porter-plugins" + + // VolumePorterConfigPath is the mount path of the volume containing Porter's + // config file. + VolumePorterPluginsPath = "/app/.porter/plugins" ) diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go index ed47a141..7c69e89b 100644 --- a/api/v1/zz_generated.deepcopy.go +++ b/api/v1/zz_generated.deepcopy.go @@ -180,7 +180,8 @@ func (in *AgentConfig) DeepCopyInto(out *AgentConfig) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfig. @@ -201,6 +202,23 @@ func (in *AgentConfig) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfigAdapter) DeepCopyInto(out *AgentConfigAdapter) { + *out = *in + in.AgentConfig.DeepCopyInto(&out.AgentConfig) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfigAdapter. +func (in *AgentConfigAdapter) DeepCopy() *AgentConfigAdapter { + if in == nil { + return nil + } + out := new(AgentConfigAdapter) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AgentConfigList) DeepCopyInto(out *AgentConfigList) { *out = *in @@ -236,6 +254,13 @@ func (in *AgentConfigList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AgentConfigSpec) DeepCopyInto(out *AgentConfigSpec) { *out = *in + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = make(map[string]Plugin, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfigSpec. @@ -248,6 +273,39 @@ func (in *AgentConfigSpec) DeepCopy() *AgentConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfigSpecAdapter) DeepCopyInto(out *AgentConfigSpecAdapter) { + *out = *in + in.original.DeepCopyInto(&out.original) + in.Plugins.DeepCopyInto(&out.Plugins) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfigSpecAdapter. +func (in *AgentConfigSpecAdapter) DeepCopy() *AgentConfigSpecAdapter { + if in == nil { + return nil + } + out := new(AgentConfigSpecAdapter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AgentConfigStatus) DeepCopyInto(out *AgentConfigStatus) { + *out = *in + in.PorterResourceStatus.DeepCopyInto(&out.PorterResourceStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AgentConfigStatus. +func (in *AgentConfigStatus) DeepCopy() *AgentConfigStatus { + if in == nil { + return nil + } + out := new(AgentConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Credential) DeepCopyInto(out *Credential) { *out = *in @@ -654,6 +712,21 @@ func (in *ParameterSource) DeepCopy() *ParameterSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugin) DeepCopyInto(out *Plugin) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. +func (in *Plugin) DeepCopy() *Plugin { + if in == nil { + return nil + } + out := new(Plugin) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { *out = *in @@ -670,6 +743,33 @@ func (in *PluginConfig) DeepCopy() *PluginConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginsConfigList) DeepCopyInto(out *PluginsConfigList) { + *out = *in + if in.data != nil { + in, out := &in.data, &out.data + *out = make(map[string]Plugin, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.keys != nil { + in, out := &in.keys, &out.keys + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginsConfigList. +func (in *PluginsConfigList) DeepCopy() *PluginsConfigList { + if in == nil { + return nil + } + out := new(PluginsConfigList) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PorterConfig) DeepCopyInto(out *PorterConfig) { *out = *in diff --git a/config/crd/bases/porter.sh_agentconfigs.yaml b/config/crd/bases/porter.sh_agentconfigs.yaml index 5b72d03f..35bac74c 100644 --- a/config/crd/bases/porter.sh_agentconfigs.yaml +++ b/config/crd/bases/porter.sh_agentconfigs.yaml @@ -44,6 +44,20 @@ spec: which is targeting the kubernetes cluster that the operator is installed in. type: string + plugins: + additionalProperties: + description: Plugin represents the plugin configuration. + properties: + feedURL: + type: string + mirror: + type: string + url: + type: string + version: + type: string + type: object + type: object porterRepository: description: PorterRepository is the repository for the Porter Agent image. Defaults to ghcr.io/getporter/porter-agent @@ -71,6 +85,104 @@ spec: credentials, parameters and outputs. type: string type: object + status: + description: AgentConfigStatus defines the observed state of AgentConfig + properties: + action: + description: The most recent action executed for the resource + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + conditions: + description: 'Conditions store a list of states that have been reached. + Each condition refers to the status of the ActiveJob Possible conditions + are: Scheduled, Started, Completed, and Failed' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + type FooStatus struct{ // Represents the observations of a foo's + current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + observedGeneration: + description: The last generation observed by the controller. + format: int64 + type: integer + phase: + description: 'The current status of the agent. Possible values are: + Unknown, Pending, Running, Succeeded, and Failed.' + type: string + ready: + default: false + description: The current status of whether the AgentConfig is ready + to be used for an AgentAction. + type: boolean + required: + - ready + type: object type: object served: true storage: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 934093f8..62c20438 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -18,6 +18,7 @@ patchesStrategicMerge: #- patches/webhook_in_credentialsets.yaml #- patches/webhook_in_agentactions.yaml #- patches/webhook_in_parametersets.yaml +#- patches/webhook_in_agentconfig.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. @@ -27,6 +28,7 @@ patchesStrategicMerge: #- patches/cainjection_in_credentialsets.yaml #- patches/cainjection_in_agentactions.yaml #- patches/cainjection_in_parametersets.yaml +#- patches/cainjection_in_agentconfig.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
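For reference, a minimal AgentConfig manifest exercising the new plugins map could look like the sketch below (hypothetical values: the namespace, plugin entries, and feed URL are illustrative, not defaults shipped by the operator):

    apiVersion: porter.sh/v1
    kind: AgentConfig
    metadata:
      name: default
      namespace: my-namespace    # hypothetical namespace
    spec:
      plugins:
        kubernetes: {}           # install the kubernetes plugin with its default settings
        azure:
          version: v1.0.0        # pin a specific plugin version
          feedURL: https://cdn.porter.sh/plugins/atom.xml    # assumed feed URL, for illustration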
diff --git a/config/crd/patches/cainjection_in_agentconfig.yaml b/config/crd/patches/cainjection_in_agentconfig.yaml
new file mode 100644
index 00000000..68669dad
--- /dev/null
+++ b/config/crd/patches/cainjection_in_agentconfig.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: agentconfigs.porter.sh
diff --git a/config/crd/patches/webhook_in_agentconfig.yaml b/config/crd/patches/webhook_in_agentconfig.yaml
new file mode 100644
index 00000000..41d7f3d4
--- /dev/null
+++ b/config/crd/patches/webhook_in_agentconfig.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: agentconfigs.porter.sh
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        service:
+          namespace: system
+          name: webhook-service
+          path: /convert
+      conversionReviewVersions:
+      - v1
diff --git a/config/rbac/agentconfig_editor_role.yaml b/config/rbac/agentconfig_editor_role.yaml
new file mode 100644
index 00000000..699601a0
--- /dev/null
+++ b/config/rbac/agentconfig_editor_role.yaml
@@ -0,0 +1,24 @@
+# permissions for end users to edit agentconfigs.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: agentconfigs-editor-role
+rules:
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs/status
+  verbs:
+  - get
diff --git a/config/rbac/agentconfig_viewer_role.yaml b/config/rbac/agentconfig_viewer_role.yaml
new file mode 100644
index 00000000..cfc74db7
--- /dev/null
+++ b/config/rbac/agentconfig_viewer_role.yaml
@@ -0,0 +1,20 @@
+# permissions for end users to view agentconfigs.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: agentconfigs-viewer-role
+rules:
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs/status
+  verbs:
+  - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index e214028f..1da4e325 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -17,6 +17,18 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumes
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
 - apiGroups:
   - ""
   resources:
@@ -87,6 +99,20 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - porter.sh
+  resources:
+  - agentconfigs/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - porter.sh
   resources:
diff --git a/config/samples/_v1_agentconfig.yaml b/config/samples/_v1_agentconfig.yaml
index 277a5091..15afc87f 100644
--- a/config/samples/_v1_agentconfig.yaml
+++ b/config/samples/_v1_agentconfig.yaml
@@ -11,3 +11,5 @@ spec:
   volumeSize: 64Mi
   pullPolicy: Always
   installationServiceAccount: installation-agent
+  plugins:
+    kubernetes: {}
\ No newline at end of file
diff --git a/controllers/agentaction_controller.go b/controllers/agentaction_controller.go
index 4739926f..0b835955 100644
--- a/controllers/agentaction_controller.go
+++ b/controllers/agentaction_controller.go
@@ -237,7 +237,7 @@ func (r *AgentActionReconciler) getSharedAgentLabels(action *porterv1.AgentActio
 	return labels
 }
 
-func (r *AgentActionReconciler) createAgentVolume(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec) (*corev1.PersistentVolumeClaim, error) {
+func (r *AgentActionReconciler) createAgentVolume(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpecAdapter) (*corev1.PersistentVolumeClaim, error) {
 	labels := r.getSharedAgentLabels(action)
 	var results corev1.PersistentVolumeClaimList
 	if err := r.List(ctx, &results, client.InNamespace(action.Namespace), client.MatchingLabels(labels)); err != nil {
@@ -349,11 +349,11 @@ func (r *AgentActionReconciler) createWorkdirSecret(ctx context.Context, log log
 }
 
 // creates a secret for the porter configuration directory
-func (r *AgentActionReconciler) getImagePullSecret(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec) (*corev1.Secret, error) {
+func (r *AgentActionReconciler) getImagePullSecret(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpecAdapter) (*corev1.Secret, error) {
 	installationSvcAccountName := "default"
 
-	if agentCfg.InstallationServiceAccount != "" {
-		installationSvcAccountName = agentCfg.InstallationServiceAccount
+	if agentCfg.GetInstallationServiceAccount() != "" {
+		installationSvcAccountName = agentCfg.GetInstallationServiceAccount()
 	}
 
 	log.V(Log4Debug).Info("checking service accounts for image pull secrets", "installation_service_account", installationSvcAccountName, "action_name", action.Name, "action_namespace", action.Namespace)
@@ -390,14 +390,14 @@ func (r *AgentActionReconciler) getAgentJobLabels(action *porterv1.AgentAction)
 }
 
 func (r *AgentActionReconciler) createAgentJob(ctx context.Context, log logr.Logger,
-	action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec,
+	action *porterv1.AgentAction, agentCfg 
porterv1.AgentConfigSpecAdapter,
 	pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret, imgPullSecret *corev1.Secret) (batchv1.Job, error) {
 	// not checking for an existing job because that happens earlier during reconcile
 	labels := r.getAgentJobLabels(action)
 	env, envFrom := r.getAgentEnv(action, agentCfg, pvc)
-	volumes, volumeMounts := r.getAgentVolumes(action, pvc, configSecret, workdirSecret, imgPullSecret)
+	volumes, volumeMounts := r.getAgentVolumes(ctx, log, action, agentCfg, pvc, configSecret, workdirSecret, imgPullSecret)
 
 	porterJob := batchv1.Job{
 		ObjectMeta: metav1.ObjectMeta{
@@ -440,7 +440,7 @@ func (r *AgentActionReconciler) createAgentJob(ctx context.Context, log logr.Log
 			},
 			Volumes:       volumes,
 			RestartPolicy: "Never", // TODO: Make the retry policy configurable on the Installation
-			ServiceAccountName: agentCfg.ServiceAccount,
+			ServiceAccountName: agentCfg.GetServiceAccount(),
 			ImagePullSecrets:   nil, // TODO: Make pulling from a private registry possible
 			SecurityContext: &corev1.PodSecurityContext{
 				// Run as the well-known nonroot user that Porter uses for the invocation image and the agent
@@ -463,7 +463,7 @@ func (r *AgentActionReconciler) createAgentJob(ctx context.Context, log logr.Log
 	return porterJob, nil
 }
 
-func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (porterv1.AgentConfigSpec, error) {
+func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (porterv1.AgentConfigSpecAdapter, error) {
 	log.V(Log5Trace).Info("Resolving porter agent configuration")
 
 	logConfig := func(level string, config *porterv1.AgentConfig) {
@@ -474,14 +474,15 @@ func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr
 		log.V(Log4Debug).Info("Found porter agent configuration",
 			"level", level,
 			"namespace", config.Namespace,
-			"name", config.Name)
+			"name", config.Name,
+			"plugin", config.Spec.Plugins)
 	}
 
 	// Read agent configuration defined at the system level
 	systemCfg := &porterv1.AgentConfig{}
 	err := r.Get(ctx, types.NamespacedName{Name: "default", Namespace: operatorNamespace}, systemCfg)
 	if err != nil && !apierrors.IsNotFound(err) {
-		return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration")
+		return porterv1.AgentConfigSpecAdapter{}, errors.Wrap(err, "cannot retrieve system level porter agent configuration")
 	}
 	logConfig("system", systemCfg)
 
@@ -489,7 +490,7 @@ func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr
 	nsCfg := &porterv1.AgentConfig{}
 	err = r.Get(ctx, types.NamespacedName{Name: "default", Namespace: action.Namespace}, nsCfg)
 	if err != nil && !apierrors.IsNotFound(err) {
-		return porterv1.AgentConfigSpec{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration")
+		return porterv1.AgentConfigSpecAdapter{}, errors.Wrap(err, "cannot retrieve namespace level porter agent configuration")
 	}
 	logConfig("namespace", nsCfg)
 
@@ -498,26 +499,35 @@ func (r *AgentActionReconciler) resolveAgentConfig(ctx context.Context, log logr
 	if action.Spec.AgentConfig != nil {
 		err = r.Get(ctx, types.NamespacedName{Name: action.Spec.AgentConfig.Name, Namespace: action.Namespace}, instCfg)
 		if err != nil && !apierrors.IsNotFound(err) {
-			return porterv1.AgentConfigSpec{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the agent action", action.Spec.AgentConfig.Name)
+			return porterv1.AgentConfigSpecAdapter{}, errors.Wrapf(err, "cannot retrieve agent configuration %s specified by the agent action", action.Spec.AgentConfig.Name)
 		}
 		logConfig("instance", instCfg)
 	}
 
 	// Apply overrides
-	base := &systemCfg.Spec
-	cfg, err := base.MergeConfig(nsCfg.Spec, instCfg.Spec)
+	// The merging logic here is that each subsequent config overrides the previous config.
+	// For example, if the namespace Spec.Plugins is {"azure": {}, "hashicorp": {}} and the
+	// installation Spec.Plugins is {"kubernetes": {}}, the result of the merge will be {"kubernetes": {}}.
+	base := systemCfg
+	cfg, err := base.MergeReadyConfigs(*nsCfg, *instCfg)
 	if err != nil {
-		return porterv1.AgentConfigSpec{}, err
+		return porterv1.AgentConfigSpecAdapter{}, err
+	}
+
+	if !cfg.Status.Ready && !action.CreatedByAgentConfig() {
+		return porterv1.AgentConfigSpecAdapter{}, errors.New("resolved agent configuration is not ready to be used. Waiting for the next retry")
 	}
 
+	cfgList := porterv1.NewAgentConfigSpecAdapter(cfg.Spec)
 	log.V(Log4Debug).Info("resolved porter agent configuration",
-		"porterImage", cfg.GetPorterImage(),
-		"pullPolicy", cfg.GetPullPolicy(),
-		"serviceAccount", cfg.ServiceAccount,
-		"volumeSize", cfg.GetVolumeSize(),
-		"installationServiceAccount", cfg.InstallationServiceAccount,
+		"porterImage", cfgList.GetPorterImage(),
+		"pullPolicy", cfgList.GetPullPolicy(),
+		"serviceAccount", cfgList.GetServiceAccount(),
+		"volumeSize", cfgList.GetVolumeSize(),
+		"installationServiceAccount", cfgList.GetInstallationServiceAccount(),
+		"plugin", cfgList.Plugins.GetNames(),
 	)
 
-	return cfg, nil
+	return cfgList, nil
 }
 
 func (r *AgentActionReconciler) resolvePorterConfig(ctx context.Context, log logr.Logger, action *porterv1.AgentAction) (porterv1.PorterConfigSpec, error) {
@@ -583,7 +593,7 @@ func (r *AgentActionReconciler) resolvePorterConfig(ctx context.Context, log log
 	return cfg, nil
 }
 
-func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpec, pvc *corev1.PersistentVolumeClaim) ([]corev1.EnvVar, []corev1.EnvFromSource) {
+func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpecAdapter, pvc *corev1.PersistentVolumeClaim) ([]corev1.EnvVar, []corev1.EnvFromSource) {
 	sharedLabels := r.getSharedAgentLabels(action)
 
 	env := []corev1.EnvVar{
@@ -618,7 +628,7 @@ func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentC
 		},
 		{
 			Name:  "SERVICE_ACCOUNT",
-			Value: agentCfg.InstallationServiceAccount,
+			Value: agentCfg.GetInstallationServiceAccount(),
 		},
 		{
 			Name: "AFFINITY_MATCH_LABELS",
@@ -645,7 +655,7 @@ func (r *AgentActionReconciler) getAgentEnv(action *porterv1.AgentAction, agentC
 	return env, envFrom
 }
 
-func (r *AgentActionReconciler) getAgentVolumes(action *porterv1.AgentAction, pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret, imgPullSecret *corev1.Secret) ([]corev1.Volume, []corev1.VolumeMount) {
+func (r *AgentActionReconciler) getAgentVolumes(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg porterv1.AgentConfigSpecAdapter, pvc *corev1.PersistentVolumeClaim, configSecret *corev1.Secret, workdirSecret *corev1.Secret, imgPullSecret *corev1.Secret) ([]corev1.Volume, []corev1.VolumeMount) {
 	volumes := []corev1.Volume{
 		{
 			Name: porterv1.VolumePorterSharedName,
@@ -707,6 +717,24 @@ func (r *AgentActionReconciler) getAgentVolumes(action *porterv1.AgentAction, pv
 			},
 		)
 	}
+	// Only add the plugin volume if the action is not created to configure porter itself
+	if 
!action.CreatedByAgentConfig() { + claimName := agentCfg.GetPluginsPVCName(action.Namespace) + log.V(Log4Debug).Info("mounting porter plugin volume", "claim name", claimName) + volumes = append(volumes, corev1.Volume{ + Name: porterv1.VolumePorterPluginsName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + }, + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: porterv1.VolumePorterPluginsName, + MountPath: porterv1.VolumePorterPluginsPath, + SubPath: "plugins", + }) + } volumes = append(volumes, action.Spec.Volumes...) diff --git a/controllers/agentaction_controller_test.go b/controllers/agentaction_controller_test.go index b9f79e44..2d24120d 100644 --- a/controllers/agentaction_controller_test.go +++ b/controllers/agentaction_controller_test.go @@ -165,7 +165,8 @@ func TestAgentActionReconciler_Reconcile(t *testing.T) { name := "mybuns-install" testdata := []client.Object{ &porterv1.AgentAction{ - ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Generation: 1}}, + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Generation: 1}, + }, &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "default"}, ImagePullSecrets: []corev1.LocalObjectReference{{ @@ -173,6 +174,12 @@ func TestAgentActionReconciler_Reconcile(t *testing.T) { }, }, }, + &porterv1.AgentConfig{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "default", Generation: 1}, + Status: porterv1.AgentConfigStatus{ + Ready: true, + }, + }, } controller := setupAgentActionController(testdata...) @@ -380,7 +387,8 @@ func TestAgentActionReconciler_createAgentVolume(t *testing.T) { err := controller.Client.Create(context.Background(), existingPvc) require.NoError(t, err) } - pvc, err := controller.createAgentVolume(context.Background(), logr.Discard(), action, agentCfg) + spec := porterv1.NewAgentConfigSpecAdapter(agentCfg) + pvc, err := controller.createAgentVolume(context.Background(), logr.Discard(), action, spec) require.NoError(t, err) // Verify the pvc properties @@ -696,8 +704,7 @@ func TestAgentActionReconciler_createAgentJob(t *testing.T) { assert.Equal(t, "test", podTemplate.Namespace, "incorrect pod namespace") assertSharedAgentLabels(t, podTemplate.Labels) assertContains(t, podTemplate.Labels, "testLabel", "abc123", "incorrect label") - assert.Len(t, podTemplate.Spec.Volumes, 3, "incorrect pod volumes") - assert.Len(t, podTemplate.Spec.Volumes, 3) + assert.Len(t, podTemplate.Spec.Volumes, 4, "incorrect pod volumes") assert.Equal(t, porterv1.VolumePorterSharedName, podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume") assert.Equal(t, porterv1.VolumePorterConfigName, podTemplate.Spec.Volumes[1].Name, "expected the porter-config volume") assert.Equal(t, porterv1.VolumePorterWorkDirName, podTemplate.Spec.Volumes[2].Name, "expected the porter-workdir volume") @@ -722,7 +729,7 @@ func TestAgentActionReconciler_createAgentJob(t *testing.T) { assertEnvVar(t, agentContainer.Env, "LABELS", "porter.sh/jobType=bundle-installer porter.sh/managed=true porter.sh/resourceGeneration=1 porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/retry= testLabel=abc123") assertEnvVar(t, agentContainer.Env, "AFFINITY_MATCH_LABELS", "porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/resourceGeneration=1 porter.sh/retry=") assertEnvFrom(t, agentContainer.EnvFrom, "porter-env", pointer.BoolPtr(true)) - 
assert.Len(t, agentContainer.VolumeMounts, 3)
+	assert.Len(t, agentContainer.VolumeMounts, 4)
 	assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterConfigName, porterv1.VolumePorterConfigPath)
 	assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterSharedName, porterv1.VolumePorterSharedPath)
 	assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterWorkDirName, porterv1.VolumePorterWorkDirPath)
@@ -749,15 +756,18 @@ func testAgentAction() *porterv1.AgentAction {
 		},
 	}
 }
-func testAgentCfgSpec() porterv1.AgentConfigSpec {
-	return porterv1.AgentConfigSpec{
+func testAgentCfgSpec() porterv1.AgentConfigSpecAdapter {
+	spec := porterv1.AgentConfigSpec{
 		VolumeSize:                 "128Mi",
 		PorterRepository:           "getporter/custom-agent",
 		PorterVersion:              "v1.0.0",
 		PullPolicy:                 "Always",
 		ServiceAccount:             "porteraccount",
 		InstallationServiceAccount: "installeraccount",
+		Plugins:                    map[string]porterv1.Plugin{"kubernetes": {}},
 	}
+
+	return porterv1.NewAgentConfigSpecAdapter(spec)
 }
 
 func TestAgentActionReconciler_createAgentJob_withImagePullSecrets(t *testing.T) {
@@ -799,12 +809,13 @@ func TestAgentActionReconciler_createAgentJob_withImagePullSecrets(t *testing.T)
 	assert.Equal(t, "test", podTemplate.Namespace, "incorrect pod namespace")
 	assertSharedAgentLabels(t, podTemplate.Labels)
 	assertContains(t, podTemplate.Labels, "testLabel", "abc123", "incorrect label")
-	assert.Len(t, podTemplate.Spec.Volumes, 4, "incorrect pod volumes")
+	assert.Len(t, podTemplate.Spec.Volumes, 5, "incorrect pod volumes")
 	assert.Equal(t, porterv1.VolumePorterSharedName, podTemplate.Spec.Volumes[0].Name, "expected the porter-shared volume")
 	assert.Equal(t, porterv1.VolumePorterConfigName, podTemplate.Spec.Volumes[1].Name, "expected the porter-config volume")
 	assert.Equal(t, porterv1.VolumePorterWorkDirName, podTemplate.Spec.Volumes[2].Name, "expected the porter-workdir volume")
 	assert.Equal(t, porterv1.VolumeImgPullSecretName, podTemplate.Spec.Volumes[3].Name, "expected the img-pull-secret volume")
 	assert.Equal(t, testSA.ImagePullSecrets[0].Name, podTemplate.Spec.Volumes[3].Secret.SecretName, "expected the service account image pull secret name")
+	assert.Equal(t, porterv1.VolumePorterPluginsName, podTemplate.Spec.Volumes[4].Name, "expected the porter-plugins volume")
 	assert.Equal(t, "porteraccount", podTemplate.Spec.ServiceAccountName, "incorrect service account for the pod")
 	assert.Equal(t, pointer.Int64Ptr(65532), podTemplate.Spec.SecurityContext.RunAsUser, "incorrect RunAsUser")
 	assert.Equal(t, pointer.Int64Ptr(0), podTemplate.Spec.SecurityContext.RunAsGroup, "incorrect RunAsGroup")
@@ -826,12 +837,83 @@ func TestAgentActionReconciler_createAgentJob_withImagePullSecrets(t *testing.T)
 	assertEnvVar(t, agentContainer.Env, "LABELS", "porter.sh/jobType=bundle-installer porter.sh/managed=true porter.sh/resourceGeneration=1 porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/retry= testLabel=abc123")
 	assertEnvVar(t, agentContainer.Env, "AFFINITY_MATCH_LABELS", "porter.sh/resourceKind=AgentAction porter.sh/resourceName=porter-hello porter.sh/resourceGeneration=1 porter.sh/retry=")
 	assertEnvFrom(t, agentContainer.EnvFrom, "porter-env", pointer.BoolPtr(true))
-	assert.Len(t, agentContainer.VolumeMounts, 4)
+	assert.Len(t, agentContainer.VolumeMounts, 5)
 	assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterConfigName, porterv1.VolumePorterConfigPath)
 	assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterSharedName, porterv1.VolumePorterSharedPath)
assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterWorkDirName, porterv1.VolumePorterWorkDirPath) assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumeImgPullSecretName, porterv1.VolumeImgPullSecretPath) + assertVolumeMount(t, agentContainer.VolumeMounts, porterv1.VolumePorterPluginsName, porterv1.VolumePorterPluginsPath) + +} +func TestAgentActionReconciler_getAgentVolumes_agentconfigaction(t *testing.T) { + controller := setupAgentActionController() + action := testAgentAction() + agentCfg := testAgentCfgSpec() + pvc := &corev1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "mypvc"}} + configSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-agent-config"}} + workDirSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "myagentconfig"}} + volumes, volumeMounts := controller.getAgentVolumes(context.Background(), logr.Discard(), action, agentCfg, pvc, configSecret, workDirSecret, nil) + + assert.Len(t, volumes, 4, "incorrect pod volumes") + assert.Equal(t, porterv1.VolumePorterSharedName, volumes[0].Name, "expected the porter-shared volume") + assert.Equal(t, porterv1.VolumePorterConfigName, volumes[1].Name, "expected the porter-config volume") + assert.Equal(t, porterv1.VolumePorterWorkDirName, volumes[2].Name, "expected the porter-workdir volume") + assert.Equal(t, porterv1.VolumePorterPluginsName, volumes[3].Name, "expected the porter-plugins volume") + + assert.Len(t, volumeMounts, 4) + assertVolumeMount(t, volumeMounts, porterv1.VolumePorterConfigName, porterv1.VolumePorterConfigPath) + assertVolumeMount(t, volumeMounts, porterv1.VolumePorterSharedName, porterv1.VolumePorterSharedPath) + assertVolumeMount(t, volumeMounts, porterv1.VolumePorterWorkDirName, porterv1.VolumePorterWorkDirPath) + assertVolumeMount(t, volumeMounts, porterv1.VolumePorterPluginsName, porterv1.VolumePorterPluginsPath) + + // if the action is created by AgentConfig CRD, the plugin volume should not be mounted + action.OwnerReferences = append(action.OwnerReferences, metav1.OwnerReference{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentConfig", + }) + volumesForAgentCfg, volumeMountsForAgentCfg := controller.getAgentVolumes(context.Background(), logr.Discard(), action, agentCfg, pvc, configSecret, workDirSecret, nil) + assert.Len(t, volumesForAgentCfg, 3, "incorrect pod volumes") + assert.Equal(t, porterv1.VolumePorterSharedName, volumesForAgentCfg[0].Name, "expected the porter-shared volume") + assert.Equal(t, porterv1.VolumePorterConfigName, volumesForAgentCfg[1].Name, "expected the porter-config volume") + assert.Equal(t, porterv1.VolumePorterWorkDirName, volumesForAgentCfg[2].Name, "expected the porter-workdir volume") + + assert.Len(t, volumeMountsForAgentCfg, 3) + assertVolumeMount(t, volumeMountsForAgentCfg, porterv1.VolumePorterConfigName, porterv1.VolumePorterConfigPath) + assertVolumeMount(t, volumeMountsForAgentCfg, porterv1.VolumePorterSharedName, porterv1.VolumePorterSharedPath) + assertVolumeMount(t, volumeMountsForAgentCfg, porterv1.VolumePorterWorkDirName, porterv1.VolumePorterWorkDirPath) +} + +func TestAgentActionReconciler_resolveAgentConfig(t *testing.T) { + systemCfg := porterv1.AgentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: operatorNamespace}, + Status: porterv1.AgentConfigStatus{ + Ready: true, + }, + Spec: porterv1.AgentConfigSpec{ + PorterVersion: "v1.0", + }, + } + actionWithOverride := testAgentAction() + overrideCfg := porterv1.AgentConfig{ + ObjectMeta: metav1.ObjectMeta{Name: 
"test", Namespace: actionWithOverride.Namespace}, + Status: porterv1.AgentConfigStatus{ + Ready: false, + }, + Spec: porterv1.AgentConfigSpec{ + PorterVersion: "v2", + }, + } + actionWithOverride.Spec.AgentConfig = &corev1.LocalObjectReference{Name: overrideCfg.Name} + actionWithNoOverride := testAgentAction() + actionWithNoOverride.Name = "no override" + controller := setupAgentActionController(&systemCfg, &overrideCfg, actionWithOverride, actionWithNoOverride) + + _, err := controller.resolveAgentConfig(context.Background(), logr.Discard(), actionWithOverride) + require.ErrorContains(t, err, "resolved agent configuration is not ready to be used") + + _, err = controller.resolveAgentConfig(context.Background(), logr.Discard(), actionWithNoOverride) + require.NoError(t, err) } func assertSharedAgentLabels(t *testing.T, labels map[string]string) { diff --git a/controllers/agentconfig_controller.go b/controllers/agentconfig_controller.go new file mode 100644 index 00000000..dba1e763 --- /dev/null +++ b/controllers/agentconfig_controller.go @@ -0,0 +1,757 @@ +package controllers + +import ( + "context" + "fmt" + "reflect" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/go-logr/logr" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// AgentConfigReconciler calls porter to execute changes made to an AgentConfig CRD +type AgentConfigReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme +} + +//+kubebuilder:rbac:groups=porter.sh,resources=agentconfigs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=porter.sh,resources=agentconfigs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=porter.sh,resources=agentconfigs/finalizers,verbs=update +//+kubebuilder:rbac:groups=porter.sh,resources=porterconfigs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=persistentvolumes,verbs=get;list;watch;create;update;patch;delete + +// SetupWithManager sets up the controller with the Manager. +func (r *AgentConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&porterv1.AgentConfig{}, builder.WithPredicates(resourceChanged{})). + Owns(&porterv1.AgentAction{}). + Owns(&corev1.PersistentVolumeClaim{}). + Complete(r) +} + +// Reconcile is called when the spec of an agent config is changed +// or a job associated with an agent config is updated. +// Either schedule a job to handle a spec change, or update the agent config status in response to the job's state. 
+func (r *AgentConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := r.Log.WithValues("agent config", req.Name, "namespace", req.Namespace) + + // Retrieve the agent config + agentCfgData := &porterv1.AgentConfig{} + err := r.Get(ctx, req.NamespacedName, agentCfgData) + if err != nil { + if apierrors.IsNotFound(err) { + log.V(Log5Trace).Info("Reconciliation skipped: AgentConfig CRD or one of its owned resources was deleted.") + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + agentCfg := porterv1.NewAgentConfigAdapter(*agentCfgData) + + log = log.WithValues("resourceVersion", agentCfg.ResourceVersion, "generation", agentCfg.Generation, "observedGeneration", agentCfg.Status.ObservedGeneration, "status", agentCfg.Status.Ready) + log.V(Log5Trace).Info("Reconciling agent config") + + // Check if we have requested an agent run yet + action, handled, err := r.isHandled(ctx, log, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + + if action != nil { + log = log.WithValues("agentaction", action.Name) + } + + // Sync the agent config status from the action + if err = r.syncStatus(ctx, log, agentCfg, action); err != nil { + return ctrl.Result{}, err + } + + // Check if we have finished removing the agentCfg from the pvc owner reference + processed, err := r.isReadyToBeDeleted(ctx, log, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + if processed { + err = removeAgentCfgFinalizer(ctx, log, r.Client, agentCfg) + log.V(Log4Debug).Info("Reconciliation complete: Finalizer has been removed from the AgentConfig.") + return ctrl.Result{}, err + } + + updatedStatus, err := r.syncPluginInstallStatus(ctx, log, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + if updatedStatus { + return ctrl.Result{}, nil + } + + // Check if we have already handled any spec changes + if handled { + // Check if a retry was requested + if action.GetRetryLabelValue() != agentCfg.GetRetryLabelValue() { + err = r.retry(ctx, log, agentCfg, action) + log.V(Log4Debug).Info("Reconciliation complete: The associated porter agent action was retried.") + return ctrl.Result{}, err + } + + err := r.renamePluginVolume(ctx, log, action, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + } + + if r.shouldDelete(agentCfg) { + log.V(Log4Debug).Info("Reconciliation complete: cleaning up pvc and pv created by this agent config", "agentCfg", agentCfg.Name) + err = r.cleanup(ctx, log, agentCfg) + return ctrl.Result{}, err + } else if isDeleted(agentCfg) { + log.V(Log4Debug).Info("Reconciliation complete: AgentConfig CRD is ready for deletion.") + return ctrl.Result{}, nil + + } + + updated, err := ensureFinalizerSet(ctx, log, r.Client, &agentCfg.AgentConfig) + if err != nil { + return ctrl.Result{}, err + } + if updated { + // if we added a finalizer, stop processing and we will finish when the updated resource is reconciled + log.V(Log4Debug).Info("Reconciliation complete: A finalizer has been set on the agent config.") + return ctrl.Result{}, nil + } + + pvc, created, err := r.createEmptyPluginVolume(ctx, log, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + if created { + log.V(Log4Debug).Info("Created new temporary persistent volume claim.", "name", pvc.Name) + } + // Use porter to finish reconciling the agent config + err = r.applyAgentConfig(ctx, log, pvc, agentCfg) + if err != nil { + return ctrl.Result{}, err + } + + log.V(Log4Debug).Info("Reconciliation complete: A porter agent has been 
dispatched to apply changes to the agent config.") + return ctrl.Result{}, nil +} + +// Determines if this AgentConfig has been handled by Porter +func (r *AgentConfigReconciler) isHandled(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) (*porterv1.AgentAction, bool, error) { + labels := getActionLabels(agentCfg) + results := porterv1.AgentActionList{} + err := r.List(ctx, &results, client.InNamespace(agentCfg.Namespace), client.MatchingLabels(labels)) + if err != nil { + return nil, false, errors.Wrapf(err, "could not query for the current agent action") + } + + if len(results.Items) == 0 { + log.V(Log4Debug).Info("No existing agent action was found") + return nil, false, nil + } + action := results.Items[0] + log.V(Log4Debug).Info("Found existing agent action", "agentaction", action.Name) + return &action, true, nil +} + +// Run the porter agent with the command `porter plugins install <plugin>` +func (r *AgentConfigReconciler) applyAgentConfig(ctx context.Context, log logr.Logger, pvc *corev1.PersistentVolumeClaim, agentCfg *porterv1.AgentConfigAdapter) error { + log.V(Log5Trace).Info("Initializing agent config status") + agentCfg.Status.Initialize() + if err := r.saveStatus(ctx, log, agentCfg); err != nil { + return err + } + + return r.runPorterPluginInstall(ctx, log, pvc, agentCfg) +} + +// createEmptyPluginVolume returns a volume resource that will be used to install plugins on. +// It returns the volume claim and whether it was newly created. +func (r *AgentConfigReconciler) createEmptyPluginVolume(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) (*corev1.PersistentVolumeClaim, bool, error) { + pvc, exists, err := r.getPersistentVolumeClaim(ctx, agentCfg.Namespace, agentCfg.GetPluginsPVCName()) + if err != nil { + return nil, false, err + } + + if exists { + return pvc, false, nil + } + + labels := agentCfg.Spec.Plugins.GetLabels() + pvc = &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: agentCfg.Name + "-", + Namespace: agentCfg.Namespace, + Labels: labels, + Annotations: agentCfg.GetPluginsPVCNameAnnotation(), + OwnerReferences: []metav1.OwnerReference{ + { // I'm not using controllerutil.SetControllerReference because I can't track down why that throws a panic when running our tests + APIVersion: agentCfg.APIVersion, + Kind: agentCfg.Kind, + Name: agentCfg.Name, + UID: agentCfg.UID, + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: agentCfg.Spec.GetVolumeSize(), + }, + }, + }, + } + + if err := r.Create(ctx, pvc); err != nil { + return nil, false, errors.Wrap(err, "error creating the agent volume (pvc)") + } + + log.V(Log4Debug).Info("Created PersistentVolumeClaim for the Porter agent", "name", pvc.Name) + return pvc, true, nil +} + +// runPorterPluginInstall creates an AgentAction that triggers a porter run to install plugins on the passed-in volume.
+func (r *AgentConfigReconciler) runPorterPluginInstall(ctx context.Context, log logr.Logger, pvc *corev1.PersistentVolumeClaim, agentCfg *porterv1.AgentConfigAdapter) error { + if agentCfg.Spec.Plugins.IsZero() { + return nil + } + + pluginNames := agentCfg.Spec.Plugins.GetNames() + if len(pluginNames) > 1 { + log.V(Log5Trace).Error(errors.New("unexpected number of plugins defined"), "Currently only the first plugin defined in an AgentConfig resource will be installed.") + } + + installCmd := []string{"plugins", "install"} + for _, name := range pluginNames { + installCmd = append(installCmd, name) + plugin, _ := agentCfg.Spec.Plugins.GetByName(name) + if plugin.Mirror != "" { + installCmd = append(installCmd, "--mirror", plugin.Mirror) + } + if plugin.URL != "" { + installCmd = append(installCmd, "--url", plugin.URL) + } + if plugin.FeedURL != "" { + installCmd = append(installCmd, "--feed-url", plugin.FeedURL) + } + if plugin.Version != "" { + installCmd = append(installCmd, "--version", plugin.Version) + } + // TODO: once porter has the ability to install multiple plugins with one command, we will allow users + // to install multiple plugins. Currently, only the first item defined in the plugin list will be + // installed. + //lint:ignore SA4004 the current implementation only supports one plugin but we eventually will support multiple + break + } + action, err := r.createAgentAction(ctx, log, pvc, agentCfg, installCmd) + if err != nil { + return err + } + + // Update the agent config status with the agent action + return r.syncStatus(ctx, log, agentCfg, action) +} + +// createAgentAction creates an AgentAction with the temporary volume that's used for plugin installation. +func (r *AgentConfigReconciler) createAgentAction(ctx context.Context, log logr.Logger, pvc *corev1.PersistentVolumeClaim, agentCfg *porterv1.AgentConfigAdapter, args []string) (*porterv1.AgentAction, error) { + log.V(Log5Trace).Info("Creating porter agent action") + + labels := getActionLabels(agentCfg) + for k, v := range agentCfg.Labels { + labels[k] = v + } + + volume, volumeMount := definePluginVolumeAndMount(pvc) + agentCfgName := &corev1.LocalObjectReference{Name: agentCfg.Name} + + action := &porterv1.AgentAction{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: agentCfg.Namespace, + GenerateName: agentCfg.Name + "-", + Labels: labels, + Annotations: agentCfg.Annotations, + OwnerReferences: []metav1.OwnerReference{ + { // I'm not using controllerutil.SetControllerReference because I can't track down why that throws a panic when running our tests + APIVersion: agentCfg.APIVersion, + Kind: agentCfg.Kind, + Name: agentCfg.Name, + UID: agentCfg.UID, + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: porterv1.AgentActionSpec{ + AgentConfig: agentCfgName, + Args: args, + Volumes: []corev1.Volume{volume}, + VolumeMounts: []corev1.VolumeMount{volumeMount}, + }, + } + + if err := r.Create(ctx, action); err != nil { + return nil, errors.Wrap(err, "error creating the porter agent action") + } + + log.V(Log4Debug).Info("Created porter agent action", "name", action.Name) + return action, nil +} + +// Check the status of the porter-agent job and use that to update the AgentConfig status +func (r *AgentConfigReconciler) syncStatus(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter, action *porterv1.AgentAction) error { + + origStatus := agentCfg.Status + + applyAgentAction(log, agentCfg, action) + + // if the spec changed, we need to reset the readiness of
the agent config + if (origStatus.Ready && origStatus.ObservedGeneration != agentCfg.Generation) || agentCfg.Status.Phase != porterv1.PhaseSucceeded { + agentCfg.Status.Ready = false + } + + if !reflect.DeepEqual(origStatus, agentCfg.Status) { + return r.saveStatus(ctx, log, agentCfg) + } + + return nil +} + +// Only update the status with a PATCH, don't clobber the entire agent config +func (r *AgentConfigReconciler) saveStatus(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) error { + log.V(Log5Trace).Info("Patching agent config status") + cfg := &agentCfg.AgentConfig + return PatchObjectWithRetry(ctx, log, r.Client, r.Client.Status().Patch, cfg, func() client.Object { + return &porterv1.AgentConfig{} + }) +} + +// Sync the retry annotation from the agent config to the agent action to trigger another run. +func (r *AgentConfigReconciler) retry(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter, action *porterv1.AgentAction) error { + log.V(Log5Trace).Info("Initializing agent config status") + agentCfg.Status.Initialize() + agentCfg.Status.Action = &corev1.LocalObjectReference{Name: action.Name} + agentCfg.Status.Ready = false + if err := r.saveStatus(ctx, log, agentCfg); err != nil { + return err + } + log.V(Log5Trace).Info("Retrying associated porter agent action") + retry := agentCfg.GetRetryLabelValue() + action.SetRetryAnnotation(retry) + if err := r.Update(ctx, action); err != nil { + return errors.Wrap(err, "error updating the associated porter agent action") + } + + log.V(Log4Debug).Info("Retried associated porter agent action", "name", action.Name, "retry", retry) + return nil +} + +func definePluginVolumeAndMount(pvc *corev1.PersistentVolumeClaim) (corev1.Volume, corev1.VolumeMount) { + volume := corev1.Volume{ + Name: porterv1.VolumePorterPluginsName, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + } + + volumeMount := corev1.VolumeMount{ + Name: porterv1.VolumePorterPluginsName, + MountPath: porterv1.VolumePorterPluginsPath, + SubPath: "plugins", + } + + return volume, volumeMount +} + +// createHashPVC creates a new pvc using the hash of all plugins metadata. +// It uses the label selector to make sure we will select the volume that has plugins installed.
+func (r *AgentConfigReconciler) createHashPVC( + ctx context.Context, + log logr.Logger, + agentCfg *porterv1.AgentConfigAdapter, +) (*corev1.PersistentVolumeClaim, error) { + log.V(Log4Debug).Info("Creating new pvc using the hash of all plugins metadata", "new persistentvolumeclaim", agentCfg.GetPluginsPVCName()) + labels := agentCfg.Spec.Plugins.GetLabels() + labels[porterv1.LabelResourceName] = agentCfg.Name + + selector := &metav1.LabelSelector{ + MatchLabels: labels, + } + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentCfg.GetPluginsPVCName(), + Namespace: agentCfg.Namespace, + Labels: labels, + OwnerReferences: []metav1.OwnerReference{ + { // I'm not using controllerutil.SetControllerReference because I can't track down why that throws a panic when running our tests + APIVersion: agentCfg.APIVersion, + Kind: agentCfg.Kind, + Name: agentCfg.Name, + UID: agentCfg.UID, + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany}, + Selector: selector, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: agentCfg.Spec.GetVolumeSize(), + }, + }, + }, + } + + if err := r.Create(ctx, pvc); err != nil { + return nil, errors.Wrap(err, "error creating the agent volume (pvc)") + } + + return pvc, nil + +} + +// removeAgentCfgFinalizer deletes the porter finalizer from the specified resource and saves it. +func removeAgentCfgFinalizer(ctx context.Context, log logr.Logger, client client.Client, agentCfg *porterv1.AgentConfigAdapter) error { + log.V(Log5Trace).Info("removing finalizer") + controllerutil.RemoveFinalizer(agentCfg, porterv1.FinalizerName) + return client.Update(ctx, &agentCfg.AgentConfig) +} + +func (r *AgentConfigReconciler) shouldDelete(agentCfg *porterv1.AgentConfigAdapter) bool { + // ignore a deleted CRD with no finalizers + return isDeleted(agentCfg) && isFinalizerSet(agentCfg) +} + +// cleanup removes the owner references on both the pvc and pv so that when no resource is referencing them, GC can clean them up after the agentCfg has been deleted +func (r *AgentConfigReconciler) cleanup(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) error { + if agentCfg.Status.Ready { + agentCfg.Status.Ready = false + return r.saveStatus(ctx, log, agentCfg) + } + + pvcName := agentCfg.GetPluginsPVCName() + log.V(Log4Debug).Info("Start cleaning up persistent volume claim.", "persistentvolumeclaim", pvcName, "namespace", agentCfg.Namespace) + + key := client.ObjectKey{Namespace: agentCfg.Namespace, Name: pvcName} + pvc := &corev1.PersistentVolumeClaim{} + err := r.Get(ctx, key, pvc) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + pv := &corev1.PersistentVolume{} + pvKey := client.ObjectKey{Namespace: pvc.Namespace, Name: pvc.Spec.VolumeName} + err = r.Get(ctx, pvKey, pv) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + // remove owner reference from the persistent volume claim + if idx, exist := containsOwner(pvc.GetOwnerReferences(), agentCfg); exist { + pvc.OwnerReferences = removeOwnerAtIdx(pvc.GetOwnerReferences(), idx) + err := r.Update(ctx, pvc) + if err != nil { + return err + } + return nil + } + + return nil + +} + +func (r *AgentConfigReconciler) getPersistentVolumeClaim(ctx context.Context, namespace string, name string)
(*corev1.PersistentVolumeClaim, bool, error) { + key := client.ObjectKey{Namespace: namespace, Name: name} + newPVC := &corev1.PersistentVolumeClaim{} + err := r.Get(ctx, key, newPVC) + if apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + + return newPVC, true, nil + +} + +func (r *AgentConfigReconciler) getPersistentVolume(ctx context.Context, namespace string, name string) (*corev1.PersistentVolume, error) { + key := client.ObjectKey{Namespace: namespace, Name: name} + pv := &corev1.PersistentVolume{} + err := r.Get(ctx, key, pv) + if err != nil { + return nil, err + } + + return pv, nil + +} + +// bindPVWithPluginPVC binds the persistent volume to the claim with a name created by the hash of all plugins defined on an AgentConfigSpec. +func (r *AgentConfigReconciler) bindPVWithPluginPVC(ctx context.Context, log logr.Logger, tempPVC *corev1.PersistentVolumeClaim, agentCfg *porterv1.AgentConfigAdapter) (bool, error) { + pv, err := r.getPersistentVolume(ctx, tempPVC.Namespace, tempPVC.Spec.VolumeName) + if err != nil { + return false, err + } + + if _, exist := pv.Labels[porterv1.LabelPluginsHash]; !exist { + labels := agentCfg.Spec.Plugins.GetLabels() + labels[porterv1.LabelResourceName] = agentCfg.Name + pv.Labels = labels + pv.Spec.AccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + pv.Spec.ClaimRef = &corev1.ObjectReference{ + Kind: tempPVC.Kind, + Namespace: tempPVC.Namespace, + Name: agentCfg.GetPluginsPVCName(), + APIVersion: tempPVC.APIVersion, + } + if err := r.Update(ctx, pv); err != nil { + return false, err + } + + return true, nil + } + + return false, nil +} + +// deleteTemporaryPVC deletes the temporary persistent volume claim created by the agent config controller. +func (r *AgentConfigReconciler) deleteTemporaryPVC(ctx context.Context, log logr.Logger, tempPVC *corev1.PersistentVolumeClaim, agentCfg *porterv1.AgentConfigAdapter) error { + hasFinalizer := controllerutil.ContainsFinalizer(tempPVC, "kubernetes.io/pvc-protection") + if hasFinalizer { + log.V(Log4Debug).Info("Starting to remove finalizers from temporary pvc.", "persistentvolumeclaim", tempPVC.Name, "namespace", tempPVC.Namespace) + controllerutil.RemoveFinalizer(tempPVC, "kubernetes.io/pvc-protection") + if err := r.Client.Update(ctx, tempPVC); err != nil { + return err + } + log.V(Log4Debug).Info("Removed finalizers from temporary pvc.", "persistentvolumeclaim", tempPVC.Name, "namespace", tempPVC.Namespace) + + return nil + } + + log.V(Log4Debug).Info("Deleting temporary persistent volume claim.", "persistentvolumeclaim", tempPVC.Name, "namespace", tempPVC.Namespace) + if err := r.Delete(ctx, tempPVC); err != nil { + return err + } + log.V(Log4Debug).Info("Deleted temporary persistent volume claim.", "persistentvolumeclaim", tempPVC.Name, "namespace", tempPVC.Namespace) + return nil +} + +// isReadyToBeDeleted checks if an AgentConfig is ready to be deleted. +// It checks if any related persistent volume resources are released.
+func (r *AgentConfigReconciler) isReadyToBeDeleted(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) (bool, error) { + if !isDeleted(agentCfg) { + return false, nil + } + + pvc, exists, err := r.getPersistentVolumeClaim(ctx, agentCfg.Namespace, agentCfg.GetPluginsPVCName()) + if err != nil { + return false, err + } + if exists { + ref := pvc.GetOwnerReferences() + if _, exist := containsOwner(ref, agentCfg); exist { + return false, nil + } + } + + return true, nil +} + +func (r *AgentConfigReconciler) getExistingPluginPVCs(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) (readyPVC *corev1.PersistentVolumeClaim, tempPVC *corev1.PersistentVolumeClaim, err error) { + results := &corev1.PersistentVolumeClaimList{} + err = r.List(ctx, results, client.InNamespace(agentCfg.Namespace), client.MatchingLabels(agentCfg.Spec.Plugins.GetLabels())) + if err != nil && !apierrors.IsNotFound(err) { + return nil, nil, err + } + + hashedName := agentCfg.Spec.Plugins.GetPVCName(agentCfg.Namespace) + for _, item := range results.Items { + item := item + if item.Name == hashedName { + readyPVC = &item + log.V(Log4Debug).Info("Plugin persistent volume claim found", "persistentvolumeclaim", readyPVC.Name, "namespace", readyPVC.Namespace, "status", readyPVC.Status.Phase) + continue + } + + if annotation := item.GetAnnotations(); annotation != nil { + hash, ok := annotation[porterv1.AnnotationAgentCfgPluginsHash] + if ok && hash == hashedName { + tempPVC = &item + log.V(Log4Debug).Info("Temporary plugin persistent volume claim found", "persistentvolumeclaim", tempPVC.Name, "namespace", tempPVC.Namespace, "status", tempPVC.Status.Phase) + } + } + + if readyPVC != nil && tempPVC != nil { + break + } + } + + return readyPVC, tempPVC, nil +} + +func checkPluginAndAgentReadiness(agentCfg *porterv1.AgentConfigAdapter, hashedPluginsPVC, tempPVC *corev1.PersistentVolumeClaim) (pvcReady bool, cfgReady bool) { + if hashedPluginsPVC != nil && tempPVC == nil && !isDeleted(agentCfg) { + cfgReady = agentCfg.Status.Phase == porterv1.PhaseSucceeded + pvcReady = hashedPluginsPVC.Status.Phase == corev1.ClaimBound + } + + return pvcReady, cfgReady +} + +func (r *AgentConfigReconciler) updateOwnerReference(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter, readyPVC *corev1.PersistentVolumeClaim) (bool, error) { + // update readyPVC to include this agentCfg in its owner references so that when a delete happens, we know another agentCfg is still using this pvc + if _, exist := containsOwner(readyPVC.OwnerReferences, agentCfg); !exist { + err := controllerutil.SetOwnerReference(&agentCfg.AgentConfig, readyPVC, r.Scheme) + if err != nil { + return false, fmt.Errorf("failed to set owner reference: %w", err) + } + err = r.Update(ctx, readyPVC) + if err != nil { + return false, err + } + return true, nil + + } + + var shouldUpdateStatus bool + if agentCfg.Status.Phase == porterv1.PhasePending { + // update the agentCfg status to be ready + agentCfg.Status.Phase = porterv1.PhaseSucceeded + shouldUpdateStatus = true + } + if !agentCfg.Status.Ready { + agentCfg.Status.Ready = true + shouldUpdateStatus = true + } + if shouldUpdateStatus { + if err := r.saveStatus(ctx, log, agentCfg); err != nil { + return false, err + } + return true, nil + } + + return false, nil +} + +func containsOwner(owners []metav1.OwnerReference, agentCfg *porterv1.AgentConfigAdapter) (int, bool) { + for i, owner := range owners { + if owner.APIVersion == agentCfg.APIVersion && owner.Kind ==
agentCfg.Kind && owner.Name == agentCfg.Name { + return i, true + } + + } + return -1, false +} + +func removeOwnerAtIdx(s []metav1.OwnerReference, index int) []metav1.OwnerReference { + ret := make([]metav1.OwnerReference, 0) + ret = append(ret, s[:index]...) + return append(ret, s[index+1:]...) +} + +func (r *AgentConfigReconciler) syncPluginInstallStatus(ctx context.Context, log logr.Logger, agentCfg *porterv1.AgentConfigAdapter) (bool, error) { + if agentCfg.Spec.Plugins.IsZero() && !agentCfg.Status.Ready { + agentCfg.Status.Ready = true + err := r.saveStatus(ctx, log, agentCfg) + return true, err + } + + readyPVC, tempPVC, err := r.getExistingPluginPVCs(ctx, log, agentCfg) + if err != nil { + return false, err + } + // Check to see if there is a plugin volume that already has all the defined plugins installed + pluginReady, agentCfgReady := checkPluginAndAgentReadiness(agentCfg, readyPVC, tempPVC) + log.V(Log4Debug).Info("Existing volume and agent status", "volume status", pluginReady, "agent status", agentCfgReady) + if pluginReady && agentCfgReady { + + updated, err := r.updateOwnerReference(ctx, log, agentCfg, readyPVC) + if err != nil { + return false, err + } + + if updated { + return true, nil + } + } + + // if the plugin is not ready, we just need to wait for it before we move forward + if agentCfgReady { + return true, nil + } + + return false, nil +} + +func (r *AgentConfigReconciler) renamePluginVolume(ctx context.Context, log logr.Logger, action *porterv1.AgentAction, agentCfg *porterv1.AgentConfigAdapter) error { + // if the plugin install action is not finished, we need to wait for it before acting further + if !(apimeta.IsStatusConditionTrue(action.Status.Conditions, string(porterv1.ConditionComplete)) && action.Status.Phase == porterv1.PhaseSucceeded) { + log.V(Log4Debug).Info("Plugins are not ready yet.", "action status", action.Status) + return nil + } + + log.V(Log4Debug).Info("Renaming temporary persistent volume claim.", "new persistentvolumeclaim", agentCfg.GetPluginsPVCName()) + + readyPVC, tempPVC, err := r.getExistingPluginPVCs(ctx, log, agentCfg) + if err != nil { + return err + } + // once the plugin install action has completed, rebind the pv to the hashed pvc and then delete the temporary pvc + shouldRenameTmpPVC := tempPVC != nil && readyPVC == nil + if shouldRenameTmpPVC { + updated, err := r.bindPVWithPluginPVC(ctx, log, tempPVC, agentCfg) + if err != nil { + return err + } + if updated { + return nil + } + return r.deleteTemporaryPVC(ctx, log, tempPVC, agentCfg) + } + + if tempPVC == nil { + hashedPVCName := agentCfg.GetPluginsPVCName() + // create the pvc with the hash of plugins metadata + _, err = r.createHashPVC(ctx, log, agentCfg) + if err != nil { + return err + } + + log.V(Log4Debug).Info("Created the new PVC with plugins hash as its name.", "new persistentvolumeclaim", hashedPVCName) + } + + return nil +} diff --git a/controllers/agentconfig_controller_test.go b/controllers/agentconfig_controller_test.go new file mode 100644 index 00000000..6cd5d23a --- /dev/null +++ b/controllers/agentconfig_controller_test.go @@ -0,0 +1,392 @@ +package controllers + +import ( + "context" + "testing" + "time" + + porterv1 "get.porter.sh/operator/api/v1" + "github.com/go-logr/logr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/pointer" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestAgentConfigReconciler_Reconcile(t *testing.T) { + // long test is long + // Run through a full resource lifecycle: create, update, delete + ctx := context.Background() + + namespace := "test" + name := "mybuns" + testAgentCfg := &porterv1.AgentConfig{ + ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name, Generation: 1}, + Spec: porterv1.AgentConfigSpec{ + Plugins: map[string]porterv1.Plugin{"kubernetes": {}}, + }, + } + testdata := []client.Object{ + testAgentCfg, + } + controller := setupAgentConfigController(testdata...) + + var ( + agentCfg *porterv1.AgentConfigAdapter + agentCfgData porterv1.AgentConfig + ) + + triggerReconcile := func() { + fullname := types.NamespacedName{Namespace: namespace, Name: testAgentCfg.Name} + key := client.ObjectKey{Namespace: namespace, Name: testAgentCfg.Name} + + request := controllerruntime.Request{ + NamespacedName: fullname, + } + result, err := controller.Reconcile(ctx, request) + require.NoError(t, err) + require.True(t, result.IsZero()) + + err = controller.Get(ctx, key, &agentCfgData) + if !apierrors.IsNotFound(err) { + require.NoError(t, err) + } + agentCfg = porterv1.NewAgentConfigAdapter(agentCfgData) + } + + triggerReconcile() + + // Verify the agent config was picked up and the status initialized + assert.Equal(t, porterv1.PhaseUnknown, agentCfg.Status.Phase, "New resources should be initialized to Phase: Unknown") + + triggerReconcile() + _, ok := agentCfg.Spec.Plugins.GetByName("kubernetes") + require.True(t, ok) + + triggerReconcile() + + // Verify an AgentAction was created and set on the status + require.NotNil(t, agentCfg.Status.Action, "expected Action to be set") + var action porterv1.AgentAction + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: agentCfg.Status.Action.Name}, &action)) + assert.Equal(t, "1", action.Labels[porterv1.LabelResourceGeneration], "The wrong action is set on the status") + + // Mark the action as scheduled + action.Status.Phase = porterv1.PhasePending + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionScheduled), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) + + triggerReconcile() + + // Verify the agent config status was synced with the action + assert.Equal(t, porterv1.PhasePending, agentCfg.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(agentCfg.Status.Conditions, string(porterv1.ConditionScheduled))) + + // Mark the action as started + action.Status.Phase = porterv1.PhaseRunning + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionStarted), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) + + triggerReconcile() + + // Verify that the agent config status was synced with the action + assert.Equal(t, porterv1.PhaseRunning, agentCfg.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(agentCfg.Status.Conditions, string(porterv1.ConditionStarted))) + + // Complete the action + action.Status.Phase = porterv1.PhaseSucceeded + action.Status.Conditions = []metav1.Condition{{Type: 
string(porterv1.ConditionComplete), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) + require.False(t, agentCfg.Status.Ready) + + // once the agent action is completed, the PVC should have been bound to a PV created by kubernetes + pvc := &corev1.PersistentVolumeClaim{} + key := client.ObjectKey{Namespace: agentCfg.Namespace, Name: action.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName} + require.NoError(t, controller.Get(ctx, key, pvc)) + pv := &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-agent-config", + Namespace: agentCfg.Namespace, + OwnerReferences: pvc.OwnerReferences, + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Kind: pvc.Kind, + Namespace: pvc.Namespace, + Name: pvc.Name, + UID: pvc.UID, + APIVersion: pvc.APIVersion, + ResourceVersion: pvc.ResourceVersion, + }, + }, + } + require.NoError(t, controller.Create(ctx, pv)) + pvc.Spec.VolumeName = pv.Name + pvc.Status.Phase = corev1.ClaimBound + // the pvc controller should have updated the pvc with the pvc-protection finalizer + pvc.Finalizers = append(pvc.Finalizers, "kubernetes.io/pvc-protection") + require.NoError(t, controller.Update(ctx, pvc)) + + triggerReconcile() + + // Verify that the agent config status was synced with the action + var updatedAction porterv1.AgentAction + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: agentCfg.Status.Action.Name}, &updatedAction)) + assert.Equal(t, "1", updatedAction.Labels[porterv1.LabelResourceGeneration], "The wrong action is set on the status") + require.NotNil(t, agentCfg.Status.Action, "expected Action to still be set") + assert.Equal(t, porterv1.PhaseSucceeded, agentCfg.Status.Phase, "incorrect Phase") + require.False(t, agentCfg.Status.Ready) + + require.NotEmpty(t, updatedAction.Spec.Volumes) + + // verify that the pv that has plugins installed has been updated with the expected labels and claim reference + pluginsPV := &corev1.PersistentVolume{} + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: pv.Name}, pluginsPV)) + pluginLabels, exists := pluginsPV.Labels[porterv1.LabelPluginsHash] + require.True(t, exists) + require.Equal(t, agentCfg.Spec.Plugins.GetLabels()[porterv1.LabelPluginsHash], pluginLabels) + rn, exists := pluginsPV.Labels[porterv1.LabelResourceName] + require.True(t, exists) + require.Equal(t, agentCfg.Name, rn) + require.Equal(t, agentCfg.GetPluginsPVCName(), pluginsPV.Spec.ClaimRef.Name) + + triggerReconcile() + + // verify that the tmp pvc's finalizer is deleted + tmpPVC := &corev1.PersistentVolumeClaim{} + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: pvc.Name}, tmpPVC)) + require.Empty(t, tmpPVC.GetFinalizers()) + + triggerReconcile() + + tmpPVC = &corev1.PersistentVolumeClaim{} + require.True(t, apierrors.IsNotFound(controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: pvc.Name}, tmpPVC))) + + triggerReconcile() + require.False(t, agentCfg.Status.Ready) + + // the renamed pvc should be created with label selector set and correct access mode + renamedPVC := &corev1.PersistentVolumeClaim{} + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: agentCfg.GetPluginsPVCName()}, renamedPVC)) + readonlyMany := []corev1.PersistentVolumeAccessMode{corev1.ReadOnlyMany} + require.Equal(t, readonlyMany, renamedPVC.Spec.AccessModes) + matchLabels :=
agentCfg.Spec.Plugins.GetLabels() + matchLabels[porterv1.LabelResourceName] = agentCfg.Name + require.Equal(t, matchLabels, renamedPVC.Spec.Selector.MatchLabels) + + // the renamed pvc should eventually be bound to the pv + renamedPVC.Spec.VolumeName = pv.Name + renamedPVC.Status.Phase = corev1.ClaimBound + require.NoError(t, controller.Update(ctx, renamedPVC)) + + triggerReconcile() + require.True(t, agentCfg.Status.Ready) + + // Fail the action + action.Status.Phase = porterv1.PhaseFailed + action.Status.Conditions = []metav1.Condition{{Type: string(porterv1.ConditionFailed), Status: metav1.ConditionTrue}} + require.NoError(t, controller.Status().Update(ctx, &action)) + + triggerReconcile() + + // Verify that the agent config status shows the action is failed + require.NotNil(t, agentCfg.Status.Action, "expected Action to still be set") + require.False(t, agentCfg.Status.Ready, "agent config should not be ready if the agent action has failed") + assert.Equal(t, porterv1.PhaseFailed, agentCfg.Status.Phase, "incorrect Phase") + assert.True(t, apimeta.IsStatusConditionTrue(agentCfg.Status.Conditions, string(porterv1.ConditionFailed))) + + // Edit the agent config spec + agentCfgData.Generation = 2 + agentCfgData.Spec.Plugins = map[string]porterv1.Plugin{"azure": {}} + require.NoError(t, controller.Update(ctx, &agentCfgData)) + + triggerReconcile() + + // Verify that the agent config status was re-initialized + assert.Equal(t, int64(2), agentCfg.Status.ObservedGeneration) + assert.Equal(t, porterv1.PhaseUnknown, agentCfg.Status.Phase, "New resources should be initialized to Phase: Unknown") + assert.Empty(t, agentCfg.Status.Conditions, "Conditions should have been reset") + assert.False(t, agentCfg.Status.Ready) + + triggerReconcile() + + // Retry the last action + lastAction := agentCfg.Status.Action.Name + agentCfgData.Annotations = map[string]string{porterv1.AnnotationRetry: "retry-1"} + require.NoError(t, controller.Update(ctx, &agentCfgData)) + + triggerReconcile() + + // Verify that action has retry set on it now + require.NotNil(t, agentCfg.Status.Action, "Expected the action to still be set") + assert.Equal(t, lastAction, agentCfg.Status.Action.Name, "Expected the action to be the same") + // get the latest version of the action + require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: agentCfg.Status.Action.Name}, &action)) + assert.NotEmpty(t, action.Annotations[porterv1.AnnotationRetry], "Expected the action to have its retry annotation set") + + assert.Equal(t, int64(2), agentCfg.Status.ObservedGeneration) + assert.NotEmpty(t, agentCfg.Status.Action, "Expected the action to still be set") + assert.Equal(t, porterv1.PhaseUnknown, agentCfg.Status.Phase, "New resources should be initialized to Phase: Unknown") + assert.Empty(t, agentCfg.Status.Conditions, "Conditions should have been reset") + + // Delete the agent config (setting the delete timestamp directly instead of client.Delete because otherwise the fake client just removes it immediately) + // The fake client doesn't really follow finalizer logic + now := metav1.NewTime(time.Now()) + agentCfgData.Generation = 3 + agentCfgData.Spec.Plugins = map[string]porterv1.Plugin{"kubernetes": {}} + agentCfgData.DeletionTimestamp = &now + require.NoError(t, controller.Update(ctx, &agentCfgData)) + triggerReconcile() + + // remove the agent config from the pvc's owner reference list + triggerReconcile() + // Verify that the pvc and pv no longer have the agent config in their owner reference list +
require.NoError(t, controller.Get(ctx, client.ObjectKey{Namespace: agentCfg.Namespace, Name: agentCfg.GetPluginsPVCName()}, renamedPVC)) + for _, owner := range renamedPVC.OwnerReferences { + require.NotEqual(t, "AgentConfig", owner.Kind, "failed to remove agent config from pvc's owner reference list after deletion") + } + + // this trigger will then remove the agent config's finalizer + triggerReconcile() + // Verify that the agent config was removed + err := controller.Get(ctx, client.ObjectKeyFromObject(&agentCfg.AgentConfig), &agentCfg.AgentConfig) + require.True(t, apierrors.IsNotFound(err), "expected the agent config to be deleted") + + // Verify that reconcile doesn't error out after it's deleted + triggerReconcile() +} + +func TestAgentConfigReconciler_createAgentAction(t *testing.T) { + ctx := context.Background() + + agentCfg := &porterv1.AgentConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test", + Name: "myblog", + UID: "random-uid", + Generation: 1, + Labels: map[string]string{ + "testLabel": "abc123", + }, + Annotations: map[string]string{ + porterv1.AnnotationRetry: "2021-2-2 12:00:00", + }, + }, + Spec: porterv1.AgentConfigSpec{ + Plugins: map[string]porterv1.Plugin{"test": {Version: "v1.2.3"}}, + VolumeSize: "64Mi", + }, + } + wrapper := porterv1.NewAgentConfigAdapter(*agentCfg) + // once the agent action is completed, the PVC should have been bound to a PV created by kubernetes + labels := wrapper.Spec.Plugins.GetLabels() + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: agentCfg.Name + "-", + Namespace: agentCfg.Namespace, + Labels: labels, + Annotations: wrapper.GetPluginsPVCNameAnnotation(), + OwnerReferences: []metav1.OwnerReference{ + { // I'm not using controllerutil.SetControllerReference because I can't track down why that throws a panic when running our tests + APIVersion: agentCfg.APIVersion, + Kind: agentCfg.Kind, + Name: agentCfg.Name, + UID: agentCfg.UID, + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + }, + }, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: wrapper.Spec.GetVolumeSize(), + }, + }, + }, + } + pv := &corev1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pv-agent-config", + Namespace: agentCfg.Namespace, + OwnerReferences: pvc.OwnerReferences, + }, + Spec: corev1.PersistentVolumeSpec{ + ClaimRef: &corev1.ObjectReference{ + Kind: pvc.Kind, + Namespace: pvc.Namespace, + Name: pvc.Name, + UID: pvc.UID, + APIVersion: pvc.APIVersion, + ResourceVersion: pvc.ResourceVersion, + }, + }, + } + pvc.Spec.VolumeName = pv.Name + pvc.Status.Phase = corev1.ClaimBound + // the pvc controller should have updated the pvc with the pvc-protection finalizer + pvc.Finalizers = append(pvc.Finalizers, "kubernetes.io/pvc-protection") + controller := setupAgentConfigController(pvc, pv) + action, err := controller.createAgentAction(ctx, logr.Discard(), pvc, wrapper, nil) + require.NoError(t, err) + assert.Equal(t, "test", action.Namespace) + assert.Contains(t, action.Name, "myblog-") + assert.Len(t, action.OwnerReferences, 1, "expected an owner reference") + wantOwnerRef := metav1.OwnerReference{ + APIVersion: porterv1.GroupVersion.String(), + Kind: "AgentConfig", + Name: "myblog", +
UID: "random-uid", + Controller: pointer.BoolPtr(true), + BlockOwnerDeletion: pointer.BoolPtr(true), + } + assert.Equal(t, wantOwnerRef, action.OwnerReferences[0], "incorrect owner reference") + + assertContains(t, action.Annotations, porterv1.AnnotationRetry, agentCfg.Annotations[porterv1.AnnotationRetry], "incorrect annotation") + assertContains(t, action.Labels, porterv1.LabelManaged, "true", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceKind, "AgentConfig", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceName, "myblog", "incorrect label") + assertContains(t, action.Labels, porterv1.LabelResourceGeneration, "1", "incorrect label") + assertContains(t, action.Labels, "testLabel", "abc123", "incorrect label") + + assert.NotEmpty(t, action.Spec.Volumes, "incorrect Volumes") + assert.Equal(t, action.Spec.Volumes[0].Name, porterv1.VolumePorterPluginsName, "incorrect Volumes") + assert.Equal(t, action.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, pvc.Name, "incorrect Volumes") + assert.NotEmpty(t, action.Spec.VolumeMounts, "incorrect Volumes mounts") + assert.Equal(t, action.Spec.VolumeMounts[0].Name, porterv1.VolumePorterPluginsName, "incorrect VolumeMounts") + assert.Equal(t, action.Spec.VolumeMounts[0].MountPath, porterv1.VolumePorterPluginsPath, "incorrect VolumeMounts") + assert.Equal(t, action.Spec.VolumeMounts[0].SubPath, "plugins", "incorrect VolumeMounts") +} + +func setupAgentConfigController(objs ...client.Object) AgentConfigReconciler { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(porterv1.AddToScheme(scheme)) + + fakeBuilder := fake.NewClientBuilder() + fakeBuilder.WithScheme(scheme) + fakeBuilder.WithObjects(objs...) + fakeClient := fakeBuilder.Build() + + return AgentConfigReconciler{ + Log: logr.Discard(), + Client: fakeClient, + Scheme: scheme, + } +} diff --git a/controllers/porter_resource.go b/controllers/porter_resource.go index c331e3f4..27c28791 100644 --- a/controllers/porter_resource.go +++ b/controllers/porter_resource.go @@ -87,13 +87,13 @@ func applyAgentAction(log logr.Logger, resource PorterResource, action *porterv1 resource.SetStatus(status) } -// this is our kubectl delete check +// isDeleted checks whether a porter resource is deleted. func isDeleted(resource PorterResource) bool { timestamp := resource.GetDeletionTimestamp() return timestamp != nil && !timestamp.IsZero() } -// ensure delete action is completed before delete +// isDeleteProcessed ensures the delete action is completed before deletion func isDeleteProcessed(resource PorterResource) bool { status := resource.GetStatus() return isDeleted(resource) && apimeta.IsStatusConditionTrue(status.Conditions, string(porterv1.ConditionComplete)) diff --git a/docs/content/file-formats.md b/docs/content/file-formats.md index cb99a82a..2350a1f8 100644 --- a/docs/content/file-formats.md +++ b/docs/content/file-formats.md @@ -153,6 +153,9 @@ spec: volumeSize: 64Mi pullPolicy: Always installationServiceAccount: installation-agent + plugins: + kubernetes: + version: v1.0.0 ``` | Field | Required | Default | Description | @@ -163,7 +166,11 @@ spec: | installationServiceAccount | false | (none) | The service account to run the Kubernetes pod/job for the installation image. | | volumeSize | false | 64Mi | The size of the persistent volume that Porter will request when running the Porter Agent. It is used to share data between the Porter Agent and the bundle invocation image.
It must be large enough to store any files used by the bundle including credentials, parameters and outputs. | | pullPolicy | false | PullAlways when the tag is canary or latest, otherwise PullIfNotPresent. | Specifies when to pull the Porter Agent image | - +| plugins | false | (none) | The plugins that the Porter operator installs before running bundles | +| plugins.<name>.version | false | latest | The version of the plugin | +| plugins.<name>.feedURL | false | https://cdn.porter.sh/plugins/atom.xml | The URL of an atom feed from which the plugin can be downloaded | +| plugins.<name>.url | false | https://cdn.porter.sh/plugins/ | The URL from which the plugin can be downloaded | +| plugins.<name>.mirror | false | https://cdn.porter.sh/ | The mirror of the official Porter assets | [AgentConfig]: /operator/glossary/#agentconfig ### Service Account diff --git a/docs/content/glossary.md b/docs/content/glossary.md index 233d2cba..0cf133dd 100644 --- a/docs/content/glossary.md +++ b/docs/content/glossary.md @@ -107,6 +107,8 @@ Values are merged from all resolved AgentConfig resources, so that you can defin * Using the AgentConfig with the name "default" defined in the operator namespace. * By default, using a reasonable set of defaults for the default installation of the Operator, assuming that the default RBAC roles exist in the cluster. +🚨 WARNING: Currently, only one plugin per AgentConfig can be installed through the plugins configuration. + [AgentConfig]: /operator/file-formats/#agentconfig ### PorterConfig diff --git a/docs/content/quickstart/_index.md b/docs/content/quickstart/_index.md index c41a5b6c..f10ebc38 100644 --- a/docs/content/quickstart/_index.md +++ b/docs/content/quickstart/_index.md @@ -312,6 +312,30 @@ Currently the Operator only supports the first imagePullSecret in a service acco A single secret with authentication for multiple registries can be achieved by [creating a secret from a file](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials). +## Install Plugins + +Create the `quickstart_agentconfig.yaml` with the following content: +```yaml +apiVersion: porter.sh/v1 +kind: AgentConfig +metadata: + name: agentconfig-quickstart + namespace: quickstart +spec: + plugins: + kubernetes: + version: v1.0.0 + feedURL: https://cdn.porter.sh/plugins/atom.xml +``` + +Create the `AgentConfig` custom resource by running `kubectl apply -f quickstart_agentconfig.yaml`. + +The operator will use `porter plugins install` to install the defined plugins. Any bundle actions that depend on configured plugins will wait to execute until plugin installation finishes. + +If no plugins are required, this field is optional. + +🚨 WARNING: Currently, the operator can only install one plugin per AgentConfig. If more than one plugin is defined in the CRD, only the first plugin (sorted alphabetically) is installed and the rest are ignored. + ## Next Steps You now know how to install and configure the Porter Operator. The project is still incomplete, so watch this repository for updates!
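To make the field-to-flag mapping above concrete, here is a minimal, self-contained Go sketch (not part of this diff) of how a `plugins` map like the one in `quickstart_agentconfig.yaml` becomes the `porter plugins install` arguments. It mirrors the loop in `runPorterPluginInstall` from `controllers/agentconfig_controller.go`; the `Plugin` struct here is a simplified stand-in for the operator's type:

```go
package main

import (
	"fmt"
	"sort"
)

// Plugin mirrors the fields documented above: version, feedURL, url, and mirror.
type Plugin struct {
	Version string
	FeedURL string
	URL     string
	Mirror  string
}

// buildInstallArgs assembles the porter CLI arguments for the configured plugins.
func buildInstallArgs(plugins map[string]Plugin) []string {
	// Plugin names are sorted alphabetically; only the first is installed today.
	names := make([]string, 0, len(plugins))
	for name := range plugins {
		names = append(names, name)
	}
	sort.Strings(names)

	args := []string{"plugins", "install"}
	for _, name := range names {
		args = append(args, name)
		p := plugins[name]
		if p.Mirror != "" {
			args = append(args, "--mirror", p.Mirror)
		}
		if p.URL != "" {
			args = append(args, "--url", p.URL)
		}
		if p.FeedURL != "" {
			args = append(args, "--feed-url", p.FeedURL)
		}
		if p.Version != "" {
			args = append(args, "--version", p.Version)
		}
		break // current limitation: one plugin per AgentConfig
	}
	return args
}

func main() {
	args := buildInstallArgs(map[string]Plugin{
		"kubernetes": {Version: "v1.0.0", FeedURL: "https://cdn.porter.sh/plugins/atom.xml"},
	})
	fmt.Println(args) // [plugins install kubernetes --feed-url https://cdn.porter.sh/plugins/atom.xml --version v1.0.0]
}
```

Note the early `break`: it encodes the one-plugin-per-AgentConfig limitation called out in the warning above.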
diff --git a/installer/manifests/namespace/porter-agentconfig.yaml b/installer/manifests/namespace/porter-agentconfig.yaml index a89de1ce..ce072c7a 100644 --- a/installer/manifests/namespace/porter-agentconfig.yaml +++ b/installer/manifests/namespace/porter-agentconfig.yaml @@ -5,5 +5,7 @@ metadata: labels: porter.sh/generator: "porter-operator-bundle" spec: + plugins: + kubernetes: {} # Values are set in helpers.sh based on the bundle parameters which are copied into agentconfig/* # where each file is a setting on the spec. \ No newline at end of file diff --git a/magefiles/magefile.go b/magefiles/magefile.go index 9bae4d3e..d8bcbd86 100644 --- a/magefiles/magefile.go +++ b/magefiles/magefile.go @@ -18,6 +18,8 @@ import ( . "get.porter.sh/magefiles/docker" "get.porter.sh/magefiles/porter" "get.porter.sh/magefiles/releases" + + //mage:import . "get.porter.sh/magefiles/tests" "get.porter.sh/magefiles/tools" . "get.porter.sh/operator/mage" @@ -53,12 +55,15 @@ const ( porterVersion = "v1.0.1" ) -var srcDirs = []string{"api", "config", "controllers", "installer", "installer-olm"} -var binDir = "bin" +var ( + srcDirs = []string{"api", "config", "controllers", "installer", "installer-olm"} + binDir = "bin" +) -// Porter agent that has k8s plugin included -var porterAgentImgRepository = "ghcr.io/getporter/dev/porter-agent-kubernetes" -var porterAgentImgVersion = porterVersion +var ( + porterAgentImgRepository = "ghcr.io/getporter/porter-agent" + porterAgentImgVersion = "v1.0.2" +) // Local porter agent image name to use for local testing var localAgentImgName = "localhost:5000/porter-agent:canary-dev" @@ -119,7 +124,7 @@ func GenerateController() error { // Build the porter-operator bundle. func BuildBundle() { - mg.SerialDeps(getPlugins, getMixins, StartDockerRegistry, PublishImages) + mg.SerialDeps(getMixins, StartDockerRegistry, PublishImages) buildManifests() @@ -149,44 +154,6 @@ func BuildImages() { mgx.Must(p.SetEnv("MANAGER_IMAGE", img)) } -func getPlugins() error { - // TODO: move this to a shared target in porter - - plugins := []struct { - name string - url string - feed string - version string - }{ - {name: "kubernetes", feed: "https://cdn.porter.sh/plugins/atom.xml", version: "v1.0.1"}, - } - var errG errgroup.Group - for _, plugin := range plugins { - plugin := plugin - pluginDir := filepath.Join("bin/plugins/", plugin.name) - if _, err := os.Stat(pluginDir); err == nil { - log.Println("Plugin already installed into bin:", plugin.name) - continue - } - - errG.Go(func() error { - log.Println("Installing plugin:", plugin.name) - if plugin.version == "" { - plugin.version = "latest" - } - var source string - if plugin.feed != "" { - source = "--feed-url=" + plugin.feed - } else { - source = "--url=" + plugin.url - } - return buildPorterCmd("plugin", "install", plugin.name, "--version", plugin.version, source).Run() - }) - } - - return errG.Wait() -} - func getMixins() error { // TODO: move this to a shared target in porter @@ -646,7 +613,7 @@ func buildPorterCmd(args ...string) shx.PreparedCommand { func BuildLocalPorterAgent() { mg.SerialDeps(porter.UseBinForPorterHome, ensurePorterAt) - mg.SerialDeps(getPlugins, getMixins) + mg.SerialDeps(getMixins) porterRegistry := "ghcr.io/getporter" buildImage := func(img string) error { _, err := shx.Output("docker", "build", "-t", img, diff --git a/main.go b/main.go index c0956d21..fe053565 100644 --- a/main.go +++ b/main.go @@ -93,6 +93,14 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "ParameterSet") 
os.Exit(1) } + if err = (&controllers.AgentConfigReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AgentConfig"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AgentConfig") + os.Exit(1) + } // +kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { diff --git a/tests/integration/agentconfig_test.go b/tests/integration/agentconfig_test.go new file mode 100644 index 00000000..c0692a5a --- /dev/null +++ b/tests/integration/agentconfig_test.go @@ -0,0 +1,87 @@ +//go:build integration + +package integration_test + +import ( + "context" + + . "github.com/onsi/ginkgo" + "github.com/tidwall/gjson" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + porterv1 "get.porter.sh/operator/api/v1" + . "github.com/onsi/gomega" +) + +var _ = Describe("AgentConfig delete", func() { + Context("when an existing AgentConfig is deleted", func() { + It("should delete AgentConfig and remove owner reference from all volumes it's associated with", func() { + By("creating an agent action", func() { + ctx := context.Background() + ns := createTestNamespace(ctx) + + agentCfg := NewTestAgentCfg() + agentCfg.Namespace = ns + + Expect(k8sClient.Create(ctx, &agentCfg.AgentConfig)).Should(Succeed()) + Expect(waitForPorter(ctx, &agentCfg.AgentConfig, 1, "waiting for plugins to be installed")).Should(Succeed()) + validateResourceConditions(agentCfg) + Expect(len(agentCfg.Spec.Plugins.GetNames())).To(Equal(1)) + + Log("verify it's created") + jsonOut := runAgentAction(ctx, "create-check-plugins-list", ns, []string{"plugins", "list", "-o", "json"}) + firstName := gjson.Get(jsonOut, "0.name").String() + numPluginsInstalled := gjson.Get(jsonOut, "#").Int() + Expect(int64(1)).To(Equal(numPluginsInstalled)) + _, ok := agentCfg.Spec.Plugins.GetByName(firstName) + Expect(ok).To(BeTrue()) + + Log("delete an agent config") + Expect(k8sClient.Delete(ctx, &agentCfg.AgentConfig)).Should(Succeed()) + Expect(waitForResourceDeleted(ctx, &agentCfg.AgentConfig)).Should(Succeed()) + + Log("verify persistent volume and claim no longer have the agent config in their owner reference") + results := &corev1.PersistentVolumeClaimList{} + Expect(k8sClient.List(ctx, results, client.InNamespace(agentCfg.Namespace), client.MatchingLabels(agentCfg.Spec.Plugins.GetLabels()))).Should(Succeed()) + for _, pvc := range results.Items { + for _, ow := range pvc.OwnerReferences { + if ow.Kind == "AgentConfig" { + Expect(ow.Name).ShouldNot(Equal(agentCfg.Name)) + } + } + key := client.ObjectKey{Namespace: agentCfg.Namespace, Name: pvc.Spec.VolumeName} + pv := &corev1.PersistentVolume{} + Expect(k8sClient.Get(ctx, key, pv)).Should(Succeed()) + for _, ow := range pv.OwnerReferences { + if ow.Kind == "AgentConfig" { + Expect(ow.Name).ShouldNot(Equal(agentCfg.Name)) + } + } + } + }) + }) + }) +}) + +// NewTestAgentCfg creates a minimal AgentConfig CRD for tests +func NewTestAgentCfg() *porterv1.AgentConfigAdapter { + cs := porterv1.AgentConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "porter.sh/v1", + Kind: "AgentConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "porter-test-me-", + }, + Spec: porterv1.AgentConfigSpec{ + Plugins: map[string]porterv1.Plugin{ + "kubernetes": {}, + }, + }, + } + return porterv1.NewAgentConfigAdapter(cs) +} diff --git a/tests/integration/suite_test.go
b/tests/integration/suite_test.go index 908bc2d6..251a61da 100644 --- a/tests/integration/suite_test.go +++ b/tests/integration/suite_test.go @@ -103,6 +103,12 @@ var _ = BeforeSuite(func(done Done) { }).SetupWithManager(k8sManager) Expect(err).ToNot(HaveOccurred()) + err = (&controllers.AgentConfigReconciler{ + Client: k8sManager.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("AgentConfig"), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + go func() { err = k8sManager.Start(ctrl.SetupSignalHandler()) Expect(err).ToNot(HaveOccurred()) @@ -191,6 +197,7 @@ func createTestNamespace(ctx context.Context) string { PorterVersion: agentVersion, ServiceAccount: svc.Name, InstallationServiceAccount: "installation-agent", + Plugins: map[string]porterv1.Plugin{"kubernetes": {FeedURL: "https://cdn.porter.sh/plugins/atom.xml", Version: "v1.0.0"}}, }, } Expect(k8sClient.Create(ctx, agentCfg)).To(Succeed()) diff --git a/tests/integration/testdata/Dockerfile.k8s-plugin-agent b/tests/integration/testdata/Dockerfile.k8s-plugin-agent deleted file mode 100644 index 89e1ff99..00000000 --- a/tests/integration/testdata/Dockerfile.k8s-plugin-agent +++ /dev/null @@ -1,4 +0,0 @@ -ARG PORTER_VERSION -ARG REGISTRY -FROM $REGISTRY/porter-agent:$PORTER_VERSION -COPY --chown=65532:65532 bin/plugins/kubernetes/runtimes/kubernetes-runtime /app/.porter/plugins/kubernetes/kubernetes diff --git a/tests/integration/testdata/operator_porter_config.yaml b/tests/integration/testdata/operator_porter_config.yaml index 73a3646e..ca7c681f 100644 --- a/tests/integration/testdata/operator_porter_config.yaml +++ b/tests/integration/testdata/operator_porter_config.yaml @@ -1,6 +1,5 @@ -debug: true -debug-plugins: true default-secrets: "kubernetes-secrets" +verbosity: "debug" default-storage: "in-cluster-mongodb" storage: - name: "in-cluster-mongodb" diff --git a/tests/integration/utils_test.go b/tests/integration/utils_test.go index e38314ab..b0da3c16 100644 --- a/tests/integration/utils_test.go +++ b/tests/integration/utils_test.go @@ -36,6 +36,7 @@ var resourceTypeMap = map[string]string{ "CredentialSet": "credentialsets", "Installation": "installations", "AgentAction": "agentactions", + "AgentConfig": "agentconfigs", } var gvrVersion = "v1" @@ -282,7 +283,9 @@ func getAgentActionCmdOut(action *porterv1.AgentAction, aaOut string) string { return strings.SplitAfterN(strings.Replace(aaOut, "\n", "", -1), strings.Join(action.Spec.Args, " "), 2)[1] } -/* Fully execute an agent action and return the associated result of the command executed. For example an agent action +/* + Fully execute an agent action and return the associated result of the command executed. For example an agent action + that does "porter credentials list" will return just the result of the porter command from the job logs. This can be used to run porter commands inside the cluster to validate porter state */
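For context, here is a hypothetical sketch of how a test might lean on that helper to validate in-cluster plugin state, following the pattern used in `agentconfig_test.go` above. It assumes the suite's `runAgentAction` helper, Gomega's `Expect`, and the `gjson` package are in scope; the action name `check-plugins-list` is illustrative:

```go
// assertKubernetesPluginInstalled runs `porter plugins list -o json` inside the
// cluster via an AgentAction and asserts on the parsed JSON output.
func assertKubernetesPluginInstalled(ctx context.Context, ns string) {
	jsonOut := runAgentAction(ctx, "check-plugins-list", ns, []string{"plugins", "list", "-o", "json"})
	Expect(gjson.Get(jsonOut, "#").Int()).To(Equal(int64(1)), "expected exactly one plugin installed")
	Expect(gjson.Get(jsonOut, "0.name").String()).To(Equal("kubernetes"))
}
```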