[WIP] pkg/helm: custom storage backend per CR with ownerref #1100

Closed
3 changes: 3 additions & 0 deletions Gopkg.lock

6 changes: 6 additions & 0 deletions pkg/helm/controller/controller.go
@@ -24,6 +24,7 @@ import (
"time"

yaml "gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -78,6 +79,11 @@ func Add(mgr manager.Manager, options WatchOptions) error {
return err
}

+// Watch release secrets
+if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, &crthandler.EnqueueRequestForOwner{OwnerType: o, IsController: true}); err != nil {
+return err
+}

if options.WatchDependentResources {
watchDependentResources(mgr, r, c)
}
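This watch pairs with the new per-CR storage backend below: each release version is persisted as a Secret owned by the CR, so EnqueueRequestForOwner can map Secret events back to the owning CR. A minimal sketch (illustrative, not part of this diff; assumes a CR cr of type *unstructured.Unstructured and Helm's "NAME.vVERSION" secret naming) of the ownership metadata that makes this work:

// Sketch: the shape of a release Secret the watch above reacts to. The
// controller reference mirrors the metav1.NewControllerRef call in
// getStorageBackend (manager_factory.go below); the name and cr are illustrative.
secret := &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "example-release.v1",
		Namespace: cr.GetNamespace(),
		OwnerReferences: []metav1.OwnerReference{
			*metav1.NewControllerRef(cr, cr.GroupVersionKind()),
		},
	},
}
_ = secret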
7 changes: 6 additions & 1 deletion pkg/helm/controller/reconcile.go
@@ -75,7 +75,12 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) {
return reconcile.Result{}, err
}

-manager := r.ManagerFactory.NewManager(o)
+manager, err := r.ManagerFactory.NewManager(o)
+if err != nil {
+log.Error(err, "Failed to get release manager")
+return reconcile.Result{}, err
+}

status := types.StatusFor(o)
log = log.WithValues("release", manager.ReleaseName())

38 changes: 6 additions & 32 deletions pkg/helm/release/manager.go
@@ -94,10 +94,6 @@ func (m manager) IsUpdateRequired() bool {
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
-if err := m.syncReleaseStatus(*m.status); err != nil {
-return fmt.Errorf("failed to sync release status to storage backend: %s", err)
-}

// Get release history for this release name
releases, err := m.storageBackend.History(m.releaseName)
if err != nil && !notFoundErr(err) {
@@ -147,31 +143,6 @@ func (m *manager) Sync(ctx context.Context) error {
return nil
}

-func (m manager) syncReleaseStatus(status types.HelmAppStatus) error {
-var release *rpb.Release
-for _, condition := range status.Conditions {
-if condition.Type == types.ConditionDeployed && condition.Status == types.StatusTrue {
-release = condition.Release
-break
-}
-}
-if release == nil {
-return nil
-}
-
-name := release.GetName()
-version := release.GetVersion()
-_, err := m.storageBackend.Get(name, version)
-if err == nil {
-return nil
-}
-
-if !notFoundErr(err) {
-return err
-}
-return m.storageBackend.Create(release)
-}

func notFoundErr(err error) bool {
return strings.Contains(err.Error(), "not found")
}
@@ -392,12 +363,15 @@ func (m manager) UninstallRelease(ctx context.Context) (*rpb.Release, error) {
func uninstallRelease(ctx context.Context, storageBackend *storage.Storage, tiller *tiller.ReleaseServer, releaseName string) (*rpb.Release, error) {
// Get history of this release
h, err := storageBackend.History(releaseName)
-if err != nil {
+
+// If the error is not nil, only return it if it is something other than
+// a not found error.
+if err != nil && !notFoundErr(err) {
return nil, fmt.Errorf("failed to get release history: %s", err)
}

-// If there is no history, the release has already been uninstalled,
-// so return ErrNotFound.
+// If there is no history, the release was never installed or has already
+// been uninstalled, so return ErrNotFound.
if len(h) == 0 {
return nil, ErrNotFound
}
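For clarity, a sketch of the caller-side contract after this change, assuming illustrative storageBackend, tillerSrv, and release-name values: an empty or missing history now surfaces as ErrNotFound rather than a raw storage-backend error, which callers can treat as a no-op.

// Sketch: handling the uninstall result under the new semantics.
rel, err := uninstallRelease(ctx, storageBackend, tillerSrv, "example-release")
switch {
case err == ErrNotFound:
	// Never installed, or already uninstalled: nothing to do.
case err != nil:
	return nil, err
}
_ = rel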
66 changes: 46 additions & 20 deletions pkg/helm/release/manager_factory.go
@@ -20,62 +20,76 @@ import (

"github.com/martinlindhe/base36"
"github.com/pborman/uuid"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
apitypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
helmengine "k8s.io/helm/pkg/engine"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/storage"
"k8s.io/helm/pkg/tiller"
"k8s.io/helm/pkg/tiller/environment"
crmanager "sigs.k8s.io/controller-runtime/pkg/manager"

"github.com/operator-framework/operator-sdk/pkg/helm/client"
"github.com/operator-framework/operator-sdk/pkg/helm/engine"
"github.com/operator-framework/operator-sdk/pkg/helm/internal/types"
"github.com/operator-framework/operator-sdk/pkg/helm/storage/driver"
)

// ManagerFactory creates Managers that are specific to custom resources. It is
// used by the HelmOperatorReconciler during resource reconciliation, and it
// improves decoupling between reconciliation logic and the Helm backend
// components used to manage releases.
type ManagerFactory interface {
-NewManager(r *unstructured.Unstructured) Manager
+NewManager(r *unstructured.Unstructured) (Manager, error)
}

type managerFactory struct {
-storageBackend *storage.Storage
-tillerKubeClient *kube.Client
-chartDir string
+crmanager crmanager.Manager
+chartDir string
}

// NewManagerFactory returns a new Helm manager factory capable of installing and uninstalling releases.
-func NewManagerFactory(storageBackend *storage.Storage, tillerKubeClient *kube.Client, chartDir string) ManagerFactory {
-return &managerFactory{storageBackend, tillerKubeClient, chartDir}
+func NewManagerFactory(crmanager crmanager.Manager, chartDir string) ManagerFactory {
+return &managerFactory{crmanager, chartDir}
}

-func (f managerFactory) NewManager(r *unstructured.Unstructured) Manager {
+func (f managerFactory) NewManager(r *unstructured.Unstructured) (Manager, error) {
return f.newManagerForCR(r)
}

-func (f managerFactory) newManagerForCR(r *unstructured.Unstructured) Manager {
+func (f managerFactory) newManagerForCR(r *unstructured.Unstructured) (Manager, error) {
+storageBackend, err := f.getStorageBackend(r)
+if err != nil {
+return nil, err
+}
+tillerKubeClient, err := client.NewFromManager(f.crmanager)
+if err != nil {
+return nil, err
+}
+releaseServer, err := tillerRendererForCR(r, tillerKubeClient, storageBackend)
+if err != nil {
+return nil, err
+}
return &manager{
-storageBackend: f.storageBackend,
-tillerKubeClient: f.tillerKubeClient,
+storageBackend: storageBackend,
+tillerKubeClient: tillerKubeClient,
chartDir: f.chartDir,

-tiller: f.tillerRendererForCR(r),
+tiller: releaseServer,
releaseName: getReleaseName(r),
namespace: r.GetNamespace(),

spec: r.Object["spec"],
status: types.StatusFor(r),
-}
+}, nil
}

// tillerRendererForCR creates a ReleaseServer configured with a rendering engine that adds ownerrefs to rendered assets
// based on the CR.
-func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tiller.ReleaseServer {
+func tillerRendererForCR(r *unstructured.Unstructured, tillerKubeClient *kube.Client, storageBackend *storage.Storage) (*tiller.ReleaseServer, error) {
controllerRef := metav1.NewControllerRef(r, r.GroupVersionKind())
ownerRefs := []metav1.OwnerReference{
*controllerRef,
@@ -87,13 +101,15 @@ func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tiller.ReleaseServer {
}
env := &environment.Environment{
EngineYard: ey,
-Releases: f.storageBackend,
-KubeClient: f.tillerKubeClient,
+Releases: storageBackend,
+KubeClient: tillerKubeClient,
}
-kubeconfig, _ := f.tillerKubeClient.ToRESTConfig()
-cs := clientset.NewForConfigOrDie(kubeconfig)
-return tiller.NewReleaseServer(env, cs, false)
+kubeconfig, _ := tillerKubeClient.ToRESTConfig()
+cs, err := clientset.NewForConfig(kubeconfig)
+if err != nil {
+return nil, err
+}
+return tiller.NewReleaseServer(env, cs, false), nil
}

func getReleaseName(r *unstructured.Unstructured) string {
@@ -108,3 +124,13 @@ func shortenUID(uid apitypes.UID) string {
}
return strings.ToLower(base36.EncodeBytes(uidBytes))
}

+func (f managerFactory) getStorageBackend(r *unstructured.Unstructured) (*storage.Storage, error) {
+ownerRef := metav1.NewControllerRef(r, r.GroupVersionKind())
+clientv1, err := corev1.NewForConfig(f.crmanager.GetConfig())
+if err != nil {
+return nil, err
+}
+secrets := driver.NewOwnerSecrets(*ownerRef, clientv1.Secrets(r.GetNamespace()))
+return storage.Init(secrets), nil
+}
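Taken together, a minimal sketch of how the reworked factory is wired and used, mirroring the run.go and reconcile.go changes in this diff (the chart directory and cr variable are illustrative):

// Sketch: per-CR manager construction. Each NewManager call now builds its
// own owner-scoped storage backend and Tiller client from the shared
// controller-runtime manager, instead of reusing process-wide instances.
factory := release.NewManagerFactory(mgr, "/opt/helm/charts/example")
m, err := factory.NewManager(cr) // cr is the *unstructured.Unstructured custom resource
if err != nil {
	return err // storage backend, Tiller client, or release server setup failed
}
if err := m.Sync(context.TODO()); err != nil {
	return err
}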
13 changes: 1 addition & 12 deletions pkg/helm/run.go
@@ -20,7 +20,6 @@ import (
"os"
"runtime"

"github.com/operator-framework/operator-sdk/pkg/helm/client"
"github.com/operator-framework/operator-sdk/pkg/helm/controller"
hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags"
"github.com/operator-framework/operator-sdk/pkg/helm/release"
@@ -30,8 +29,6 @@ import (
sdkVersion "github.com/operator-framework/operator-sdk/version"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/helm/pkg/storage"
"k8s.io/helm/pkg/storage/driver"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
@@ -73,14 +70,6 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
return err
}

-// Create Tiller's storage backend and kubernetes client
-storageBackend := storage.Init(driver.NewMemory())
-tillerKubeClient, err := client.NewFromManager(mgr)
-if err != nil {
-log.Error(err, "Failed to create new Tiller client.")
-return err
-}

watches, err := watches.Load(flags.WatchesFile)
if err != nil {
log.Error(err, "Failed to create new manager factories.")
@@ -92,7 +81,7 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
err := controller.Add(mgr, controller.WatchOptions{
Namespace: namespace,
GVK: w.GroupVersionKind,
-ManagerFactory: release.NewManagerFactory(storageBackend, tillerKubeClient, w.ChartDir),
+ManagerFactory: release.NewManagerFactory(mgr, w.ChartDir),
ReconcilePeriod: flags.ReconcilePeriod,
WatchDependentResources: w.WatchDependentResources,
})
46 changes: 46 additions & 0 deletions pkg/helm/storage/driver/labels.go
@@ -0,0 +1,46 @@
// Copyright 2019 The Operator-SDK Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

// labels is a map of key value pairs to be included as metadata in a configmap object.
type labels map[string]string

func (lbs *labels) init() { *lbs = labels(make(map[string]string)) }
func (lbs labels) get(key string) string { return lbs[key] }
func (lbs labels) set(key, val string) { lbs[key] = val }

func (lbs labels) keys() (ls []string) {
for key := range lbs {
ls = append(ls, key)
}
return
}

func (lbs labels) match(set labels) bool {
for _, key := range set.keys() {
if lbs.get(key) != set.get(key) {
return false
}
}
return true
}

func (lbs labels) toMap() map[string]string { return lbs }

func (lbs *labels) fromMap(kvs map[string]string) {
for k, v := range kvs {
lbs.set(k, v)
}
}
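A small usage sketch of this unexported helper (values illustrative): match is a superset test, true only when the receiver carries every key/value pair of the argument.

// Sketch: basic labels operations.
var lbs labels
lbs.init()                 // allocate the underlying map
lbs.set("NAME", "example")
lbs.set("OWNER", "TILLER")

ok := lbs.match(labels{"NAME": "example"}) // true: all keys in the set match
no := lbs.match(labels{"NAME": "other"})   // false: value differs
m := lbs.toMap()                           // plain map[string]string view
_, _, _ = ok, no, m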