distributedstorage: Implementing rook installation using the fleet pl… #413

Merged · 3 commits · Oct 26, 2023
1 change: 1 addition & 0 deletions pkg/fleet-manager/fleet_plugin.go
@@ -56,6 +56,7 @@ func (f *FleetManager) reconcilePlugins(ctx context.Context, fleet *fleetapi.Fle
f.reconcileGrafanaPlugin,
f.reconcileKyvernoPlugin,
f.reconcileBackupPlugin,
f.reconcileDistributedStoragePlugin,
}

resultsChannel := make(chan reconcileResult, len(funcs))
104 changes: 104 additions & 0 deletions pkg/fleet-manager/fleet_plugin_distributedstorage.go
@@ -0,0 +1,104 @@
/*
Copyright Kurator Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fleet

import (
"context"
"time"

"helm.sh/helm/v3/pkg/kube"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"

"kurator.dev/kurator/pkg/apis/fleet/v1alpha1"
"kurator.dev/kurator/pkg/fleet-manager/plugin"
"kurator.dev/kurator/pkg/infra/util"
)

func (f *FleetManager) reconcileDistributedStoragePlugin(ctx context.Context, fleet *v1alpha1.Fleet, fleetClusters map[ClusterKey]*fleetCluster) (kube.ResourceList, ctrl.Result, error) {
log := ctrl.LoggerFrom(ctx)

distributedStorageCfg := fleet.Spec.Plugin.DistributedStorage

if distributedStorageCfg == nil {
// reconcilePluginResources will delete all resources if plugin is nil
return nil, ctrl.Result{}, nil
}

fleetNN := types.NamespacedName{
Namespace: fleet.Namespace,
Name: fleet.Name,
}

fleetOwnerRef := ownerReference(fleet)
var resources kube.ResourceList

// First install the rook operator on every cluster in the fleet.
for key, cluster := range fleetClusters {
b, err := plugin.RendeStorageOperator(f.Manifests, fleetNN, fleetOwnerRef, plugin.FleetCluster{
Name: key.Name,
SecretName: cluster.Secret,
SecretKey: cluster.SecretKey,
}, distributedStorageCfg)
if err != nil {
return nil, ctrl.Result{}, err
}

// apply Rook helm resources
rookResources, err := util.PatchResources(b)
if err != nil {
return nil, ctrl.Result{}, err
}
resources = append(resources, rookResources...)
}

log.V(4).Info("wait for rook helm release to be reconciled")
if !f.helmReleaseReady(ctx, fleet, resources) {
// wait for HelmRelease to be ready
return nil, ctrl.Result{
// HelmRelease check interval is 1m, so we set 30s here
RequeueAfter: 30 * time.Second,
}, nil
}

// After the Rook operators are created, start installing rook-ceph.
if distributedStorageCfg.Storage != nil {
for key, cluster := range fleetClusters {
b, err := plugin.RenderClusterStorage(f.Manifests, fleetNN, fleetOwnerRef, plugin.FleetCluster{
Name: key.Name,
SecretName: cluster.Secret,
SecretKey: cluster.SecretKey,
}, distributedStorageCfg)
if err != nil {
return nil, ctrl.Result{}, err
}

rookCephResources, err := util.PatchResources(b)
if err != nil {
return nil, ctrl.Result{}, err
}
resources = append(resources, rookCephResources...)
}
}

if !f.helmReleaseReady(ctx, fleet, resources) {
// wait for HelmRelease to be ready
return nil, ctrl.Result{
// HelmRelease check interval is 1m, so we set 30s here
RequeueAfter: 30 * time.Second,
}, nil
}

return resources, ctrl.Result{}, nil
}
5 changes: 5 additions & 0 deletions pkg/fleet-manager/manifests/plugins/rook-ceph.yaml
@@ -0,0 +1,5 @@
type: default
repo: https://charts.rook.io/release
name: rook-ceph-cluster
version: 1.11.11
targetNamespace: rook-ceph
5 changes: 5 additions & 0 deletions pkg/fleet-manager/manifests/plugins/rook.yaml
@@ -0,0 +1,5 @@
type: default
repo: https://charts.rook.io/release
name: rook-ceph
version: 1.11.11
targetNamespace: rook-ceph
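
For reference, a Fleet spec that enables this plugin could look roughly like the sketch below. This is an illustration, not taken from the PR: the apiVersion and the camelCase field names are assumptions inferred from the Go types (Spec.Plugin.DistributedStorage, Storage.DataDirHostPath, Storage.Monitor.Count, Storage.Manager.Count), and the values are placeholders.

apiVersion: fleet.kurator.dev/v1alpha1  # assumed API group/version
kind: Fleet
metadata:
  name: quickstart
  namespace: default
spec:
  plugin:
    distributedStorage:
      storage:
        dataDirHostPath: /var/lib/rook   # host path where Rook keeps its data on each node
        monitor:
          count: 3                       # number of ceph monitors
        manager:
          count: 2                       # number of ceph managers
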
185 changes: 181 additions & 4 deletions pkg/fleet-manager/plugin/plugin.go
@@ -32,17 +32,21 @@ import (
)

const (
MetricPluginName = "metric"
GrafanaPluginName = "grafana"
KyvernoPluginName = "kyverno"
BackupPluginName = "backup"
MetricPluginName = "metric"
GrafanaPluginName = "grafana"
KyvernoPluginName = "kyverno"
BackupPluginName = "backup"
StorageOperatorPluginName = "storage-operator"
ClusterStoragePluginName = "cluster-storage"

ThanosComponentName = "thanos"
PrometheusComponentName = "prometheus"
GrafanaComponentName = "grafana"
KyvernoComponentName = "kyverno"
KyvernoPolicyComponentName = "kyverno-policies"
VeleroComponentName = "velero"
RookOperatorComponentName = "rook"
RookClusterComponentName = "rook-ceph"

OCIReposiotryPrefix = "oci://"
)
@@ -296,6 +300,179 @@ func RenderVelero(
})
}

// RendeStorageOperator builds the rendering configuration for the rook operator chart.
func RendeStorageOperator(
fsys fs.FS,
fleetNN types.NamespacedName,
fleetRef *metav1.OwnerReference,
cluster FleetCluster,
distributedStorageCfg *fleetv1a1.DistributedStorageConfig,
) ([]byte, error) {
// get and merge the chart config
c, err := getFleetPluginChart(fsys, RookOperatorComponentName)
if err != nil {
return nil, err
}
mergeChartConfig(c, distributedStorageCfg.Chart)

values, err := toMap(distributedStorageCfg.ExtraArgs)
if err != nil {
return nil, err
}

return renderFleetPlugin(fsys, FleetPluginConfig{
Name: StorageOperatorPluginName,
Component: RookOperatorComponentName,
Fleet: fleetNN,
Cluster: &cluster,
OwnerReference: fleetRef,
Chart: *c,
Values: values,
})
}

// RenderClusterStorage builds the rendering configuration for the rook-ceph-cluster chart.
func RenderClusterStorage(
fsys fs.FS,
fleetNN types.NamespacedName,
fleetRef *metav1.OwnerReference,
cluster FleetCluster,
distributedStorageCfg *fleetv1a1.DistributedStorageConfig,
) ([]byte, error) {
c, err := getFleetPluginChart(fsys, RookClusterComponentName)
if err != nil {
return nil, err
}
mergeChartConfig(c, distributedStorageCfg.Chart)

// get default values
defaultValues := c.Values
// In Rook, the labels, annotations and placement of the monitor and manager are configured under the chart's top-level labels, annotations and placement fields.
// So customValues needs to be rebuilt from the user settings in distributedStorage.

customValues := buildStorageClusterValue(*distributedStorageCfg)
cephClusterValue := make(map[string]interface{})
cephClusterValue["cephClusterSpec"] = customValues
extraValues, err := toMap(distributedStorageCfg.ExtraArgs)
if err != nil {
return nil, err
}
// Add custom extraValues to cephClusterValue.
cephClusterValue = transform.MergeMaps(cephClusterValue, extraValues)
// Replace the default values with custom values to obtain the actual values.
values := transform.MergeMaps(defaultValues, cephClusterValue)

return renderFleetPlugin(fsys, FleetPluginConfig{
Name: ClusterStoragePluginName,
Component: RookClusterComponentName,
Fleet: fleetNN,
Cluster: &cluster,
OwnerReference: fleetRef,
Chart: *c,
Values: values,
})
}

// buildStorageClusterValue generates the rook-ceph-cluster values according to distributedStorageCfg.
func buildStorageClusterValue(distributedStorageCfg fleetv1a1.DistributedStorageConfig) map[string]interface{} {
customValues := make(map[string]interface{})
Review comment: please abstract below values construction into a separate function

if distributedStorageCfg.Storage.DataDirHostPath != nil {
customValues["dataDirHostPath"] = distributedStorageCfg.Storage.DataDirHostPath
}
if distributedStorageCfg.Storage.Storage != nil {
customValues["storage"] = distributedStorageCfg.Storage.Storage
}
if distributedStorageCfg.Storage.Monitor != nil {
monitorCfg := distributedStorageCfg.Storage.Monitor
if monitorCfg.Count != nil {
monitorMap := make(map[string]interface{})
monitorMap["count"] = monitorCfg.Count
customValues["mon"] = monitorMap
}
if monitorCfg.Labels != nil {
_, ok := customValues["labels"]
if !ok {
labelsMap := make(map[string]interface{})
labelsMap["mon"] = monitorCfg.Labels
customValues["labels"] = labelsMap
} else {
labelsMap := customValues["labels"].(map[string]interface{})
labelsMap["mon"] = monitorCfg.Labels
customValues["labels"] = labelsMap
}
}
if monitorCfg.Annotations != nil {
_, ok := customValues["annotations"]
if !ok {
annotationsMap := make(map[string]interface{})
annotationsMap["mon"] = monitorCfg.Annotations
customValues["annotations"] = annotationsMap
} else {
annotationsMap := customValues["annotations"].(map[string]interface{})
annotationsMap["mon"] = monitorCfg.Annotations
customValues["annotations"] = annotationsMap
}
}
if monitorCfg.Placement != nil {
_, ok := customValues["placement"]
if !ok {
placementMap := make(map[string]interface{})
placementMap["mon"] = monitorCfg.Placement
customValues["placement"] = placementMap
} else {
placementMap := customValues["placement"].(map[string]interface{})
placementMap["mon"] = monitorCfg.Placement
customValues["placement"] = placementMap
}
}
}
if distributedStorageCfg.Storage.Manager != nil {
managerCfg := distributedStorageCfg.Storage.Manager
if managerCfg.Count != nil {
managerMap := make(map[string]interface{})
managerMap["count"] = managerCfg.Count
customValues["mgr"] = managerMap
}
if managerCfg.Labels != nil {
_, ok := customValues["labels"]
if !ok {
labelsMap := make(map[string]interface{})
labelsMap["mgr"] = managerCfg.Labels
customValues["labels"] = labelsMap
} else {
labelsMap := customValues["labels"].(map[string]interface{})
labelsMap["mgr"] = managerCfg.Labels
customValues["labels"] = labelsMap
}
}
if managerCfg.Annotations != nil {
_, ok := customValues["annotations"]
if !ok {
annotationsMap := make(map[string]interface{})
annotationsMap["mgr"] = managerCfg.Annotations
customValues["annotations"] = annotationsMap
} else {
annotationsMap := customValues["annotations"].(map[string]interface{})
annotationsMap["mgr"] = managerCfg.Annotations
customValues["annotations"] = annotationsMap
}
}
if managerCfg.Placement != nil {
_, ok := customValues["placement"]
if !ok {
placementMap := make(map[string]interface{})
placementMap["mgr"] = managerCfg.Placement
customValues["placement"] = placementMap
} else {
placementMap := customValues["placement"].(map[string]interface{})
placementMap["mgr"] = managerCfg.Placement
customValues["placement"] = placementMap
}
}
}
return customValues
}
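
As the review comment above suggests, the repeated labels/annotations/placement handling could be factored into a helper. A minimal sketch of such a helper, with hypothetical names and not part of this PR, might be:

// setComponentValue stores value under customValues[field][component]
// ("labels"/"annotations"/"placement" x "mon"/"mgr"), creating the nested map when it does not exist yet.
func setComponentValue(customValues map[string]interface{}, field, component string, value interface{}) {
	fieldMap, ok := customValues[field].(map[string]interface{})
	if !ok {
		fieldMap = make(map[string]interface{})
		customValues[field] = fieldMap
	}
	fieldMap[component] = value
}

Each branch in buildStorageClusterValue would then reduce to a guarded call such as setComponentValue(customValues, "labels", "mon", monitorCfg.Labels).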

func mergeChartConfig(origin *ChartConfig, target *fleetv1a1.ChartConfig) {
if target == nil {
return