Block volume support and refactor of node publish/unpublish #93

Merged: 5 commits, Sep 18, 2023
README.md (4 changes: 2 additions & 2 deletions)
@@ -5,7 +5,7 @@ The Seagate Exos X CSI Driver supports the following storage arrays
- Seagate Exos X and AssuredSAN (4006/5005/4005/3005)
- Dell PowerVault ME4 and ME5 Series

iSCSI, SAS, and FC host interfaces are supported.
iSCSI, SAS, and FC host interfaces are supported, for both block and filesystem volume modes.

[![Go Report Card](https://goreportcard.com/badge/github.com/Seagate/seagate-exos-x-csi)](https://goreportcard.com/report/github.com/Seagate/seagate-exos-x-csi)

@@ -31,7 +31,7 @@ This project implements the **Container Storage Interface** in order to facilita
This CSI driver is an open-source project under the Apache 2.0 [license](./LICENSE).

## Key Features
- Manage persistent volumes backed by iSCSI protocols on Exos X enclosures
- Manage persistent volumes on Exos X enclosures
- Control multiple Exos X systems within a single Kubernetes cluster
- Manage Exos X snapshots and clones, including restoring from snapshots
- Clone, extend and manage persistent volumes created outside of the Exos CSI Driver
example/block-volume-pod.yaml (31 changes: 31 additions & 0 deletions)
@@ -0,0 +1,31 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: systems-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  storageClassName: block-vol-storageclass
  resources:
    requests:
      storage: 5Gi
---
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
    - image: ghcr.io/seagate/seagate-exos-x-testapp
      # The raw device is exposed at devicePath below and is not mounted,
      # so the test loop writes to the device node directly.
      command: ["/bin/sh", "-c", "while sleep 60; do dd if=/dev/zero of=/block-vol bs=4k count=1 && echo wrote 4k to /block-vol; done"]
      name: test-pod-container
      volumeDevices:
        - devicePath: /block-vol
          name: volume
      ports:
        - containerPort: 8080
  volumes:
    - name: volume
      persistentVolumeClaim:
        claimName: systems-pvc
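Note that because the PVC requests `volumeMode: Block`, kubelet exposes the raw device to the container at the `devicePath` given under `volumeDevices`; nothing is mounted inside the pod, which is why the test command above writes to the device node rather than to a file path.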
example/block-volume-storageclass.yaml (21 changes: 21 additions & 0 deletions)
@@ -0,0 +1,21 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
provisioner: csi-exos-x.seagate.com # Check pkg/driver.go; required for the plugin to recognize this storage class as its own.
volumeBindingMode: WaitForFirstConsumer # Preferred, to avoid unschedulable pods (https://kubernetes.io/docs/concepts/storage/storage-classes/#volume-binding-mode)
allowVolumeExpansion: true
metadata:
  name: block-vol-storageclass
parameters:
  # Secret name and namespace; they can be the same for the provisioner, controller-publish, controller-expand, and node-publish sections.
  csi.storage.k8s.io/provisioner-secret-name: seagate-exos-x-csi-secrets
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-publish-secret-name: seagate-exos-x-csi-secrets
  csi.storage.k8s.io/controller-publish-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: seagate-exos-x-csi-secrets
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-publish-secret-name: seagate-exos-x-csi-secrets
  csi.storage.k8s.io/node-publish-secret-namespace: default
  pool: A # Pool on the storage array in which to provision volumes
  volPrefix: stx # Prefix for volume naming; an underscore is appended
  storageProtocol: iscsi # iscsi, fc or sas
  AccessType: block
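A PVC bound to this class should also request `volumeMode: Block`, as in the pod example above. With the default `volumeMode: Filesystem`, the CO sends a mount capability instead and NodePublishVolume takes the filesystem path (see pkg/node/node.go below).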
pkg/common/driver.go (5 changes: 5 additions & 0 deletions)
@@ -56,6 +56,11 @@ const (
NodeServicePortEnvVar = "CSI_NODE_SERVICE_PORT"
)

var SupportedAccessModes = [2]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
}

// Driver contains main resources needed by the driver and references the underlying specific driver
type Driver struct {
Server *grpc.Server
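Both `runPreflightChecks` (pkg/controller/controller.go) and `isValidVolumeCapabilities` (pkg/controller/provisioner.go) in the hunks below open-code a membership test over this array. A shared predicate would remove that duplication; the following is a hypothetical sketch (not part of this PR), placed alongside `SupportedAccessModes` in `pkg/common`:

```go
package common

import (
	"github.com/container-storage-interface/spec/lib/go/csi"
)

// IsAccessModeSupported reports whether the driver supports the given
// CSI access mode, per SupportedAccessModes.
func IsAccessModeSupported(mode csi.VolumeCapability_AccessMode_Mode) bool {
	for _, supported := range SupportedAccessModes {
		if mode == supported {
			return true
		}
	}
	return false
}
```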
pkg/controller/controller.go (40 changes: 18 additions & 22 deletions)
@@ -31,17 +31,6 @@ const (
invalidArgumentErrorCode = -10058
)

var volumeCapabilities = []*csi.VolumeCapability{
{
AccessType: &csi.VolumeCapability_Mount{
Mount: &csi.VolumeCapability_MountVolume{},
},
AccessMode: &csi.VolumeCapability_AccessMode{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
},
}

var csiMutexes = map[string]*sync.Mutex{
"/csi.v1.Controller/CreateVolume": {},
"/csi.v1.Controller/ControllerPublishVolume": {},
@@ -170,8 +159,7 @@ func (controller *Controller) ControllerGetCapabilities(ctx context.Context, req
return &csi.ControllerGetCapabilitiesResponse{Capabilities: csc}, nil
}

// ValidateVolumeCapabilities checks whether the volume capabilities requested
// are supported.
// ValidateVolumeCapabilities checks whether a provisioned volume supports the requested capabilities.
func (controller *Controller) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
volumeName, _ := common.VolumeIdGetName(req.GetVolumeId())

@@ -188,7 +176,7 @@ func (controller *Controller) ValidateVolumeCapabilities(ctx context.Context, re

return &csi.ValidateVolumeCapabilitiesResponse{
Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{
VolumeCapabilities: volumeCapabilities,
VolumeCapabilities: req.GetVolumeCapabilities(),
},
}, nil
}
Expand Down Expand Up @@ -301,19 +289,27 @@ func runPreflightChecks(parameters map[string]string, capabilities *[]*csi.Volum
return status.Error(codes.InvalidArgument, "missing volume capabilities")
}
for _, capability := range *capabilities {
if capability.GetAccessMode().GetMode() != csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER {
return status.Error(codes.FailedPrecondition, "storage only supports ReadWriteOnce access mode")
accessMode := capability.GetAccessMode().GetMode()
accessModeSupported := false
for _, mode := range common.SupportedAccessModes {
if accessMode == mode {
accessModeSupported = true
}
}
if !accessModeSupported {
return status.Errorf(codes.FailedPrecondition, "driver does not support access mode %v", accessMode)
}
if capability.GetMount().GetFsType() == "" {
if err := checkIfKeyExistsInConfig(common.FsTypeConfigKey); err != nil {
return status.Error(codes.FailedPrecondition, "no fstype specified in storage class")
} else {
klog.InfoS("storage class parameter "+common.FsTypeConfigKey+" is deprecated. Please migrate to 'csi.storage.k8s.io/fstype'", "parameter", common.FsTypeConfigKey)
if mount := capability.GetMount(); mount != nil {
if mount.GetFsType() == "" {
if err := checkIfKeyExistsInConfig(common.FsTypeConfigKey); err != nil {
return status.Error(codes.FailedPrecondition, "no fstype specified in storage class")
} else {
klog.InfoS("storage class parameter "+common.FsTypeConfigKey+" is deprecated. Please migrate to 'csi.storage.k8s.io/fstype'", "parameter", common.FsTypeConfigKey)
}
}
}
}
}

return nil
}

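For reference, the sketch below (illustrative only, not driver code) builds the two capability shapes a CO can send and shows why the refactored `runPreflightChecks` guards the fstype check behind `if mount := capability.GetMount(); mount != nil`: for a block capability, `GetMount()` returns nil, so the old unguarded `capability.GetMount().GetFsType() == ""` branch would have demanded an fstype even for raw block volumes.

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Raw-block capability, as sent for a PVC with volumeMode: Block.
	blockCap := &csi.VolumeCapability{
		AccessType: &csi.VolumeCapability_Block{
			Block: &csi.VolumeCapability_BlockVolume{},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
		},
	}

	// Filesystem (mount) capability, as sent for volumeMode: Filesystem.
	mountCap := &csi.VolumeCapability{
		AccessType: &csi.VolumeCapability_Mount{
			Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
		},
		AccessMode: &csi.VolumeCapability_AccessMode{
			Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
		},
	}

	// Prints "true ext4": GetMount() is nil for the block capability.
	fmt.Println(blockCap.GetMount() == nil, mountCap.GetMount().GetFsType())
}
```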
pkg/controller/provisioner.go (32 changes: 10 additions & 22 deletions)
@@ -13,17 +13,6 @@ import (
"k8s.io/klog/v2"
)

var (
volumeCaps = []csi.VolumeCapability_AccessMode{
{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
},
{
Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
},
}
)

// Extract available SAS addresses for Nodes from topology segments
// This will contain all SAS initiators for all nodes unless the storage class
// has specified allowed or preferred topologies
@@ -203,23 +192,22 @@ func getSizeStr(size int64) string {
// isValidVolumeCapabilities validates the given VolumeCapability array is valid
func isValidVolumeCapabilities(volCaps []*csi.VolumeCapability) error {
if len(volCaps) == 0 {
return fmt.Errorf("CreateVolume Volume capabilities must be provided")
return fmt.Errorf("volume capabilities to validate not provided")
}
hasSupport := func(cap *csi.VolumeCapability) error {
if blk := cap.GetBlock(); blk != nil {
return fmt.Errorf("driver only supports mount access type volume capability")
}
for _, c := range volumeCaps {
if c.GetMode() == cap.AccessMode.GetMode() {
return nil

hasSupport := func(cap *csi.VolumeCapability) bool {
for _, supportedMode := range common.SupportedAccessModes {
// We currently support block and mount volumes with both supported access modes, so don't check mount types here
if cap.GetAccessMode().Mode == supportedMode {
return true
}
}
return fmt.Errorf("driver does not support access mode %v", cap.AccessMode.GetMode())
return false
}

for _, c := range volCaps {
if err := hasSupport(c); err != nil {
return err
if !hasSupport(c) {
return fmt.Errorf("driver does not support access mode %v", c.GetAccessMode())
}
}
return nil
pkg/node/node.go (76 changes: 62 additions & 14 deletions)
@@ -162,50 +162,98 @@ func (node *Node) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapab
return &csi.NodeGetCapabilitiesResponse{Capabilities: csc}, nil
}

// NodePublishVolume mounts the volume mounted to the staging path to the target path
// NodePublishVolume makes the volume available at the target path, as a filesystem mount or a raw block device
func (node *Node) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {

if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "cannot publish volume with empty id")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "cannot publish volume at an empty path")
}
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "cannot publish volume without capabilities")
}
if req.GetVolumeCapability().GetBlock() != nil &&
req.GetVolumeCapability().GetMount() != nil {
return nil, status.Error(codes.InvalidArgument, "cannot have both block and mount access type")
}
if req.GetVolumeCapability().GetBlock() == nil &&
req.GetVolumeCapability().GetMount() == nil {
return nil, status.Error(codes.InvalidArgument, "volume access type not specified, must be either block or mount")
}
// Extract the volume name and the storage protocol from the augmented volume id
volumeName, _ := common.VolumeIdGetName(req.GetVolumeId())
storageProtocol, _ := common.VolumeIdGetStorageProtocol(req.GetVolumeId())

klog.Infof("NodePublishVolume called with volume name %s", volumeName)
// Ensure that NodePublishVolume is only called once per volume
storage.AddGatekeeper(volumeName)
defer storage.RemoveGatekeeper(volumeName)

klog.InfoS("NodePublishVolume call", "volumeName", volumeName)

config := make(map[string]string)
config["connectorInfoPath"] = node.getConnectorInfoPath(storageProtocol, volumeName)
klog.V(2).Infof("NodePublishVolume connectorInfoPath (%v)", config["connectorInfoPath"])

// Get storage handler
storageNode, err := storage.NewStorageNode(storageProtocol, config)
if storageNode != nil {
return storageNode.NodePublishVolume(ctx, req)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
// Do any required device discovery and return path of the device on the node fs
path, err := storageNode.AttachStorage(ctx, req)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

klog.Errorf("NodePublishVolume error for storage protocol (%v): %v", storageProtocol, err)
return nil, status.Errorf(codes.Internal, "Unable to process for storage protocol (%v)", storageProtocol)
if req.GetVolumeCapability().GetMount() != nil {
err = storage.MountFilesystem(req, path)
} else if req.GetVolumeCapability().GetBlock() != nil {
err = storage.MountDevice(req, path)
}
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

return &csi.NodePublishVolumeResponse{}, nil
}

// NodeUnpublishVolume unmounts the volume from the target path
// NodeUnpublishVolume unmounts the volume from the target path and removes devices
func (node *Node) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "cannot unpublish volume with an empty volume id")
}
if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "cannot unpublish volume with an empty target path")
}

// Extract the volume name and the storage protocol from the augmented volume id
volumeName, _ := common.VolumeIdGetName(req.GetVolumeId())
storageProtocol, _ := common.VolumeIdGetStorageProtocol(req.GetVolumeId())

klog.Infof("NodeUnpublishVolume volume %s at target path %s", volumeName, req.GetTargetPath())
// Ensure that NodeUnpublishVolume is only called once per volume
storage.AddGatekeeper(volumeName)
defer storage.RemoveGatekeeper(volumeName)

klog.InfoS("NodeUnpublishVolume volume", "volumeName", volumeName, "targetPath", req.GetTargetPath())

config := make(map[string]string)
config["connectorInfoPath"] = node.getConnectorInfoPath(storageProtocol, volumeName)
klog.V(2).Infof("NodeUnpublishVolume connectorInfoPath (%v)", config["connectorInfoPath"])
klog.V(2).InfoS("NodeUnpublishVolume", "connectorInfoPath", config["connectorInfoPath"])

// Get storage handler
storageNode, err := storage.NewStorageNode(storageProtocol, config)
if storageNode != nil {
return storageNode.NodeUnpublishVolume(ctx, req)
if storageNode == nil {
klog.ErrorS(err, "Error creating storage node")
return nil, status.Errorf(codes.Internal, "unable to create storage node")
}
storage.Unmount(req.GetTargetPath())
err = storageNode.DetachStorage(ctx, req)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

klog.Errorf("NodeUnpublishVolume error for storage protocol (%v): %v", storageProtocol, err)
return nil, status.Errorf(codes.Internal, "Unable to process for storage protocol (%v)", storageProtocol)
return &csi.NodeUnpublishVolumeResponse{}, nil
}

// NodeExpandVolume finalizes volume expansion on the node
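With this refactor, the protocol-specific node code (iSCSI, FC, SAS) is reduced to attaching and detaching devices, while mounting is handled once in the node server. The interface below is inferred from the call sites in this diff (`NewStorageNode`, `AttachStorage`, `DetachStorage`); the actual declaration in `pkg/storage` may differ:

```go
package storage

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// StorageNode is a sketch of the per-protocol contract implied by the
// NodePublishVolume/NodeUnpublishVolume call sites above.
type StorageNode interface {
	// AttachStorage performs protocol-specific discovery and returns the
	// path of the attached device on the node filesystem.
	AttachStorage(ctx context.Context, req *csi.NodePublishVolumeRequest) (string, error)

	// DetachStorage removes the device after the target path is unmounted.
	DetachStorage(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) error
}
```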