Merge pull request #432 from woehrl01/ondelete_subdir
feat: provide option to retain subdir on delete
Commit: fafa015708
@@ -66,6 +66,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driver chart and their default values.
 | `controller.runOnMaster` | run controller on master node(deprecated on k8s 1.25+) | `false` |
 | `controller.runOnControlPlane` | run controller on control plane node | `false` |
 | `controller.dnsPolicy` | dnsPolicy of controller driver, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` | `Default` |
+| `controller.defaultOnDeletePolicy` | default policy for deleting subdirectory when deleting a volume, available values: `delete`, `retain` | `delete` |
 | `controller.logLevel` | controller driver log level | `5` |
 | `controller.workingMountDir` | working directory for provisioner to mount nfs shares temporarily | `/tmp` |
 | `controller.affinity` | controller pod affinity | `{}` |
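The new chart value can be overridden at install or upgrade time. A minimal values sketch (choosing `retain` here is only for illustration; the chart default remains `delete`):

```yaml
# custom-values.yaml -- hypothetical override file for the csi-driver-nfs chart
controller:
  # keep the provisioned NFS subdirectory when a dynamically provisioned PV is deleted
  defaultOnDeletePolicy: retain
```

With helm, the same override could also be passed as `--set controller.defaultOnDeletePolicy=retain` (hypothetical invocation).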
@@ -93,6 +93,7 @@ spec:
 - "--drivername={{ .Values.driver.name }}"
 - "--mount-permissions={{ .Values.driver.mountPermissions }}"
 - "--working-mount-dir={{ .Values.controller.workingMountDir }}"
+- "--default-ondelete-policy={{ .Values.controller.defaultOnDeletePolicy }}"
 env:
 - name: NODE_ID
   valueFrom:
@@ -44,8 +44,9 @@ controller:
   livenessProbe:
     healthPort: 29652
   logLevel: 5
-  workingMountDir: "/tmp"
+  workingMountDir: /tmp
   dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
+  defaultOnDeletePolicy: delete # available values: delete, retain
   affinity: {}
   nodeSelector: {}
   tolerations:
@@ -26,11 +26,12 @@ import (
 )

 var (
-	endpoint         = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
-	nodeID           = flag.String("nodeid", "", "node id")
-	mountPermissions = flag.Uint64("mount-permissions", 0, "mounted folder permissions")
-	driverName       = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
-	workingMountDir  = flag.String("working-mount-dir", "/tmp", "working directory for provisioner to mount nfs shares temporarily")
+	endpoint              = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
+	nodeID                = flag.String("nodeid", "", "node id")
+	mountPermissions      = flag.Uint64("mount-permissions", 0, "mounted folder permissions")
+	driverName            = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
+	workingMountDir       = flag.String("working-mount-dir", "/tmp", "working directory for provisioner to mount nfs shares temporarily")
+	defaultOnDeletePolicy = flag.String("default-ondelete-policy", "", "default policy for deleting subdirectory when deleting a volume")
 )

 func init() {

@@ -50,11 +51,12 @@ func main() {

 func handle() {
 	driverOptions := nfs.DriverOptions{
-		NodeID:           *nodeID,
-		DriverName:       *driverName,
-		Endpoint:         *endpoint,
-		MountPermissions: *mountPermissions,
-		WorkingMountDir:  *workingMountDir,
+		NodeID:                *nodeID,
+		DriverName:            *driverName,
+		Endpoint:              *endpoint,
+		MountPermissions:      *mountPermissions,
+		WorkingMountDir:       *workingMountDir,
+		DefaultOnDeletePolicy: *defaultOnDeletePolicy,
 	}
 	d := nfs.NewDriver(&driverOptions)
 	d.Run(false)
@@ -10,6 +10,7 @@ server | NFS Server address | domain name `nfs-server.default.svc.cluster.local`
 share | NFS share path | `/` | Yes |
 subDir | sub directory under nfs share | | No | if sub directory does not exist, this driver would create a new one
 mountPermissions | mounted folder permissions. The default is `0`, if set as non-zero, driver will perform `chmod` after mount | | No |
+onDelete | when volume is deleted, keep the directory if it's `retain` | `delete`(default), `retain` | No | `delete`

 - VolumeID(`volumeHandle`) is the identifier of the volume handled by the driver, format of VolumeID:
 ```
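For illustration, a StorageClass sketch that uses the new `onDelete` parameter (the class name and the server/share values are placeholders, not taken from this PR):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi-retain                           # placeholder name
provisioner: nfs.csi.k8s.io
parameters:
  server: nfs-server.default.svc.cluster.local   # placeholder NFS server address
  share: /
  onDelete: retain   # keep the subdirectory on the NFS share when the PV is deleted
reclaimPolicy: Delete
volumeBindingMode: Immediate
```

PVs provisioned from such a class keep their subdirectory on the share when they are deleted; with the default `delete` policy the driver removes the subdirectory, as the DeleteVolume change below shows.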
@@ -55,6 +55,8 @@ type nfsVolume struct {
 	size int64
 	// pv name when subDir is not empty
 	uuid string
+	// on delete action
+	onDelete string
 }

 // Ordering of elements in the CSI volume id.
@@ -68,11 +70,10 @@ const (
 	idBaseDir
 	idSubDir
 	idUUID
+	idOnDelete
 	totalIDElements // Always last
 )

-const separator = "#"
-
 // CreateVolume create a volume
 func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
 	name := req.GetName()
@@ -96,6 +97,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		case paramServer:
 		case paramShare:
 		case paramSubDir:
+		case paramOnDelete:
 		case pvcNamespaceKey:
 		case pvcNameKey:
 		case pvNameKey:
@@ -112,7 +114,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		}
 	}

-	nfsVol, err := newNFSVolume(name, reqCapacity, parameters)
+	nfsVol, err := newNFSVolume(name, reqCapacity, parameters, cs.Driver.defaultOnDeletePolicy)
 	if err != nil {
 		return nil, status.Error(codes.InvalidArgument, err.Error())
 	}
@@ -187,22 +189,30 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
 		}
 	}

-	// mount nfs base share so we can delete the subdirectory
-	if err = cs.internalMount(ctx, nfsVol, nil, volCap); err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
+	if nfsVol.onDelete == "" {
+		nfsVol.onDelete = cs.Driver.defaultOnDeletePolicy
 	}
-	defer func() {
-		if err = cs.internalUnmount(ctx, nfsVol); err != nil {
-			klog.Warningf("failed to unmount nfs server: %v", err.Error())
+
+	if !strings.EqualFold(nfsVol.onDelete, retain) {
+		// mount nfs base share so we can delete the subdirectory
+		if err = cs.internalMount(ctx, nfsVol, nil, volCap); err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
 		}
-	}()
+		defer func() {
+			if err = cs.internalUnmount(ctx, nfsVol); err != nil {
+				klog.Warningf("failed to unmount nfs server: %v", err.Error())
+			}
+		}()

-	// delete subdirectory under base-dir
-	internalVolumePath := getInternalVolumePath(cs.Driver.workingMountDir, nfsVol)
+		// delete subdirectory under base-dir
+		internalVolumePath := getInternalVolumePath(cs.Driver.workingMountDir, nfsVol)

-	klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath)
-	if err = os.RemoveAll(internalVolumePath); err != nil {
-		return nil, status.Errorf(codes.Internal, "failed to delete subdirectory: %v", err.Error())
+		klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath)
+		if err = os.RemoveAll(internalVolumePath); err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to delete subdirectory: %v", err.Error())
+		}
+	} else {
+		klog.V(2).Infof("DeleteVolume: volume(%s) is set to retain, not deleting subdirectory", volumeID)
 	}

 	return &csi.DeleteVolumeResponse{}, nil
@@ -368,8 +378,8 @@ func (cs *ControllerServer) copyVolume(ctx context.Context, req *csi.CreateVolum
 }

 // newNFSVolume Convert VolumeCreate parameters to an nfsVolume
-func newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume, error) {
-	var server, baseDir, subDir string
+func newNFSVolume(name string, size int64, params map[string]string, defaultOnDeletePolicy string) (*nfsVolume, error) {
+	var server, baseDir, subDir, onDelete string
 	subDirReplaceMap := map[string]string{}

 	// validate parameters (case-insensitive)

@@ -381,6 +391,8 @@ func newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume
 			baseDir = v
 		case paramSubDir:
 			subDir = v
+		case paramOnDelete:
+			onDelete = v
 		case pvcNamespaceKey:
 			subDirReplaceMap[pvcNamespaceMetadata] = v
 		case pvcNameKey:
@@ -408,6 +420,16 @@ func newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume
 		// make volume id unique if subDir is provided
 		vol.uuid = name
 	}

+	if err := validateOnDeleteValue(onDelete); err != nil {
+		return nil, err
+	}
+
+	vol.onDelete = defaultOnDeletePolicy
+	if onDelete != "" {
+		vol.onDelete = onDelete
+	}
+
 	vol.id = getVolumeIDFromNfsVol(vol)
 	return vol, nil
 }
@@ -442,6 +464,9 @@ func getVolumeIDFromNfsVol(vol *nfsVolume) string {
 	idElements[idBaseDir] = strings.Trim(vol.baseDir, "/")
 	idElements[idSubDir] = strings.Trim(vol.subDir, "/")
 	idElements[idUUID] = vol.uuid
+	if strings.EqualFold(vol.onDelete, retain) {
+		idElements[idOnDelete] = vol.onDelete
+	}
 	return strings.Join(idElements, separator)
 }
@@ -450,10 +475,10 @@ func getVolumeIDFromNfsVol(vol *nfsVolume) string {
 //
 // new volumeID:
 // nfs-server.default.svc.cluster.local#share#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
 // nfs-server.default.svc.cluster.local#share#subdir#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
+// nfs-server.default.svc.cluster.local#share#subdir#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64#retain
 // old volumeID: nfs-server.default.svc.cluster.local/share/pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
 func getNfsVolFromID(id string) (*nfsVolume, error) {
-	var server, baseDir, subDir, uuid string
+	var server, baseDir, subDir, uuid, onDelete string
 	segments := strings.Split(id, separator)
 	if len(segments) < 3 {
 		klog.V(2).Infof("could not split %s into server, baseDir and subDir with separator(%s)", id, separator)

@@ -473,14 +498,18 @@ func getNfsVolFromID(id string) (*nfsVolume, error) {
 		if len(segments) >= 4 {
 			uuid = segments[3]
 		}
+		if len(segments) >= 5 {
+			onDelete = segments[4]
+		}
 	}

 	return &nfsVolume{
-		id:      id,
-		server:  server,
-		baseDir: baseDir,
-		subDir:  subDir,
-		uuid:    uuid,
+		id:       id,
+		server:   server,
+		baseDir:  baseDir,
+		subDir:   subDir,
+		uuid:     uuid,
+		onDelete: onDelete,
 	}, nil
 }
@@ -35,15 +35,18 @@ import (
 )

 const (
-	testServer            = "test-server"
-	testBaseDir           = "test-base-dir"
-	testBaseDirNested     = "test/base/dir"
-	testCSIVolume         = "volume-name"
-	testVolumeID          = "test-server/test-base-dir/volume-name"
-	newTestVolumeID       = "test-server#test-base-dir#volume-name#"
-	testVolumeIDNested    = "test-server/test/base/dir/volume-name"
-	newTestVolumeIDNested = "test-server#test/base/dir#volume-name#"
-	newTestVolumeIDUUID   = "test-server#test-base-dir#volume-name#uuid"
+	testServer                  = "test-server"
+	testBaseDir                 = "test-base-dir"
+	testBaseDirNested           = "test/base/dir"
+	testCSIVolume               = "volume-name"
+	testVolumeID                = "test-server/test-base-dir/volume-name"
+	newTestVolumeID             = "test-server#test-base-dir#volume-name##"
+	newTestVolumeWithVolumeID   = "test-server#test-base-dir#volume-name#volume-name#"
+	testVolumeIDNested          = "test-server/test/base/dir/volume-name"
+	newTestVolumeIDNested       = "test-server#test/base/dir#volume-name#"
+	newTestVolumeIDUUID         = "test-server#test-base-dir#volume-name#uuid"
+	newTestVolumeOnDeleteRetain = "test-server#test-base-dir#volume-name#uuid#retain"
+	newTestVolumeOnDeleteDelete = "test-server#test-base-dir#volume-name#uuid#delete"
 )

 func initTestController(t *testing.T) *ControllerServer {
@@ -136,7 +139,7 @@ func TestCreateVolume(t *testing.T) {
 			},
 			resp: &csi.CreateVolumeResponse{
 				Volume: &csi.Volume{
-					VolumeId: newTestVolumeID + testCSIVolume,
+					VolumeId: newTestVolumeWithVolumeID,
 					VolumeContext: map[string]string{
 						paramServer: testServer,
 						paramShare:  testBaseDir,
@@ -242,32 +245,44 @@

 func TestDeleteVolume(t *testing.T) {
 	cases := []struct {
-		desc          string
-		testOnWindows bool
-		req           *csi.DeleteVolumeRequest
-		resp          *csi.DeleteVolumeResponse
-		expectedErr   error
+		desc                 string
+		testOnWindows        bool
+		req                  *csi.DeleteVolumeRequest
+		resp                 *csi.DeleteVolumeResponse
+		expectedDeleteSubDir bool
+		expectedErr          error
 	}{
 		{
-			desc:          "Volume ID missing",
-			testOnWindows: true,
-			req:           &csi.DeleteVolumeRequest{},
-			resp:          nil,
-			expectedErr:   status.Error(codes.InvalidArgument, "Volume ID missing in request"),
+			desc:                 "Volume ID missing",
+			testOnWindows:        true,
+			req:                  &csi.DeleteVolumeRequest{},
+			resp:                 nil,
+			expectedErr:          status.Error(codes.InvalidArgument, "Volume ID missing in request"),
+			expectedDeleteSubDir: false,
 		},
 		{
-			desc:          "Valid request",
-			testOnWindows: false,
-			req:           &csi.DeleteVolumeRequest{VolumeId: testVolumeID},
-			resp:          &csi.DeleteVolumeResponse{},
-			expectedErr:   nil,
+			desc:                 "Valid request",
+			testOnWindows:        false,
+			req:                  &csi.DeleteVolumeRequest{VolumeId: testVolumeID},
+			resp:                 &csi.DeleteVolumeResponse{},
+			expectedErr:          nil,
+			expectedDeleteSubDir: true,
 		},
 		{
-			desc:          "Valid request with newTestVolumeID",
-			testOnWindows: true,
-			req:           &csi.DeleteVolumeRequest{VolumeId: newTestVolumeID},
-			resp:          &csi.DeleteVolumeResponse{},
-			expectedErr:   nil,
+			desc:                 "Valid request with newTestVolumeID",
+			testOnWindows:        true,
+			req:                  &csi.DeleteVolumeRequest{VolumeId: newTestVolumeID},
+			resp:                 &csi.DeleteVolumeResponse{},
+			expectedErr:          nil,
+			expectedDeleteSubDir: true,
+		},
+		{
+			desc:                 "Valid request with onDelete:retain",
+			testOnWindows:        true,
+			req:                  &csi.DeleteVolumeRequest{VolumeId: newTestVolumeOnDeleteRetain},
+			resp:                 &csi.DeleteVolumeResponse{},
+			expectedErr:          nil,
+			expectedDeleteSubDir: false,
 		},
 	}
@@ -292,8 +307,13 @@ func TestDeleteVolume(t *testing.T) {
 			if !reflect.DeepEqual(resp, test.resp) {
 				t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
 			}
-			if _, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
-				t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
+
+			if _, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil {
+				if !os.IsNotExist(err) && test.expectedDeleteSubDir {
+					t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
+				} else if os.IsNotExist(err) && !test.expectedDeleteSubDir {
+					t.Errorf("test %q failed: expected volume subdirectory not deleted, it was deleted", test.desc)
+				}
 			}
 		})
 	}
@@ -395,10 +415,11 @@ func TestNfsVolFromId(t *testing.T) {
 			name:     "valid request single baseDir with newTestVolumeID",
 			volumeID: newTestVolumeID,
 			resp: &nfsVolume{
-				id:      newTestVolumeID,
-				server:  testServer,
-				baseDir: testBaseDir,
-				subDir:  testCSIVolume,
+				id:       newTestVolumeID,
+				server:   testServer,
+				baseDir:  testBaseDir,
+				subDir:   testCSIVolume,
+				onDelete: "",
 			},
 			expectErr: false,
 		},
@@ -436,6 +457,32 @@ func TestNfsVolFromId(t *testing.T) {
 			},
 			expectErr: false,
 		},
+		{
+			name:     "valid request nested ondelete retain",
+			volumeID: newTestVolumeOnDeleteRetain,
+			resp: &nfsVolume{
+				id:       newTestVolumeOnDeleteRetain,
+				server:   testServer,
+				baseDir:  testBaseDir,
+				subDir:   testCSIVolume,
+				uuid:     "uuid",
+				onDelete: "retain",
+			},
+			expectErr: false,
+		},
+		{
+			name:     "valid request nested ondelete delete",
+			volumeID: newTestVolumeOnDeleteDelete,
+			resp: &nfsVolume{
+				id:       newTestVolumeOnDeleteDelete,
+				server:   testServer,
+				baseDir:  testBaseDir,
+				subDir:   testCSIVolume,
+				uuid:     "uuid",
+				onDelete: "delete",
+			},
+			expectErr: false,
+		},
 	}

 	for _, test := range cases {
@@ -556,12 +603,13 @@ func TestNewNFSVolume(t *testing.T) {
 				paramSubDir: "subdir",
 			},
 			expectVol: &nfsVolume{
-				id:      "nfs-server.default.svc.cluster.local#share#subdir#pv-name",
-				server:  "//nfs-server.default.svc.cluster.local",
-				baseDir: "share",
-				subDir:  "subdir",
-				size:    100,
-				uuid:    "pv-name",
+				id:       "nfs-server.default.svc.cluster.local#share#subdir#pv-name#",
+				server:   "//nfs-server.default.svc.cluster.local",
+				baseDir:  "share",
+				subDir:   "subdir",
+				size:     100,
+				uuid:     "pv-name",
+				onDelete: "delete",
 			},
 		},
 		{
@@ -577,12 +625,13 @@ func TestNewNFSVolume(t *testing.T) {
 				pvNameKey: "pvname",
 			},
 			expectVol: &nfsVolume{
-				id:      "nfs-server.default.svc.cluster.local#share#subdir-pvcname-pvcnamespace-pvname#pv-name",
-				server:  "//nfs-server.default.svc.cluster.local",
-				baseDir: "share",
-				subDir:  "subdir-pvcname-pvcnamespace-pvname",
-				size:    100,
-				uuid:    "pv-name",
+				id:       "nfs-server.default.svc.cluster.local#share#subdir-pvcname-pvcnamespace-pvname#pv-name#",
+				server:   "//nfs-server.default.svc.cluster.local",
+				baseDir:  "share",
+				subDir:   "subdir-pvcname-pvcnamespace-pvname",
+				size:     100,
+				uuid:     "pv-name",
+				onDelete: "delete",
 			},
 		},
 		{
@@ -594,12 +643,13 @@ func TestNewNFSVolume(t *testing.T) {
 				paramShare: "share",
 			},
 			expectVol: &nfsVolume{
-				id:      "nfs-server.default.svc.cluster.local#share#pv-name#",
-				server:  "//nfs-server.default.svc.cluster.local",
-				baseDir: "share",
-				subDir:  "pv-name",
-				size:    200,
-				uuid:    "",
+				id:       "nfs-server.default.svc.cluster.local#share#pv-name##",
+				server:   "//nfs-server.default.svc.cluster.local",
+				baseDir:  "share",
+				subDir:   "pv-name",
+				size:     200,
+				uuid:     "",
+				onDelete: "delete",
 			},
 		},
 		{
@@ -608,10 +658,20 @@ func TestNewNFSVolume(t *testing.T) {
 			expectVol: nil,
 			expectErr: fmt.Errorf("%s is a required parameter", paramServer),
 		},
+		{
+			desc: "invalid onDelete value",
+			params: map[string]string{
+				paramServer:   "//nfs-server.default.svc.cluster.local",
+				paramShare:    "share",
+				paramOnDelete: "invalid",
+			},
+			expectVol: nil,
+			expectErr: fmt.Errorf("invalid value %s for OnDelete, supported values are %v", "invalid", supportedOnDeleteValues),
+		},
 	}

 	for _, test := range cases {
-		vol, err := newNFSVolume(test.name, test.size, test.params)
+		vol, err := newNFSVolume(test.name, test.size, test.params, "delete")
 		if !reflect.DeepEqual(err, test.expectErr) {
 			t.Errorf("[test: %s] Unexpected error: %v, expected error: %v", test.desc, err, test.expectErr)
 		}
@@ -27,20 +27,22 @@ import (

 // DriverOptions defines driver parameters specified in driver deployment
 type DriverOptions struct {
-	NodeID           string
-	DriverName       string
-	Endpoint         string
-	MountPermissions uint64
-	WorkingMountDir  string
+	NodeID                string
+	DriverName            string
+	Endpoint              string
+	MountPermissions      uint64
+	WorkingMountDir       string
+	DefaultOnDeletePolicy string
 }

 type Driver struct {
-	name             string
-	nodeID           string
-	version          string
-	endpoint         string
-	mountPermissions uint64
-	workingMountDir  string
+	name                  string
+	nodeID                string
+	version               string
+	endpoint              string
+	mountPermissions      uint64
+	workingMountDir       string
+	defaultOnDeletePolicy string

 	//ids *identityServer
 	ns *NodeServer
@@ -59,6 +61,7 @@ const (
 	// "base" instead of "/base"
 	paramShare            = "share"
 	paramSubDir           = "subdir"
+	paramOnDelete         = "ondelete"
 	mountOptionsField     = "mountoptions"
 	mountPermissionsField = "mountpermissions"
 	pvcNameKey            = "csi.storage.k8s.io/pvc/name"
@@ -32,6 +32,24 @@ import (
 	netutil "k8s.io/utils/net"
 )

+const (
+	separator = "#"
+	delete    = "delete"
+	retain    = "retain"
+)
+
+var supportedOnDeleteValues = []string{"", delete, retain}
+
+func validateOnDeleteValue(onDelete string) error {
+	for _, v := range supportedOnDeleteValues {
+		if strings.EqualFold(v, onDelete) {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("invalid value %s for OnDelete, supported values are %v", onDelete, supportedOnDeleteValues)
+}
+
 func NewDefaultIdentityServer(d *Driver) *IdentityServer {
 	return &IdentityServer{
 		Driver: d,
@@ -301,3 +301,49 @@ func TestSetKeyValueInMap(t *testing.T) {
 		}
 	}
 }
+
+func TestValidateOnDeleteValue(t *testing.T) {
+	tests := []struct {
+		desc     string
+		onDelete string
+		expected error
+	}{
+		{
+			desc:     "empty value",
+			onDelete: "",
+			expected: nil,
+		},
+		{
+			desc:     "delete value",
+			onDelete: "delete",
+			expected: nil,
+		},
+		{
+			desc:     "retain value",
+			onDelete: "retain",
+			expected: nil,
+		},
+		{
+			desc:     "Retain value",
+			onDelete: "Retain",
+			expected: nil,
+		},
+		{
+			desc:     "Delete value",
+			onDelete: "Delete",
+			expected: nil,
+		},
+		{
+			desc:     "invalid value",
+			onDelete: "invalid",
+			expected: fmt.Errorf("invalid value %s for OnDelete, supported values are %v", "invalid", supportedOnDeleteValues),
+		},
+	}
+
+	for _, test := range tests {
+		result := validateOnDeleteValue(test.onDelete)
+		if !reflect.DeepEqual(result, test.expected) {
+			t.Errorf("test[%s]: unexpected output: %v, expected result: %v", test.desc, result, test.expected)
+		}
+	}
+}
@@ -325,4 +325,27 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 		}
 		test.Run(cs, ns)
 	})
+
+	ginkgo.It("should create a volume on demand with retaining subdir on delete [nfs.csi.k8s.io]", func() {
+		pods := []testsuites.PodDetails{
+			{
+				Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
+				Volumes: []testsuites.VolumeDetails{
+					{
+						ClaimSize: "10Gi",
+						VolumeMount: testsuites.VolumeMountDetails{
+							NameGenerate:      "test-volume-",
+							MountPathGenerate: "/mnt/test-",
+						},
+					},
+				},
+			},
+		}
+		test := testsuites.DynamicallyProvisionedCmdVolumeTest{
+			CSIDriver:              testDriver,
+			Pods:                   pods,
+			StorageClassParameters: retainStorageClassParameters,
+		}
+		test.Run(cs, ns)
+	})
 })
@@ -67,6 +67,14 @@ var (
 		"csi.storage.k8s.io/provisioner-secret-namespace": "default",
 		"mountPermissions": "0755",
 	}
+	retainStorageClassParameters = map[string]string{
+		"server": nfsServerAddress,
+		"share":  nfsShare,
+		"csi.storage.k8s.io/provisioner-secret-name":      "mount-options",
+		"csi.storage.k8s.io/provisioner-secret-namespace": "default",
+		"mountPermissions": "0755",
+		"onDelete":         "retain",
+	}
 	controllerServer *nfs.ControllerServer
 )