chore: refine onDelete sub dir feature
This commit is contained in:
parent
226ef17d02
commit
53b850ec68
@ -66,6 +66,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
|
||||
| `controller.runOnMaster` | run controller on master node(deprecated on k8s 1.25+) |`false` |
|
||||
| `controller.runOnControlPlane` | run controller on control plane node |`false` |
|
||||
| `controller.dnsPolicy` | dnsPolicy of controller driver, available values: `Default`, `ClusterFirstWithHostNet`, `ClusterFirst` | `Default` |
|
||||
| `controller.defaultOnDeletePolicy` | default policy for deleting subdirectory when deleting a volume, available values: `delete`, `retain` | `delete` |
|
||||
| `controller.logLevel` | controller driver log level |`5` |
|
||||
| `controller.workingMountDir` | working directory for provisioner to mount nfs shares temporarily | `/tmp` |
|
||||
| `controller.affinity` | controller pod affinity | `{}` |
|
||||
|
||||
@ -93,6 +93,7 @@ spec:
|
||||
- "--drivername={{ .Values.driver.name }}"
|
||||
- "--mount-permissions={{ .Values.driver.mountPermissions }}"
|
||||
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
|
||||
- "--default-ondelete-policy={{ .Values.controller.defaultOnDeletePolicy }}"
|
||||
env:
|
||||
- name: NODE_ID
|
||||
valueFrom:
|
||||
|
||||
@ -44,8 +44,9 @@ controller:
|
||||
livenessProbe:
|
||||
healthPort: 29652
|
||||
logLevel: 5
|
||||
workingMountDir: "/tmp"
|
||||
workingMountDir: /tmp
|
||||
dnsPolicy: Default # available values: Default, ClusterFirstWithHostNet, ClusterFirst
|
||||
defaultOnDeletePolicy: delete # available values: delete, retain
|
||||
affinity: {}
|
||||
nodeSelector: {}
|
||||
tolerations:
|
||||
|
||||
@ -31,7 +31,7 @@ var (
|
||||
mountPermissions = flag.Uint64("mount-permissions", 0, "mounted folder permissions")
|
||||
driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
|
||||
workingMountDir = flag.String("working-mount-dir", "/tmp", "working directory for provisioner to mount nfs shares temporarily")
|
||||
defaultOnDeletePolicy = flag.String("default-ondelete-policy", "delete", "default policy for deleting subdirectories when deleting a volume")
|
||||
defaultOnDeletePolicy = flag.String("default-ondelete-policy", "", "default policy for deleting subdirectory when deleting a volume")
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
||||
@ -10,6 +10,7 @@ server | NFS Server address | domain name `nfs-server.default.svc.cluster.local`
|
||||
share | NFS share path | `/` | Yes |
|
||||
subDir | sub directory under nfs share | | No | if sub directory does not exist, this driver will create a new one
|
||||
mountPermissions | mounted folder permissions. The default is `0`, if set as non-zero, driver will perform `chmod` after mount | | No |
|
||||
onDelete | policy for the subdirectory when the volume is deleted: keep it if `retain`, remove it if `delete` | `delete` (default), `retain` | No | `delete`
|
||||
|
||||
- VolumeID(`volumeHandle`) is the identifier of the volume handled by the driver, format of VolumeID:
|
||||
```
|
||||
|
||||
@ -74,8 +74,6 @@ const (
|
||||
totalIDElements // Always last
|
||||
)
|
||||
|
||||
const separator = "#"
|
||||
|
||||
// CreateVolume create a volume
|
||||
func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
|
||||
name := req.GetName()
|
||||
@ -195,9 +193,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
|
||||
nfsVol.onDelete = cs.Driver.defaultOnDeletePolicy
|
||||
}
|
||||
|
||||
deleteSubdirOnVolumeDelete := nfsVol.onDelete != "retain"
|
||||
|
||||
if deleteSubdirOnVolumeDelete {
|
||||
if !strings.EqualFold(nfsVol.onDelete, retain) {
|
||||
// mount nfs base share so we can delete the subdirectory
|
||||
if err = cs.internalMount(ctx, nfsVol, nil, volCap); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
|
||||
@ -425,16 +421,12 @@ func newNFSVolume(name string, size int64, params map[string]string, defaultOnDe
|
||||
vol.uuid = name
|
||||
}
|
||||
|
||||
if onDelete == "" {
|
||||
if defaultOnDeletePolicy == "" {
|
||||
vol.onDelete = "delete"
|
||||
} else {
|
||||
vol.onDelete = defaultOnDeletePolicy
|
||||
}
|
||||
} else {
|
||||
if (onDelete != "retain") && (onDelete != "delete") {
|
||||
return nil, fmt.Errorf("%v is not a valid value for %v", onDelete, paramOnDelete)
|
||||
}
|
||||
if err := validateOnDeleteValue(onDelete); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vol.onDelete = defaultOnDeletePolicy
|
||||
if onDelete != "" {
|
||||
vol.onDelete = onDelete
|
||||
}
|
||||
|
||||
@ -472,7 +464,9 @@ func getVolumeIDFromNfsVol(vol *nfsVolume) string {
|
||||
idElements[idBaseDir] = strings.Trim(vol.baseDir, "/")
|
||||
idElements[idSubDir] = strings.Trim(vol.subDir, "/")
|
||||
idElements[idUUID] = vol.uuid
|
||||
idElements[idOnDelete] = vol.onDelete
|
||||
if strings.EqualFold(vol.onDelete, retain) {
|
||||
idElements[idOnDelete] = vol.onDelete
|
||||
}
|
||||
return strings.Join(idElements, separator)
|
||||
}
|
||||
|
||||
@ -481,7 +475,7 @@ func getVolumeIDFromNfsVol(vol *nfsVolume) string {
|
||||
//
|
||||
// new volumeID:
|
||||
// nfs-server.default.svc.cluster.local#share#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
|
||||
// nfs-server.default.svc.cluster.local#share#subdir#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
|
||||
// nfs-server.default.svc.cluster.local#share#subdir#pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64#retain
|
||||
// old volumeID: nfs-server.default.svc.cluster.local/share/pvc-4bcbf944-b6f7-4bd0-b50f-3c3dd00efc64
|
||||
func getNfsVolFromID(id string) (*nfsVolume, error) {
|
||||
var server, baseDir, subDir, uuid, onDelete string
|
||||
|
||||
@ -40,8 +40,8 @@ const (
|
||||
testBaseDirNested = "test/base/dir"
|
||||
testCSIVolume = "volume-name"
|
||||
testVolumeID = "test-server/test-base-dir/volume-name"
|
||||
newTestVolumeID = "test-server#test-base-dir#volume-name##delete"
|
||||
newTestVolumeWithVolumeID = "test-server#test-base-dir#volume-name#volume-name#delete"
|
||||
newTestVolumeID = "test-server#test-base-dir#volume-name##"
|
||||
newTestVolumeWithVolumeID = "test-server#test-base-dir#volume-name#volume-name#"
|
||||
testVolumeIDNested = "test-server/test/base/dir/volume-name"
|
||||
newTestVolumeIDNested = "test-server#test/base/dir#volume-name#"
|
||||
newTestVolumeIDUUID = "test-server#test-base-dir#volume-name#uuid"
|
||||
@ -419,7 +419,7 @@ func TestNfsVolFromId(t *testing.T) {
|
||||
server: testServer,
|
||||
baseDir: testBaseDir,
|
||||
subDir: testCSIVolume,
|
||||
onDelete: "delete",
|
||||
onDelete: "",
|
||||
},
|
||||
expectErr: false,
|
||||
},
|
||||
@ -603,7 +603,7 @@ func TestNewNFSVolume(t *testing.T) {
|
||||
paramSubDir: "subdir",
|
||||
},
|
||||
expectVol: &nfsVolume{
|
||||
id: "nfs-server.default.svc.cluster.local#share#subdir#pv-name#delete",
|
||||
id: "nfs-server.default.svc.cluster.local#share#subdir#pv-name#",
|
||||
server: "//nfs-server.default.svc.cluster.local",
|
||||
baseDir: "share",
|
||||
subDir: "subdir",
|
||||
@ -625,7 +625,7 @@ func TestNewNFSVolume(t *testing.T) {
|
||||
pvNameKey: "pvname",
|
||||
},
|
||||
expectVol: &nfsVolume{
|
||||
id: "nfs-server.default.svc.cluster.local#share#subdir-pvcname-pvcnamespace-pvname#pv-name#delete",
|
||||
id: "nfs-server.default.svc.cluster.local#share#subdir-pvcname-pvcnamespace-pvname#pv-name#",
|
||||
server: "//nfs-server.default.svc.cluster.local",
|
||||
baseDir: "share",
|
||||
subDir: "subdir-pvcname-pvcnamespace-pvname",
|
||||
@ -643,7 +643,7 @@ func TestNewNFSVolume(t *testing.T) {
|
||||
paramShare: "share",
|
||||
},
|
||||
expectVol: &nfsVolume{
|
||||
id: "nfs-server.default.svc.cluster.local#share#pv-name##delete",
|
||||
id: "nfs-server.default.svc.cluster.local#share#pv-name##",
|
||||
server: "//nfs-server.default.svc.cluster.local",
|
||||
baseDir: "share",
|
||||
subDir: "pv-name",
|
||||
@ -659,14 +659,14 @@ func TestNewNFSVolume(t *testing.T) {
|
||||
expectErr: fmt.Errorf("%s is a required parameter", paramServer),
|
||||
},
|
||||
{
|
||||
desc: "invalid onDelete value",
|
||||
params: map[string]string{
|
||||
paramServer: "//nfs-server.default.svc.cluster.local",
|
||||
paramShare: "share",
|
||||
desc: "invalid onDelete value",
|
||||
params: map[string]string{
|
||||
paramServer: "//nfs-server.default.svc.cluster.local",
|
||||
paramShare: "share",
|
||||
paramOnDelete: "invalid",
|
||||
},
|
||||
expectVol: nil,
|
||||
expectErr: fmt.Errorf("invalid is not a valid value for %s", paramOnDelete),
|
||||
expectErr: fmt.Errorf("invalid value %s for OnDelete, supported values are %v", "invalid", supportedOnDeleteValues),
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@ -32,6 +32,24 @@ import (
|
||||
netutil "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
const (
	separator = "#"
	delete    = "delete"
	retain    = "retain"
)

// supportedOnDeleteValues enumerates every accepted onDelete policy;
// the empty string means "not specified" and is deliberately accepted.
var supportedOnDeleteValues = []string{"", delete, retain}

// validateOnDeleteValue checks onDelete against the supported policies,
// comparing case-insensitively. It returns nil for a supported value and
// a descriptive error for anything else.
func validateOnDeleteValue(onDelete string) error {
	for _, candidate := range supportedOnDeleteValues {
		if strings.EqualFold(candidate, onDelete) {
			return nil
		}
	}

	return fmt.Errorf("invalid value %s for OnDelete, supported values are %v", onDelete, supportedOnDeleteValues)
}
|
||||
|
||||
func NewDefaultIdentityServer(d *Driver) *IdentityServer {
|
||||
return &IdentityServer{
|
||||
Driver: d,
|
||||
|
||||
@ -325,4 +325,27 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
|
||||
}
|
||||
test.Run(cs, ns)
|
||||
})
|
||||
|
||||
ginkgo.It("should create a volume on demand with retaining subdir on delete [nfs.csi.k8s.io]", func() {
|
||||
pods := []testsuites.PodDetails{
|
||||
{
|
||||
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
|
||||
Volumes: []testsuites.VolumeDetails{
|
||||
{
|
||||
ClaimSize: "10Gi",
|
||||
VolumeMount: testsuites.VolumeMountDetails{
|
||||
NameGenerate: "test-volume-",
|
||||
MountPathGenerate: "/mnt/test-",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
test := testsuites.DynamicallyProvisionedCmdVolumeTest{
|
||||
CSIDriver: testDriver,
|
||||
Pods: pods,
|
||||
StorageClassParameters: retainStorageClassParameters,
|
||||
}
|
||||
test.Run(cs, ns)
|
||||
})
|
||||
})
|
||||
|
||||
@ -67,6 +67,14 @@ var (
|
||||
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
|
||||
"mountPermissions": "0755",
|
||||
}
|
||||
retainStorageClassParameters = map[string]string{
|
||||
"server": nfsServerAddress,
|
||||
"share": nfsShare,
|
||||
"csi.storage.k8s.io/provisioner-secret-name": "mount-options",
|
||||
"csi.storage.k8s.io/provisioner-secret-namespace": "default",
|
||||
"mountPermissions": "0755",
|
||||
"onDelete": "retain",
|
||||
}
|
||||
controllerServer *nfs.ControllerServer
|
||||
)
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user