feat: add mountOptions parameter for inline volume

fix test failure
andyzhangx 2022-01-11 13:08:58 +00:00
parent 60285578cf
commit e86dc68c2a
10 changed files with 33 additions and 25 deletions


@@ -42,7 +42,7 @@ REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
 IMAGE_TAG = $(REGISTRY)/$(IMAGENAME):$(IMAGE_VERSION)
 IMAGE_TAG_LATEST = $(REGISTRY)/$(IMAGENAME):latest
-E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always
+E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always --set feature.enableInlineVolume=true
 E2E_HELM_OPTIONS += ${EXTRA_HELM_OPTIONS}
 # Output type of docker buildx build


@@ -40,7 +40,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
 | `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
 | `driver.mountPermissions` | mounted folder permissions name | `0777` |
 | `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
-| `feature.enableInlineVolume` | enable inline volume | `true` |
+| `feature.enableInlineVolume` | enable inline volume | `false` |
 | `image.nfs.repository` | csi-driver-nfs image | `mcr.microsoft.com/k8s/csi/nfs-csi` |
 | `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
 | `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |


@@ -30,7 +30,7 @@ driver:
 feature:
   enableFSGroupPolicy: false
-  enableInlineVolume: true
+  enableInlineVolume: false
 controller:
   name: csi-nfs-controller


@@ -23,3 +23,4 @@ spec:
       volumeAttributes:
         server: nfs-server.default.svc.cluster.local # required
         share: / # required
+        mountOptions: "nfsvers=4.1,sec=sys" # optional
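How this attribute reaches the driver: kubelet copies a pod's inline-volume volumeAttributes verbatim into the VolumeContext of the CSI NodePublishVolume request. A minimal sketch, assuming the CSI spec Go bindings (github.com/container-storage-interface/spec/lib/go/csi); the VolumeId and TargetPath values are hypothetical placeholders generated by kubelet, not taken from this commit:

package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

func main() {
	// Sketch only: the request the node server receives for the example
	// inline volume above. The attributes arrive untouched in VolumeContext.
	req := &csi.NodePublishVolumeRequest{
		VolumeId:   "csi-2f6f5f7b9d8a", // hypothetical, kubelet-generated
		TargetPath: "/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~csi/test/mount",
		VolumeContext: map[string]string{
			"server":       "nfs-server.default.svc.cluster.local",
			"share":        "/",
			"mountOptions": "nfsvers=4.1,sec=sys",
		},
	}
	fmt.Println(req.GetVolumeContext()["mountOptions"]) // nfsvers=4.1,sec=sys
}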


@@ -38,7 +38,8 @@ type NodeServer struct {
 // NodePublishVolume mount the volume
 func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
-	if req.GetVolumeCapability() == nil {
+	volCap := req.GetVolumeCapability()
+	if volCap == nil {
 		return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
 	}
 	volumeID := req.GetVolumeId()
@@ -49,6 +50,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	if len(targetPath) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "Target path not provided")
 	}
+	mountOptions := volCap.GetMount().GetMountFlags()
+	if req.GetReadonly() {
+		mountOptions = append(mountOptions, "ro")
+	}
 
 	var server, baseDir string
 	for k, v := range req.GetVolumeContext() {
@@ -57,6 +62,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 			server = v
 		case paramShare:
 			baseDir = v
+		case mountOptionsField:
+			if v != "" {
+				mountOptions = append(mountOptions, v)
+			}
 		}
 	}
@@ -83,11 +92,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
-	mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
-	if req.GetReadonly() {
-		mountOptions = append(mountOptions, "ro")
-	}
-
 	klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
 	err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
 	if err != nil {
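The net effect of these node-server hunks: the mount-option slice is now built before the volume-context loop (rather than after it), so the new mountOptions attribute can be appended to it. A minimal sketch with a hypothetical helper name, buildMountOptions; the driver does this inline in NodePublishVolume:

package main

import "fmt"

// buildMountOptions mirrors the merge order implemented above: capability
// mount flags first, then "ro" for read-only requests, then the mountOptions
// volume attribute verbatim. (Hypothetical stand-alone helper, not part of
// the commit.)
func buildMountOptions(capabilityFlags []string, readOnly bool, mountOptionsAttr string) []string {
	options := append([]string{}, capabilityFlags...)
	if readOnly {
		options = append(options, "ro")
	}
	if mountOptionsAttr != "" {
		// The whole attribute stays one slice element: mount accepts
		// comma-separated values in a single -o argument, so
		// "nfsvers=4.1,sec=sys" needs no splitting.
		options = append(options, mountOptionsAttr)
	}
	return options
}

func main() {
	fmt.Println(buildMountOptions(nil, true, "nfsvers=4.1,sec=sys"))
	// Output: [ro nfsvers=4.1,sec=sys]
}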


@@ -279,6 +279,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 			Pods: pods,
 			Server: nfsServerAddress,
 			Share: nfsShare,
+			MountOptions: "nfsvers=4.1,sec=sys",
 			ReadOnly: false,
 		}
 		test.Run(cs, ns)


@@ -31,6 +31,7 @@ type DynamicallyProvisionedInlineVolumeTest struct {
 	Pods []PodDetails
 	Server string
 	Share string
+	MountOptions string
 	ReadOnly bool
 }
@@ -38,7 +39,7 @@ func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface,
 	for _, pod := range t.Pods {
 		var tpod *TestPod
 		var cleanup []func()
-		tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.ReadOnly)
+		tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.MountOptions, t.ReadOnly)
 		// defer must be called here for resources not get removed before using them
 		for i := range cleanup {
 			defer cleanup[i]()


@@ -123,11 +123,11 @@ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, names
 	return tpod, cleanupFuncs
 }
 
-func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share string, readOnly bool) (*TestPod, []func()) {
+func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share, mountOptions string, readOnly bool) (*TestPod, []func()) {
 	tpod := NewTestPod(client, namespace, pod.Cmd)
 	cleanupFuncs := make([]func(), 0)
 	for n, v := range pod.Volumes {
-		tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, readOnly)
+		tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, mountOptions, readOnly)
 	}
 	return tpod, cleanupFuncs
 }


@@ -596,7 +596,7 @@ func (t *TestPod) SetupVolumeMountWithSubpath(pvc *v1.PersistentVolumeClaim, nam
 	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
 }
 
-func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, readOnly bool) {
+func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share, mountOptions string, readOnly bool) {
 	volumeMount := v1.VolumeMount{
 		Name: name,
 		MountPath: mountPath,
@@ -612,6 +612,7 @@ func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, re
 			VolumeAttributes: map[string]string{
 				"server": server,
 				"share": share,
+				"mountOptions": mountOptions,
 			},
 			ReadOnly: &readOnly,
 		},
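For reference, a minimal sketch (assuming k8s.io/api/core/v1) of the inline CSI volume spec that SetupCSIInlineVolume now produces; the volume name and attribute values are examples taken from the hunks above, not literals from the test helper:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	readOnly := false
	// Sketch of the pod volume built by the helper after this change, with
	// the new mountOptions entry alongside server and share.
	volume := v1.Volume{
		Name: "test-volume-1",
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver: "nfs.csi.k8s.io",
				VolumeAttributes: map[string]string{
					"server":       "nfs-server.default.svc.cluster.local",
					"share":        "/",
					"mountOptions": "nfsvers=4.1,sec=sys",
				},
				ReadOnly: &readOnly,
			},
		},
	}
	fmt.Printf("%+v\n", volume)
}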