feat: add mountOptions parameter for inline volume
fix e2e test failure by enabling inline volumes explicitly in E2E_HELM_OPTIONS (the chart default is now false)
parent 60285578cf
commit e86dc68c2a
@@ -42,7 +42,7 @@ REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
 IMAGE_TAG = $(REGISTRY)/$(IMAGENAME):$(IMAGE_VERSION)
 IMAGE_TAG_LATEST = $(REGISTRY)/$(IMAGENAME):latest
 
-E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always
+E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always --set feature.enableInlineVolume=true
 E2E_HELM_OPTIONS += ${EXTRA_HELM_OPTIONS}
 
 # Output type of docker buildx build
@@ -40,7 +40,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driver
 | `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
 | `driver.mountPermissions` | mounted folder permissions name | `0777` |
 | `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
-| `feature.enableInlineVolume` | enable inline volume | `true` |
+| `feature.enableInlineVolume` | enable inline volume | `false` |
 | `image.nfs.repository` | csi-driver-nfs image | `mcr.microsoft.com/k8s/csi/nfs-csi` |
 | `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
 | `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
Binary file not shown.
@@ -30,7 +30,7 @@ driver:
 
 feature:
   enableFSGroupPolicy: false
-  enableInlineVolume: true
+  enableInlineVolume: false
 
 controller:
   name: csi-nfs-controller
@@ -23,3 +23,4 @@ spec:
       volumeAttributes:
         server: nfs-server.default.svc.cluster.local # required
         share: / # required
+        mountOptions: "nfsvers=4.1,sec=sys" # optional
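For readers wiring this up from Go rather than YAML, the same inline volume can be assembled with the core/v1 API; the sketch below is a minimal illustration, not code from this repository (the helper name buildInlineNFSVolume is made up, and the volume name, server, share, and options mirror the example above).

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// buildInlineNFSVolume returns a pod volume backed by an inline CSI volume of
// the NFS driver, carrying the mountOptions attribute introduced in this commit.
func buildInlineNFSVolume(name, server, share, mountOptions string, readOnly bool) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver: "nfs.csi.k8s.io",
				VolumeAttributes: map[string]string{
					"server":       server,
					"share":        share,
					"mountOptions": mountOptions,
				},
				ReadOnly: &readOnly,
			},
		},
	}
}

func main() {
	vol := buildInlineNFSVolume("persistent-storage", "nfs-server.default.svc.cluster.local", "/", "nfsvers=4.1,sec=sys", false)
	fmt.Printf("%+v\n", vol)
}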
@@ -38,7 +38,8 @@ type NodeServer struct {
 
 // NodePublishVolume mount the volume
 func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
-	if req.GetVolumeCapability() == nil {
+	volCap := req.GetVolumeCapability()
+	if volCap == nil {
 		return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
 	}
 	volumeID := req.GetVolumeId()
@@ -49,6 +50,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 	if len(targetPath) == 0 {
 		return nil, status.Error(codes.InvalidArgument, "Target path not provided")
 	}
+	mountOptions := volCap.GetMount().GetMountFlags()
+	if req.GetReadonly() {
+		mountOptions = append(mountOptions, "ro")
+	}
 
 	var server, baseDir string
 	for k, v := range req.GetVolumeContext() {
@@ -57,6 +62,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 			server = v
 		case paramShare:
 			baseDir = v
+		case mountOptionsField:
+			if v != "" {
+				mountOptions = append(mountOptions, v)
+			}
 		}
 	}
 
@@ -83,11 +92,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 		return &csi.NodePublishVolumeResponse{}, nil
 	}
 
-	mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
-	if req.GetReadonly() {
-		mountOptions = append(mountOptions, "ro")
-	}
-
 	klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
 	err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
 	if err != nil {
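The node server change above boils down to an ordering rule: mount flags from the volume capability come first, "ro" is appended for read-only publishes, and whatever string arrives in the mountOptions volume attribute is appended last. The sketch below is a standalone illustration of that assembly, not the driver's code (collectMountOptions is a hypothetical helper); the real mounter typically joins the slice with commas into a single -o argument, which strings.Join approximates here.

package main

import (
	"fmt"
	"strings"
)

// collectMountOptions mimics the ordering used in NodePublishVolume:
// capability mount flags, then "ro" when read-only, then the raw
// mountOptions volume attribute.
func collectMountOptions(capabilityFlags []string, readOnly bool, mountOptionsAttr string) []string {
	options := append([]string{}, capabilityFlags...)
	if readOnly {
		options = append(options, "ro")
	}
	if mountOptionsAttr != "" {
		options = append(options, mountOptionsAttr)
	}
	return options
}

func main() {
	opts := collectMountOptions(nil, false, "nfsvers=4.1,sec=sys")
	// The effective mount call then looks roughly like:
	//   mount -t nfs -o nfsvers=4.1,sec=sys <server>:/<share> <target>
	fmt.Println("-o " + strings.Join(opts, ","))
}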
@@ -275,11 +275,12 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
 		}
 
 		test := testsuites.DynamicallyProvisionedInlineVolumeTest{
-			CSIDriver: testDriver,
-			Pods:      pods,
-			Server:    nfsServerAddress,
-			Share:     nfsShare,
-			ReadOnly:  false,
+			CSIDriver:    testDriver,
+			Pods:         pods,
+			Server:       nfsServerAddress,
+			Share:        nfsShare,
+			MountOptions: "nfsvers=4.1,sec=sys",
+			ReadOnly:     false,
 		}
 		test.Run(cs, ns)
 	})
@@ -27,18 +27,19 @@ import (
 // Waiting for the PV provisioner to create an inline volume
 // Testing if the Pod(s) Cmd is run with a 0 exit code
 type DynamicallyProvisionedInlineVolumeTest struct {
-	CSIDriver driver.DynamicPVTestDriver
-	Pods      []PodDetails
-	Server    string
-	Share     string
-	ReadOnly  bool
+	CSIDriver    driver.DynamicPVTestDriver
+	Pods         []PodDetails
+	Server       string
+	Share        string
+	MountOptions string
+	ReadOnly     bool
 }
 
 func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
 	for _, pod := range t.Pods {
 		var tpod *TestPod
 		var cleanup []func()
-		tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.ReadOnly)
+		tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.MountOptions, t.ReadOnly)
 		// defer must be called here for resources not get removed before using them
 		for i := range cleanup {
 			defer cleanup[i]()
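The tester's comment about defer is worth spelling out: cleanup callbacks are deferred inside the loop so every pod set up for the test stays alive until Run returns, and is then torn down in reverse order. The following is a self-contained illustration of that pattern only, with placeholder names standing in for the real helpers.

package main

import "fmt"

// setup stands in for SetupWithCSIInlineVolumes: it creates a resource and
// returns the cleanup funcs that delete it again.
func setup(name string) (string, []func()) {
	fmt.Println("create", name)
	return name, []func(){func() { fmt.Println("delete", name) }}
}

func run(resources []string) {
	for _, r := range resources {
		created, cleanup := setup(r)
		// Deferring inside the loop keeps the resource alive for the rest of
		// run; all deferred deletes fire in reverse order when run returns.
		for i := range cleanup {
			defer cleanup[i]()
		}
		fmt.Println("use", created)
	}
}

func main() {
	run([]string{"test-pod-1", "test-pod-2"})
}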
@@ -123,11 +123,11 @@ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, names
 	return tpod, cleanupFuncs
 }
 
-func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share string, readOnly bool) (*TestPod, []func()) {
+func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share, mountOptions string, readOnly bool) (*TestPod, []func()) {
 	tpod := NewTestPod(client, namespace, pod.Cmd)
 	cleanupFuncs := make([]func(), 0)
 	for n, v := range pod.Volumes {
-		tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, readOnly)
+		tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, mountOptions, readOnly)
 	}
 	return tpod, cleanupFuncs
 }
@@ -596,7 +596,7 @@ func (t *TestPod) SetupVolumeMountWithSubpath(pvc *v1.PersistentVolumeClaim, nam
 	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
 }
 
-func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, readOnly bool) {
+func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share, mountOptions string, readOnly bool) {
 	volumeMount := v1.VolumeMount{
 		Name:      name,
 		MountPath: mountPath,
@@ -610,8 +610,9 @@ func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share string, re
 		CSI: &v1.CSIVolumeSource{
 			Driver: nfs.DefaultDriverName,
 			VolumeAttributes: map[string]string{
-				"server": server,
-				"share":  share,
+				"server":       server,
+				"share":        share,
+				"mountOptions": mountOptions,
 			},
 			ReadOnly: &readOnly,
 		},