Merge pull request #266 from andyzhangx/inline-volume

feat: add inline volume support
Andy Zhang 2022-01-13 10:09:59 +08:00 committed by GitHub
commit fb28204a23
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 163 additions and 10 deletions

View File

@@ -42,7 +42,7 @@ REGISTRY_NAME ?= $(shell echo $(REGISTRY) | sed "s/.azurecr.io//g")
IMAGE_TAG = $(REGISTRY)/$(IMAGENAME):$(IMAGE_VERSION)
IMAGE_TAG_LATEST = $(REGISTRY)/$(IMAGENAME):latest
E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always
E2E_HELM_OPTIONS ?= --set image.nfs.repository=$(REGISTRY)/$(IMAGENAME) --set image.nfs.tag=$(IMAGE_VERSION) --set image.nfs.pullPolicy=Always --set feature.enableInlineVolume=true
E2E_HELM_OPTIONS += ${EXTRA_HELM_OPTIONS}
# Output type of docker buildx build

View File

@@ -40,6 +40,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `driver.mountPermissions` | mounted folder permissions | `0777` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
| `feature.enableInlineVolume` | enable inline volume | `false` |
| `image.nfs.repository` | csi-driver-nfs image | `mcr.microsoft.com/k8s/csi/nfs-csi` |
| `image.nfs.tag` | csi-driver-nfs image tag | `latest` |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
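
For convenience, the new flag can also be supplied as a values override at install time. A minimal sketch, assuming the upstream chart repo and chart name `csi-driver-nfs`:

```yaml
# values-override.yaml: enable inline (ephemeral) volume support
# install with, e.g.:
#   helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system -f values-override.yaml
feature:
  enableInlineVolume: true
```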

View File

@@ -6,6 +6,9 @@ spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent
{{- if .Values.feature.enableInlineVolume}}
    - Ephemeral
{{- end}}
{{- if .Values.feature.enableFSGroupPolicy}}
  fsGroupPolicy: File
{{- end}}
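
Rendered with `feature.enableInlineVolume=true` (and the default `driver.name`), the chart should produce a CSIDriver object that advertises both lifecycle modes, roughly:

```yaml
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: nfs.csi.k8s.io
spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent  # PVC-backed volumes
    - Ephemeral   # pod-inline volumes, gated by feature.enableInlineVolume
```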

View File

@@ -30,6 +30,7 @@ driver:
feature:
  enableFSGroupPolicy: false
  enableInlineVolume: false

controller:
  name: csi-nfs-controller

View File

@@ -7,3 +7,4 @@ spec:
  attachRequired: false
  volumeLifecycleModes:
    - Persistent
    - Ephemeral

View File

@@ -0,0 +1,26 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: nginx-pod-inline-volume
spec:
  nodeSelector:
    "kubernetes.io/os": linux
  containers:
    - image: mcr.microsoft.com/oss/nginx/nginx:1.19.5
      name: nginx-nfs
      command:
        - "/bin/bash"
        - "-c"
        - set -euo pipefail; while true; do echo $(date) >> /mnt/nfs/outfile; sleep 1; done
      volumeMounts:
        - name: persistent-storage
          mountPath: "/mnt/nfs"
  volumes:
    - name: persistent-storage
      csi:
        driver: nfs.csi.k8s.io
        volumeAttributes:
          server: nfs-server.default.svc.cluster.local  # required
          share: /  # required
          mountOptions: "nfsvers=4.1,sec=sys"  # optional
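
The inline source also accepts the standard `csi.readOnly` field, which the node server turns into an `ro` mount flag (see the NodePublishVolume change below). A hypothetical read-only variant of the volume stanza above:

```yaml
volumes:
  - name: persistent-storage
    csi:
      driver: nfs.csi.k8s.io
      readOnly: true  # surfaces as req.GetReadonly() on the node server
      volumeAttributes:
        server: nfs-server.default.svc.cluster.local
        share: /
```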

View File

@@ -20,7 +20,7 @@ rollout_and_wait() {
  trap "echo \"Failed to apply config \\\"$1\\\"\" >&2" err

  APPNAME=$(kubectl apply -f $1 | grep -E "^(:?daemonset|deployment|statefulset|pod)" | awk '{printf $1}')
  if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\)" || true) ]]; then
  if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\|pod\)" || true) ]]; then
    kubectl rollout status $APPNAME --watch --timeout=5m
  else
    kubectl wait "${APPNAME}" --for condition=ready --timeout=5m

View File

@@ -38,7 +38,8 @@ type NodeServer struct {

// NodePublishVolume mount the volume
func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if req.GetVolumeCapability() == nil {
	volCap := req.GetVolumeCapability()
	if volCap == nil {
		return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
	}
	volumeID := req.GetVolumeId()
@@ -49,6 +50,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
	if len(targetPath) == 0 {
		return nil, status.Error(codes.InvalidArgument, "Target path not provided")
	}
	mountOptions := volCap.GetMount().GetMountFlags()
	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}

	var server, baseDir string
	for k, v := range req.GetVolumeContext() {
@@ -57,6 +62,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
			server = v
		case paramShare:
			baseDir = v
		case mountOptionsField:
			if v != "" {
				mountOptions = append(mountOptions, v)
			}
		}
	}
@@ -83,11 +92,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
		return &csi.NodePublishVolumeResponse{}, nil
	}

	mountOptions := req.GetVolumeCapability().GetMount().GetMountFlags()
	if req.GetReadonly() {
		mountOptions = append(mountOptions, "ro")
	}

	klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
	err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
	if err != nil {
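
This reshuffle is what makes the `mountOptions` volume attribute work for inline volumes: the options slice is now built before the volume-context loop, so attribute-supplied options are appended to the flags from the volume capability (plus `ro` when read-only). A sketch of how the attributes from the example pod map onto this code:

```yaml
volumeAttributes:
  server: nfs-server.default.svc.cluster.local  # paramServer -> server
  share: /                                      # paramShare  -> baseDir
  mountOptions: "nfsvers=4.1,sec=sys"           # mountOptionsField, appended as one element;
                                                # the mounter comma-joins the options, yielding roughly:
                                                # mount -t nfs -o nfsvers=4.1,sec=sys <server>:<share> <target>
```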

View File

@@ -257,6 +257,33 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
		}
		test.Run(cs, ns)
	})

	ginkgo.It("should create a CSI inline volume [nfs.csi.k8s.io]", func() {
		pods := []testsuites.PodDetails{
			{
				Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
				Volumes: []testsuites.VolumeDetails{
					{
						ClaimSize: "10Gi",
						VolumeMount: testsuites.VolumeMountDetails{
							NameGenerate:      "test-volume-",
							MountPathGenerate: "/mnt/test-",
						},
					},
				},
			},
		}
		test := testsuites.DynamicallyProvisionedInlineVolumeTest{
			CSIDriver:    testDriver,
			Pods:         pods,
			Server:       nfsServerAddress,
			Share:        nfsShare,
			MountOptions: "nfsvers=4.1,sec=sys",
			ReadOnly:     false,
		}
		test.Run(cs, ns)
	})
})

func restClient(group string, version string) (restclientset.Interface, error) {

View File

@@ -37,6 +37,8 @@ import (
const (
	kubeconfigEnvVar  = "KUBECONFIG"
	testWindowsEnvVar = "TEST_WINDOWS"
	nfsServerAddress  = "nfs-server.default.svc.cluster.local"
	nfsShare          = "/"
)

var (
@@ -44,8 +46,8 @@ var (
	nfsDriver        *nfs.Driver
	isWindowsCluster = os.Getenv(testWindowsEnvVar) != ""
	defaultStorageClassParameters = map[string]string{
		"server": "nfs-server.default.svc.cluster.local",
		"share":  "/",
		"server": nfsServerAddress,
		"share":  nfsShare,
		"csi.storage.k8s.io/provisioner-secret-name":      "mount-options",
		"csi.storage.k8s.io/provisioner-secret-namespace": "default",
	}

View File

@@ -0,0 +1,54 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
	"github.com/onsi/ginkgo"
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// DynamicallyProvisionedInlineVolumeTest will mount the given server and share
// into a pod as a CSI inline volume,
// then test that the Pod(s) Cmd exits with code 0.
type DynamicallyProvisionedInlineVolumeTest struct {
	CSIDriver    driver.DynamicPVTestDriver
	Pods         []PodDetails
	Server       string
	Share        string
	MountOptions string
	ReadOnly     bool
}

func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
	for _, pod := range t.Pods {
		var tpod *TestPod
		var cleanup []func()
		tpod, cleanup = pod.SetupWithCSIInlineVolumes(client, namespace, t.CSIDriver, t.Server, t.Share, t.MountOptions, t.ReadOnly)
		// defer must be called here so resources are not removed before they are used
		for i := range cleanup {
			defer cleanup[i]()
		}

		ginkgo.By("deploying the pod")
		tpod.Create()
		defer tpod.Cleanup()
		ginkgo.By("checking that the pod's command exits with no error")
		tpod.WaitForSuccess()
	}
}

View File

@@ -123,6 +123,15 @@ func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, names
	return tpod, cleanupFuncs
}

func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, server, share, mountOptions string, readOnly bool) (*TestPod, []func()) {
	tpod := NewTestPod(client, namespace, pod.Cmd)
	cleanupFuncs := make([]func(), 0)
	for n, v := range pod.Volumes {
		tpod.SetupCSIInlineVolume(fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), server, share, mountOptions, readOnly)
	}
	return tpod, cleanupFuncs
}

func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func()) {
	cleanupFuncs := make([]func(), 0)
	volume := pod.Volumes[0]

View File

@@ -595,3 +595,28 @@ func (t *TestPod) SetupVolumeMountWithSubpath(pvc *v1.PersistentVolumeClaim, nam
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

func (t *TestPod) SetupCSIInlineVolume(name, mountPath, server, share, mountOptions string, readOnly bool) {
	volumeMount := v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
		ReadOnly:  readOnly,
	}
	t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver: nfs.DefaultDriverName,
				VolumeAttributes: map[string]string{
					"server":       server,
					"share":        share,
					"mountOptions": mountOptions,
				},
				ReadOnly: &readOnly,
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}