Merge pull request #263 from andyzhangx/workingMountDir

feat: add workingMountDir in chart config
This commit is contained in:
Andy Zhang 2022-01-05 16:06:46 +08:00 committed by GitHub
commit 5ae2584cd7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 112 additions and 101 deletions

View File

@@ -13,7 +13,7 @@ jobs:
- name: Set up Go 1.x - name: Set up Go 1.x
uses: actions/setup-go@v2 uses: actions/setup-go@v2
with: with:
go-version: ^1.16 go-version: ^1.17
id: go id: go
- name: Check out code into the Go module directory - name: Check out code into the Go module directory

View File

@@ -58,6 +58,7 @@ The following table lists the configurable parameters of the latest NFS CSI Driv
| `controller.replicas` | the replicas of csi-nfs-controller | `2` | | `controller.replicas` | the replicas of csi-nfs-controller | `2` |
| `controller.runOnMaster` | run controller on master node | `false` | | `controller.runOnMaster` | run controller on master node | `false` |
| `controller.logLevel` | controller driver log level |`5` | | `controller.logLevel` | controller driver log level |`5` |
| `controller.workingMountDir` | working directory for provisioner to mount nfs shares temporarily | `/tmp` |
| `controller.tolerations` | controller pod tolerations | | | `controller.tolerations` | controller pod tolerations | |
| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | 100Mi | | `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | 100Mi |
| `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests limits | 10m | | `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests limits | 10m |

View File

@@ -73,6 +73,7 @@ spec:
- "--endpoint=$(CSI_ENDPOINT)" - "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}" - "--drivername={{ .Values.driver.name }}"
- "--mount-permissions={{ .Values.driver.mountPermissions }}" - "--mount-permissions={{ .Values.driver.mountPermissions }}"
- "--working-mount-dir={{ .Values.controller.workingMountDir }}"
env: env:
- name: NODE_ID - name: NODE_ID
valueFrom: valueFrom:

View File

@@ -26,7 +26,7 @@ rbac:
driver: driver:
name: nfs.csi.k8s.io name: nfs.csi.k8s.io
mountPermissions: "0777" mountPermissions: 0777
feature: feature:
enableFSGroupPolicy: false enableFSGroupPolicy: false
@@ -38,6 +38,7 @@ controller:
livenessProbe: livenessProbe:
healthPort: 29652 healthPort: 29652
logLevel: 5 logLevel: 5
workingMountDir: "/tmp"
tolerations: tolerations:
- key: "node-role.kubernetes.io/master" - key: "node-role.kubernetes.io/master"
operator: "Exists" operator: "Exists"

View File

@@ -18,9 +18,7 @@ package main
import ( import (
"flag" "flag"
"fmt"
"os" "os"
"strconv"
"github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs" "github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs"
@@ -28,10 +26,11 @@ import (
) )
var ( var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id") nodeID = flag.String("nodeid", "", "node id")
perm = flag.String("mount-permissions", "0777", "mounted folder permissions") mountPermissions = flag.Uint64("mount-permissions", 0777, "mounted folder permissions")
driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver") driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
workingMountDir = flag.String("working-mount-dir", "/tmp", "working directory for provisioner to mount nfs shares temporarily")
) )
func init() { func init() {
@@ -50,18 +49,13 @@ func main() {
} }
func handle() { func handle() {
// Converting string permission representation to *uint32 driverOptions := nfs.DriverOptions{
var parsedPerm *uint32 NodeID: *nodeID,
if perm != nil && *perm != "" { DriverName: *driverName,
permu64, err := strconv.ParseUint(*perm, 8, 32) Endpoint: *endpoint,
if err != nil { MountPermissions: *mountPermissions,
fmt.Fprintf(os.Stderr, "incorrect mount-permissions value: %q", *perm) WorkingMountDir: *workingMountDir,
os.Exit(1)
}
permu32 := uint32(permu64)
parsedPerm = &permu32
} }
d := nfs.NewDriver(&driverOptions)
d := nfs.NewDriver(*nodeID, *driverName, *endpoint, parsedPerm)
d.Run(false) d.Run(false)
} }

View File

@@ -34,8 +34,6 @@ import (
// ControllerServer controller server setting // ControllerServer controller server setting
type ControllerServer struct { type ControllerServer struct {
Driver *Driver Driver *Driver
// Working directory for the provisioner to temporarily mount nfs shares at
workingMountDir string
} }
// nfsVolume is an internal representation of a volume // nfsVolume is an internal representation of a volume
@@ -98,10 +96,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
} }
}() }()
fileMode := os.FileMode(0777) fileMode := os.FileMode(cs.Driver.mountPermissions)
if cs.Driver.perm != nil {
fileMode = os.FileMode(*cs.Driver.perm)
}
// Create subdirectory under base-dir // Create subdirectory under base-dir
internalVolumePath := cs.getInternalVolumePath(nfsVol) internalVolumePath := cs.getInternalVolumePath(nfsVol)
if err = os.Mkdir(internalVolumePath, fileMode); err != nil && !os.IsExist(err) { if err = os.Mkdir(internalVolumePath, fileMode); err != nil && !os.IsExist(err) {
@@ -140,7 +135,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
} }
} }
// Mount nfs base share so we can delete the subdirectory // mount nfs base share so we can delete the subdirectory
if err = cs.internalMount(ctx, nfsVol, volCap); err != nil { if err = cs.internalMount(ctx, nfsVol, volCap); err != nil {
return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error()) return nil, status.Errorf(codes.Internal, "failed to mount nfs server: %v", err.Error())
} }
@@ -150,7 +145,7 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
} }
}() }()
// Delete subdirectory under base-dir // delete subdirectory under base-dir
internalVolumePath := cs.getInternalVolumePath(nfsVol) internalVolumePath := cs.getInternalVolumePath(nfsVol)
klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath) klog.V(2).Infof("Removing subdirectory at %v", internalVolumePath)
@@ -293,10 +288,7 @@ func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume)
// Convert VolumeCreate parameters to an nfsVolume // Convert VolumeCreate parameters to an nfsVolume
func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume, error) { func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[string]string) (*nfsVolume, error) {
var ( var server, baseDir string
server string
baseDir string
)
// validate parameters (case-insensitive) // validate parameters (case-insensitive)
for k, v := range params { for k, v := range params {
@@ -310,7 +302,6 @@ func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[str
} }
} }
// validate required parameters
if server == "" { if server == "" {
return nil, fmt.Errorf("%v is a required parameter", paramServer) return nil, fmt.Errorf("%v is a required parameter", paramServer)
} }
@@ -331,11 +322,7 @@ func (cs *ControllerServer) newNFSVolume(name string, size int64, params map[str
// Get working directory for CreateVolume and DeleteVolume // Get working directory for CreateVolume and DeleteVolume
func (cs *ControllerServer) getInternalMountPath(vol *nfsVolume) string { func (cs *ControllerServer) getInternalMountPath(vol *nfsVolume) string {
// use default if empty return filepath.Join(cs.Driver.workingMountDir, vol.subDir)
if cs.workingMountDir == "" {
cs.workingMountDir = "/tmp"
}
return filepath.Join(cs.workingMountDir, vol.subDir)
} }
// Get internal path where the volume is created // Get internal path where the volume is created

View File

@@ -47,12 +47,13 @@ var (
) )
func initTestController(t *testing.T) *ControllerServer { func initTestController(t *testing.T) *ControllerServer {
var perm *uint32
mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}} mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}}
driver := NewDriver("", "", "", perm) driver := NewDriver(&DriverOptions{
WorkingMountDir: "/tmp",
MountPermissions: 0777,
})
driver.ns = NewNodeServer(driver, mounter) driver.ns = NewNodeServer(driver, mounter)
cs := NewControllerServer(driver) cs := NewControllerServer(driver)
cs.workingMountDir = "/tmp"
return cs return cs
} }
@@ -189,7 +190,7 @@ func TestCreateVolume(t *testing.T) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp) t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp)
} }
if !test.expectErr { if !test.expectErr {
info, err := os.Stat(filepath.Join(cs.workingMountDir, test.req.Name, test.req.Name)) info, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, test.req.Name, test.req.Name))
if err != nil { if err != nil {
t.Errorf("test %q failed: couldn't find volume subdirectory: %v", test.name, err) t.Errorf("test %q failed: couldn't find volume subdirectory: %v", test.name, err)
} }
@@ -227,8 +228,8 @@ func TestDeleteVolume(t *testing.T) {
t.Run(test.desc, func(t *testing.T) { t.Run(test.desc, func(t *testing.T) {
// Setup // Setup
cs := initTestController(t) cs := initTestController(t)
_ = os.MkdirAll(filepath.Join(cs.workingMountDir, testCSIVolume), os.ModePerm) _ = os.MkdirAll(filepath.Join(cs.Driver.workingMountDir, testCSIVolume), os.ModePerm)
_, _ = os.Create(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume)) _, _ = os.Create(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume))
// Run // Run
resp, err := cs.DeleteVolume(context.TODO(), test.req) resp, err := cs.DeleteVolume(context.TODO(), test.req)
@@ -243,7 +244,7 @@ func TestDeleteVolume(t *testing.T) {
if !reflect.DeepEqual(resp, test.resp) { if !reflect.DeepEqual(resp, test.resp) {
t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp) t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
} }
if _, err := os.Stat(filepath.Join(cs.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) { if _, err := os.Stat(filepath.Join(cs.Driver.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc) t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
} }
}) })

View File

@@ -17,21 +17,27 @@ limitations under the License.
package nfs package nfs
import ( import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi"
"k8s.io/klog/v2" "k8s.io/klog/v2"
mount "k8s.io/mount-utils" mount "k8s.io/mount-utils"
) )
// DriverOptions defines driver parameters specified in driver deployment
type DriverOptions struct {
NodeID string
DriverName string
Endpoint string
MountPermissions uint64
WorkingMountDir string
}
type Driver struct { type Driver struct {
name string name string
nodeID string nodeID string
version string version string
endpoint string
endpoint string mountPermissions uint64
workingMountDir string
perm *uint32
//ids *identityServer //ids *identityServer
ns *NodeServer ns *NodeServer
@@ -53,16 +59,17 @@ const (
mountOptionsField = "mountoptions" mountOptionsField = "mountoptions"
) )
func NewDriver(nodeID, driverName, endpoint string, perm *uint32) *Driver { func NewDriver(options *DriverOptions) *Driver {
klog.V(2).Infof("Driver: %v version: %v", driverName, driverVersion) klog.V(2).Infof("Driver: %v version: %v", options.DriverName, driverVersion)
n := &Driver{ n := &Driver{
name: driverName, name: options.DriverName,
version: driverVersion, version: driverVersion,
nodeID: nodeID, nodeID: options.NodeID,
endpoint: endpoint, endpoint: options.Endpoint,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{}, mountPermissions: options.MountPermissions,
perm: perm, workingMountDir: options.WorkingMountDir,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
} }
vcam := []csi.VolumeCapability_AccessMode_Mode{ vcam := []csi.VolumeCapability_AccessMode_Mode{
@@ -102,7 +109,7 @@ func (n *Driver) Run(testMode bool) {
if err != nil { if err != nil {
klog.Fatalf("%v", err) klog.Fatalf("%v", err)
} }
klog.Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta) klog.V(2).Infof("\nDRIVER INFORMATION:\n-------------------\n%s\n\nStreaming logs below:", versionMeta)
n.ns = NewNodeServer(n, mount.New("")) n.ns = NewNodeServer(n, mount.New(""))
s := NewNonBlockingGRPCServer() s := NewNonBlockingGRPCServer()
@@ -119,7 +126,6 @@ func (n *Driver) Run(testMode bool) {
func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode { func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_AccessMode_Mode) []*csi.VolumeCapability_AccessMode {
var vca []*csi.VolumeCapability_AccessMode var vca []*csi.VolumeCapability_AccessMode
for _, c := range vc { for _, c := range vc {
klog.Infof("Enabling volume access mode: %v", c.String())
vca = append(vca, &csi.VolumeCapability_AccessMode{Mode: c}) vca = append(vca, &csi.VolumeCapability_AccessMode{Mode: c})
n.cap[c] = true n.cap[c] = true
} }
@@ -128,19 +134,15 @@ func (n *Driver) AddVolumeCapabilityAccessModes(vc []csi.VolumeCapability_Access
func (n *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) { func (n *Driver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
var csc []*csi.ControllerServiceCapability var csc []*csi.ControllerServiceCapability
for _, c := range cl { for _, c := range cl {
klog.Infof("Enabling controller service capability: %v", c.String())
csc = append(csc, NewControllerServiceCapability(c)) csc = append(csc, NewControllerServiceCapability(c))
} }
n.cscap = csc n.cscap = csc
} }
func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_Type) { func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_Type) {
var nsc []*csi.NodeServiceCapability var nsc []*csi.NodeServiceCapability
for _, n := range nl { for _, n := range nl {
klog.Infof("Enabling node service capability: %v", n.String())
nsc = append(nsc, NewNodeServiceCapability(n)) nsc = append(nsc, NewNodeServiceCapability(n))
} }
n.nscap = nsc n.nscap = nsc
@@ -148,6 +150,5 @@ func (n *Driver) AddNodeServiceCapabilities(nl []csi.NodeServiceCapability_RPC_T
func IsCorruptedDir(dir string) bool { func IsCorruptedDir(dir string) bool {
_, pathErr := mount.PathExists(dir) _, pathErr := mount.PathExists(dir)
fmt.Printf("IsCorruptedDir(%s) returned with error: %v", dir, pathErr)
return pathErr != nil && mount.IsCorruptedMnt(pathErr) return pathErr != nil && mount.IsCorruptedMnt(pathErr)
} }

View File

@@ -32,7 +32,6 @@ const (
func NewEmptyDriver(emptyField string) *Driver { func NewEmptyDriver(emptyField string) *Driver {
var d *Driver var d *Driver
var perm *uint32
switch emptyField { switch emptyField {
case "version": case "version":
d = &Driver{ d = &Driver{
@@ -40,7 +39,6 @@ func NewEmptyDriver(emptyField string) *Driver {
version: "", version: "",
nodeID: fakeNodeID, nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{}, cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
} }
case "name": case "name":
d = &Driver{ d = &Driver{
@@ -48,7 +46,6 @@ func NewEmptyDriver(emptyField string) *Driver {
version: driverVersion, version: driverVersion,
nodeID: fakeNodeID, nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{}, cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
} }
default: default:
d = &Driver{ d = &Driver{
@@ -56,7 +53,6 @@ func NewEmptyDriver(emptyField string) *Driver {
version: driverVersion, version: driverVersion,
nodeID: fakeNodeID, nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{}, cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
perm: perm,
} }
} }
d.volumeLocks = NewVolumeLocks() d.volumeLocks = NewVolumeLocks()

View File

@@ -50,10 +50,28 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.InvalidArgument, "Target path not provided") return nil, status.Error(codes.InvalidArgument, "Target path not provided")
} }
var server, baseDir string
for k, v := range req.GetVolumeContext() {
switch strings.ToLower(k) {
case paramServer:
server = v
case paramShare:
baseDir = v
}
}
if server == "" {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%v is a required parameter", paramServer))
}
if baseDir == "" {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("%v is a required parameter", paramShare))
}
source := fmt.Sprintf("%s:%s", server, baseDir)
notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath) notMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
if err := os.MkdirAll(targetPath, 0750); err != nil { if err := os.MkdirAll(targetPath, os.FileMode(ns.Driver.mountPermissions)); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
notMnt = true notMnt = true
@@ -70,10 +88,6 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
mountOptions = append(mountOptions, "ro") mountOptions = append(mountOptions, "ro")
} }
s := req.GetVolumeContext()[paramServer]
ep := req.GetVolumeContext()[paramShare]
source := fmt.Sprintf("%s:%s", s, ep)
klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions) klog.V(2).Infof("NodePublishVolume: volumeID(%v) source(%s) targetPath(%s) mountflags(%v)", volumeID, source, targetPath, mountOptions)
err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions) err = ns.mounter.Mount(source, targetPath, "nfs", mountOptions)
if err != nil { if err != nil {
@@ -86,13 +100,10 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
if ns.Driver.perm != nil { klog.V(2).Infof("volumeID(%v): mount targetPath(%s) with permissions(0%o)", volumeID, targetPath, ns.Driver.mountPermissions)
klog.V(2).Infof("volumeID(%v): mount targetPath(%s) with permissions(0%o)", volumeID, targetPath, *ns.Driver.perm) if err := os.Chmod(targetPath, os.FileMode(ns.Driver.mountPermissions)); err != nil {
if err := os.Chmod(targetPath, os.FileMode(*ns.Driver.perm)); err != nil { return nil, status.Error(codes.Internal, err.Error())
return nil, status.Error(codes.Internal, err.Error())
}
} }
return &csi.NodePublishVolumeResponse{}, nil return &csi.NodePublishVolumeResponse{}, nil
} }

View File

@@ -40,6 +40,11 @@ func TestNodePublishVolume(t *testing.T) {
t.Fatalf(err.Error()) t.Fatalf(err.Error())
} }
params := map[string]string{
"server": "server",
"share": "share",
}
volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER} volumeCap := csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}
alreadyMountedTarget := testutil.GetWorkDirPath("false_is_likely_exist_target", t) alreadyMountedTarget := testutil.GetWorkDirPath("false_is_likely_exist_target", t)
targetTest := testutil.GetWorkDirPath("target_test", t) targetTest := testutil.GetWorkDirPath("target_test", t)
@@ -70,39 +75,48 @@ func TestNodePublishVolume(t *testing.T) {
}, },
{ {
desc: "[Success] Stage target path missing", desc: "[Success] Stage target path missing",
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, req: csi.NodePublishVolumeRequest{
VolumeId: "vol_1", VolumeContext: params,
TargetPath: targetTest}, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
VolumeId: "vol_1",
TargetPath: targetTest},
expectedErr: nil, expectedErr: nil,
}, },
{ {
desc: "[Success] Valid request read only", desc: "[Success] Valid request read only",
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, req: csi.NodePublishVolumeRequest{
VolumeId: "vol_1", VolumeContext: params,
TargetPath: targetTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
Readonly: true}, VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil, expectedErr: nil,
}, },
{ {
desc: "[Success] Valid request already mounted", desc: "[Success] Valid request already mounted",
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, req: csi.NodePublishVolumeRequest{
VolumeId: "vol_1", VolumeContext: params,
TargetPath: alreadyMountedTarget, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
Readonly: true}, VolumeId: "vol_1",
TargetPath: alreadyMountedTarget,
Readonly: true},
expectedErr: nil, expectedErr: nil,
}, },
{ {
desc: "[Success] Valid request", desc: "[Success] Valid request",
req: csi.NodePublishVolumeRequest{VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap}, req: csi.NodePublishVolumeRequest{
VolumeId: "vol_1", VolumeContext: params,
TargetPath: targetTest, VolumeCapability: &csi.VolumeCapability{AccessMode: &volumeCap},
Readonly: true}, VolumeId: "vol_1",
TargetPath: targetTest,
Readonly: true},
expectedErr: nil, expectedErr: nil,
}, },
} }
// setup // setup
_ = makeDir(alreadyMountedTarget) _ = makeDir(alreadyMountedTarget)
_ = makeDir(targetTest)
for _, tc := range tests { for _, tc := range tests {
if tc.setup != nil { if tc.setup != nil {

View File

@@ -41,7 +41,6 @@ const (
var ( var (
nodeID = os.Getenv("NODE_ID") nodeID = os.Getenv("NODE_ID")
perm *uint32
nfsDriver *nfs.Driver nfsDriver *nfs.Driver
isWindowsCluster = os.Getenv(testWindowsEnvVar) != "" isWindowsCluster = os.Getenv(testWindowsEnvVar) != ""
defaultStorageClassParameters = map[string]string{ defaultStorageClassParameters = map[string]string{
@@ -70,7 +69,12 @@ var _ = ginkgo.BeforeSuite(func() {
handleFlags() handleFlags()
framework.AfterReadingAllFlags(&framework.TestContext) framework.AfterReadingAllFlags(&framework.TestContext)
nfsDriver = nfs.NewDriver(nodeID, nfs.DefaultDriverName, fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), perm) options := nfs.DriverOptions{
NodeID: nodeID,
DriverName: nfs.DefaultDriverName,
Endpoint: fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()),
}
nfsDriver = nfs.NewDriver(&options)
controllerServer = nfs.NewControllerServer(nfsDriver) controllerServer = nfs.NewControllerServer(nfsDriver)
// install nfs server // install nfs server