fix: internalUnmount failure and DeleteVolume failure
This commit is contained in:
parent
d325ee36ad
commit
868db6667a
3
Makefile
3
Makefile
@ -103,7 +103,8 @@ e2e-bootstrap: install-helm
|
||||
docker pull $(IMAGE_TAG) || make container push
|
||||
helm install csi-driver-nfs ./charts/latest/csi-driver-nfs --namespace kube-system --wait --timeout=15m -v=5 --debug \
|
||||
--set image.nfs.repository=$(REGISTRY)/$(IMAGE_NAME) \
|
||||
--set image.nfs.tag=$(IMAGE_VERSION)
|
||||
--set image.nfs.tag=$(IMAGE_VERSION) \
|
||||
--set image.nfs.pullPolicy=Always
|
||||
|
||||
.PHONY: e2e-teardown
|
||||
e2e-teardown:
|
||||
|
||||
@ -57,7 +57,7 @@ spec:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://plugin/csi.sock
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
imagePullPolicy: {{ .Values.image.nfs.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /plugin
|
||||
|
||||
@ -260,6 +260,7 @@ func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume)
|
||||
// Unmount nfs server at base-dir
|
||||
glog.V(4).Infof("internally unmounting %v", targetPath)
|
||||
_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{
|
||||
VolumeId: vol.id,
|
||||
TargetPath: cs.getInternalMountPath(vol),
|
||||
})
|
||||
return err
|
||||
@ -344,9 +345,9 @@ func (cs *ControllerServer) nfsVolToCSI(vol *nfsVolume, reqCapacity int64) *csi.
|
||||
// Given a nfsVolume, return a CSI volume id
|
||||
func (cs *ControllerServer) getVolumeIdFromNfsVol(vol *nfsVolume) string {
|
||||
idElements := make([]string, totalIDElements)
|
||||
idElements[idServer] = vol.server
|
||||
idElements[idBaseDir] = vol.baseDir
|
||||
idElements[idSubDir] = vol.subDir
|
||||
idElements[idServer] = strings.Trim(vol.server, "/")
|
||||
idElements[idBaseDir] = strings.Trim(vol.baseDir, "/")
|
||||
idElements[idSubDir] = strings.Trim(vol.subDir, "/")
|
||||
return strings.Join(idElements, "/")
|
||||
}
|
||||
|
||||
@ -354,7 +355,7 @@ func (cs *ControllerServer) getVolumeIdFromNfsVol(vol *nfsVolume) string {
|
||||
func (cs *ControllerServer) getNfsVolFromId(id string) (*nfsVolume, error) {
|
||||
tokens := strings.Split(id, "/")
|
||||
if len(tokens) != totalIDElements {
|
||||
return nil, fmt.Errorf("volume id %q unexpected format: got %v tokens", id, len(tokens))
|
||||
return nil, fmt.Errorf("volume id %q unexpected format: got %v token(s) instead of %v", id, len(tokens), totalIDElements)
|
||||
}
|
||||
|
||||
return &nfsVolume{
|
||||
|
||||
@ -44,7 +44,8 @@ func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface
|
||||
if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
|
||||
tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased)
|
||||
tpvc.DeleteBoundPersistentVolume()
|
||||
tpvc.DeleteBackingVolume(&t.ControllerServer)
|
||||
// The controler server cannot resolve the nfs server hosting inside the testing k8s cluster, skipping the cleanup step.
|
||||
// tpvc.DeleteBackingVolume(&t.ControllerServer)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user