feat: add volume resize support

Add the csi-resizer sidecar to the controller (Helm chart and static
manifests), implement ControllerExpandVolume, advertise the
EXPAND_VOLUME controller capability, and add unit and e2e coverage.
Author: andyzhangx
Date: 2024-12-08 05:35:23 +00:00
Parent: 8813b5b626
Commit: b28c5264f5
12 changed files with 278 additions and 19 deletions
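
With this change a PVC backed by the NFS driver can be expanded in place: the controller now advertises EXPAND_VOLUME and runs the external csi-resizer sidecar. A minimal usage sketch (the StorageClass and PVC names are hypothetical; the StorageClass must allow expansion):

$ kubectl patch storageclass nfs-csi -p '{"allowVolumeExpansion": true}'
$ kubectl patch pvc pvc-nfs -p '{"spec":{"resources":{"requests":{"storage":"20Gi"}}}}'
$ kubectl get pvc pvc-nfs -o jsonpath='{.status.capacity.storage}'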


@@ -79,6 +79,9 @@ The following table lists the configurable parameters of the latest NFS CSI Driver
| `controller.resources.csiProvisioner.limits.memory` | csi-provisioner memory limits | 100Mi |
| `controller.resources.csiProvisioner.requests.cpu` | csi-provisioner cpu requests | 10m |
| `controller.resources.csiProvisioner.requests.memory` | csi-provisioner memory requests | 20Mi |
| `controller.resources.csiResizer.limits.memory` | csi-resizer memory limits | 400Mi |
| `controller.resources.csiResizer.requests.cpu` | csi-resizer cpu requests | 10m |
| `controller.resources.csiResizer.requests.memory` | csi-resizer memory requests | 20Mi |
| `controller.resources.livenessProbe.limits.memory` | liveness-probe memory limits | 100Mi |
| `controller.resources.livenessProbe.requests.cpu` | liveness-probe cpu requests | 10m |
| `controller.resources.livenessProbe.requests.memory` | liveness-probe memory requests | 20Mi |
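
If you deploy via the Helm chart, the new csi-resizer image and resource settings can be overridden at install time; a sketch (the release name, namespace, and chart reference are assumptions):

$ helm install csi-driver-nfs csi-driver-nfs/csi-driver-nfs --namespace kube-system \
    --set image.csiResizer.tag=v1.12.0 \
    --set controller.resources.csiResizer.limits.memory=400Mi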


@@ -75,6 +75,30 @@ spec:
capabilities:
drop:
- ALL
- name: csi-resizer
{{- if hasPrefix "/" .Values.image.csiResizer.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
{{- else }}
image: "{{ .Values.image.csiResizer.repository }}:{{ .Values.image.csiResizer.tag }}"
{{- end }}
args:
- "-csi-address=$(ADDRESS)"
- "-v=2"
- "-leader-election"
- "--leader-election-namespace={{ .Release.Namespace }}"
- '-handle-volume-inuse-error=false'
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: {{ .Values.image.csiResizer.pullPolicy }}
volumeMounts:
- name: socket-dir
mountPath: /csi
resources: {{- toYaml .Values.controller.resources.csiResizer | nindent 12 }}
securityContext:
capabilities:
drop:
- ALL
- name: csi-snapshotter
{{- if hasPrefix "/" .Values.image.csiSnapshotter.repository }}
image: "{{ .Values.image.baseRepo }}{{ .Values.image.csiSnapshotter.repository }}:{{ .Values.image.csiSnapshotter.tag }}"


@@ -9,6 +9,10 @@ image:
repository: registry.k8s.io/sig-storage/csi-provisioner
tag: v5.1.0
pullPolicy: IfNotPresent
csiResizer:
repository: registry.k8s.io/sig-storage/csi-resizer
tag: v1.12.0
pullPolicy: IfNotPresent
csiSnapshotter:
repository: registry.k8s.io/sig-storage/csi-snapshotter
tag: v8.1.0
@@ -81,6 +85,12 @@ controller:
requests:
cpu: 10m
memory: 20Mi
csiResizer:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
csiSnapshotter:
limits:
memory: 200Mi


@@ -63,6 +63,30 @@ spec:
capabilities:
drop:
- ALL
- name: csi-resizer
image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0
args:
- "-csi-address=$(ADDRESS)"
- "-v=2"
- "-leader-election"
- "--leader-election-namespace=kube-system"
- '-handle-volume-inuse-error=false'
env:
- name: ADDRESS
value: /csi/csi.sock
volumeMounts:
- name: socket-dir
mountPath: /csi
resources:
limits:
memory: 400Mi
requests:
cpu: 10m
memory: 20Mi
securityContext:
capabilities:
drop:
- ALL
- name: csi-snapshotter
image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0
args:
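
After applying the updated manifest, the controller pod should run five containers, with csi-resizer second (the kube-system namespace and app label follow the upstream manifests; adjust for your install):

$ kubectl apply -f deploy/csi-nfs-controller.yaml
$ kubectl get pods -n kube-system -l app=csi-nfs-controller -o jsonpath='{.items[0].spec.containers[*].name}'
csi-provisioner csi-resizer csi-snapshotter liveness-probe nfs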


@@ -62,9 +62,10 @@ pip install yq --ignore-installed PyYAML
# Extract images from csi-nfs-controller.yaml
expected_csi_provisioner_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[0].image | head -n 1)"
-expected_csi_snapshotter_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
-expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
-expected_nfs_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[3].image | head -n 1)"
+expected_csi_resizer_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[1].image | head -n 1)"
+expected_csi_snapshotter_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[2].image | head -n 1)"
+expected_liveness_probe_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[3].image | head -n 1)"
+expected_nfs_image="$(cat ${PKG_ROOT}/deploy/csi-nfs-controller.yaml | yq -r .spec.template.spec.containers[4].image | head -n 1)"
csi_provisioner_image="$(get_image_from_helm_chart "csiProvisioner")"
validate_image "${expected_csi_provisioner_image}" "${csi_provisioner_image}"
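
Because the script indexes containers by position, inserting csi-resizer at index 1 shifts every later image one slot down. The new image can be validated with the script's existing helpers; a sketch, assuming get_image_from_helm_chart accepts the values key:

csi_resizer_image="$(get_image_from_helm_chart "csiResizer")"
validate_image "${expected_csi_resizer_image}" "${csi_resizer_image}"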


@@ -476,8 +476,19 @@ func (cs *ControllerServer) ListSnapshots(_ context.Context, _ *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
-func (cs *ControllerServer) ControllerExpandVolume(_ context.Context, _ *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
-return nil, status.Error(codes.Unimplemented, "")
+func (cs *ControllerServer) ControllerExpandVolume(_ context.Context, req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
+if len(req.GetVolumeId()) == 0 {
+return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
+}
+if req.GetCapacityRange() == nil {
+return nil, status.Error(codes.InvalidArgument, "Capacity Range missing in request")
+}
+volSizeBytes := req.GetCapacityRange().GetRequiredBytes()
+klog.V(2).Infof("ControllerExpandVolume(%s) succeeded, requested size: %d bytes", req.VolumeId, volSizeBytes)
+return &csi.ControllerExpandVolumeResponse{CapacityBytes: volSizeBytes}, nil
}
// Mount nfs server at base-dir
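
For NFS there is no capacity to enforce on the export, so ControllerExpandVolume only validates the request and echoes the requested size back; the external csi-resizer sidecar then records the new capacity on the PVC and PV objects. After patching a PVC to a larger size (as in the example near the top), the PV's recorded capacity should follow (the PVC name is hypothetical):

$ kubectl get pv "$(kubectl get pvc pvc-nfs -o jsonpath='{.spec.volumeName}')" -o jsonpath='{.spec.capacity.storage}'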


@@ -372,6 +372,13 @@ func TestControllerGetCapabilities(t *testing.T) {
},
},
},
{
Type: &csi.ControllerServiceCapability_Rpc{
Rpc: &csi.ControllerServiceCapability_RPC{
Type: csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
},
},
},
},
},
expectedErr: nil,
@@ -1057,6 +1064,60 @@ func TestDeleteSnapshot(t *testing.T) {
}
}
func TestControllerExpandVolume(t *testing.T) {
testCases := []struct {
name string
testFunc func(t *testing.T)
}{
{
name: "volume ID missing",
testFunc: func(t *testing.T) {
d := initTestController(t)
req := &csi.ControllerExpandVolumeRequest{}
_, err := d.ControllerExpandVolume(context.Background(), req)
expectedErr := status.Error(codes.InvalidArgument, "Volume ID missing in request")
if !reflect.DeepEqual(err, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
}
},
},
{
name: "Capacity Range missing",
testFunc: func(t *testing.T) {
d := initTestController(t)
req := &csi.ControllerExpandVolumeRequest{
VolumeId: "unit-test",
}
_, err := d.ControllerExpandVolume(context.Background(), req)
expectedErr := status.Error(codes.InvalidArgument, "Capacity Range missing in request")
if !reflect.DeepEqual(err, expectedErr) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, expectedErr)
}
},
},
{
name: "Error = nil",
testFunc: func(t *testing.T) {
d := initTestController(t)
req := &csi.ControllerExpandVolumeRequest{
VolumeId: "unit-test",
CapacityRange: &csi.CapacityRange{
RequiredBytes: 10000,
},
}
_, err := d.ControllerExpandVolume(context.Background(), req)
if !reflect.DeepEqual(err, nil) {
t.Errorf("actualErr: (%v), expectedErr: (%v)", err, nil)
}
},
},
}
for _, tc := range testCases {
t.Run(tc.name, tc.testFunc)
}
}
func matchCreateSnapshotResponse(e, r *csi.CreateSnapshotResponse) error {
if e == nil && r == nil {
return nil
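
These cases can be run in isolation while iterating (the package path follows the upstream repo layout, an assumption):

$ go test ./pkg/nfs/ -run TestControllerExpandVolume -v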


@@ -103,6 +103,7 @@ func NewDriver(options *DriverOptions) *Driver {
csi.ControllerServiceCapability_RPC_SINGLE_NODE_MULTI_WRITER,
csi.ControllerServiceCapability_RPC_CLONE_VOLUME,
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT,
csi.ControllerServiceCapability_RPC_EXPAND_VOLUME,
})
n.AddNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
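
With the capability registered, a CSI client talking to the controller socket should now list EXPAND_VOLUME; a sketch using the csc tool from gocsi (the exact output format varies by version):

$ csc controller get-capabilities --endpoint unix:///csi/csi.sock | grep EXPAND_VOLUME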


@@ -52,7 +52,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
})
testDriver = driver.InitNFSDriver()
-ginkgo.It("should create a volume on demand with mount options [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand with mount options", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -75,7 +75,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with zero mountPermissions [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand with zero mountPermissions", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -98,7 +98,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done",
@@ -135,7 +135,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
})
// Track issue https://github.com/kubernetes/kubernetes/issues/70505
-ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand and mount it as readOnly in a pod", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "touch /mnt/test-1/data",
@@ -159,7 +159,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again", func(ctx ginkgo.SpecContext) {
pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done",
Volumes: []testsuites.VolumeDetails{
@@ -188,7 +188,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("[subDir]should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("[subDir]should create a deployment object, write and read to it, delete the pod and write and read to it again", func(ctx ginkgo.SpecContext) {
pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done",
Volumes: []testsuites.VolumeDetails{
@@ -217,7 +217,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) {
+ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) {
reclaimPolicy := v1.PersistentVolumeReclaimDelete
volumes := []testsuites.VolumeDetails{
{
@@ -234,7 +234,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) {
+ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) {
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumes := []testsuites.VolumeDetails{
{
@@ -251,7 +251,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a pod with multiple volumes [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a pod with multiple volumes", func(ctx ginkgo.SpecContext) {
volumes := []testsuites.VolumeDetails{}
for i := 1; i <= 6; i++ {
volume := testsuites.VolumeDetails{
@@ -278,7 +278,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a pod with volume mount subpath [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a pod with volume mount subpath", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
@@ -301,7 +301,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a CSI inline volume [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a CSI inline volume", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
@@ -328,7 +328,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with retaining subdir on delete [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand with retaining subdir on delete", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -351,7 +351,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with archive on delete [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand with archive on delete", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -374,7 +374,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with archive subdir on delete [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
+ginkgo.It("should create a volume on demand with archive subdir on delete", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -396,4 +396,27 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
}
test.Run(ctx, cs, ns)
})
ginkgo.It("should create a volume on demand and resize it", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
Volumes: []testsuites.VolumeDetails{
{
ClaimSize: "10Gi",
VolumeMount: testsuites.VolumeMountDetails{
NameGenerate: "test-volume-",
MountPathGenerate: "/mnt/test-",
},
},
},
},
}
test := testsuites.DynamicallyProvisionedResizeVolumeTest{
CSIDriver: testDriver,
Pods: pods,
StorageClassParameters: archiveSubDirStorageClassParameters,
}
test.Run(ctx, cs, ns)
})
})


@@ -0,0 +1,99 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"context"
"fmt"
"strings"
"time"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
)
// DynamicallyProvisionedResizeVolumeTest will provision the required StorageClass(es), PVC(s) and Pod(s),
// wait for the external resizer to expand the PVC,
// and verify that the PVC and PV report the new size.
type DynamicallyProvisionedResizeVolumeTest struct {
CSIDriver driver.DynamicPVTestDriver
Pods []PodDetails
StorageClassParameters map[string]string
}
func (t *DynamicallyProvisionedResizeVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here so that resources are not removed before they are used
for i := range cleanup {
defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
tpod.Create(ctx)
defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with no error")
tpod.WaitForSuccess(ctx)
pvcName := tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName
pvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, fmt.Sprintf("fail to get original pvc(%s): %v", pvcName, err))
}
originalSize := pvc.Spec.Resources.Requests["storage"]
delta := resource.Quantity{}
delta.Set(1024 * 1024 * 1024)
originalSize.Add(delta)
pvc.Spec.Resources.Requests["storage"] = originalSize
ginkgo.By("resizing the pvc")
updatedPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Update(ctx, pvc, metav1.UpdateOptions{})
if err != nil {
framework.ExpectNoError(err, fmt.Sprintf("fail to resize pvc(%s): %v", pvcName, err))
}
updatedSize := updatedPvc.Spec.Resources.Requests["storage"]
ginkgo.By("sleep 30s waiting for resize complete")
time.Sleep(30 * time.Second)
ginkgo.By("checking the resizing result")
newPvc, err := client.CoreV1().PersistentVolumeClaims(namespace.Name).Get(ctx, tpod.pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, fmt.Sprintf("fail to get new pvc(%s): %v", pvcName, err))
}
newSize := newPvc.Spec.Resources.Requests["storage"]
if !newSize.Equal(updatedSize) {
framework.Failf("newSize(%+v) is not equal to updatedSize(%+v)", newSize, updatedSize)
}
ginkgo.By("checking the resizing PV result")
newPv, _ := client.CoreV1().PersistentVolumes().Get(ctx, updatedPvc.Spec.VolumeName, metav1.GetOptions{})
newPvSize := newPv.Spec.Capacity["storage"]
newPvSizeStr := newPvSize.String() + "Gi"
if !strings.Contains(newPvSizeStr, newSize.String()) {
framework.Failf("newPVCSize(%+v) is not equal to newPVSize(%+v)", newSize.String(), newPvSizeStr)
}
}
}
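
The new suite can be focused directly when running the e2e tests; a sketch, assuming the test binary registers the usual ginkgo flags:

$ go test ./test/e2e -timeout=0 -v -ginkgo.focus='should create a volume on demand and resize it'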


@@ -16,6 +16,8 @@ DriverInfo:
fsGroup: true
pvcDataSource: true
snapshotDataSource: true
controllerExpansion: true
nodeExpansion: true
InlineVolumes:
- Attributes:
server: nfs-server.default.svc.cluster.local
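
With controllerExpansion and nodeExpansion declared, the upstream external storage suite will also run its volume-expand cases against this driver; a sketch of invoking it (the binary and manifest paths are assumptions):

$ ./e2e.test -ginkgo.focus='External.Storage.*volume-expand' -storage.testdriver=test/external-e2e/testdriver.yaml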