Merge pull request #216 from andyzhangx/alternative-driver-name

feat: support alternative driver name
Andy Zhang 2021-07-11 09:04:07 +08:00 committed by GitHub
commit e62205b946
19 changed files with 112 additions and 84 deletions

View File

@@ -36,24 +36,25 @@ The following table lists the configurable parameters of the latest NFS CSI Driver
 | Parameter                                 | Description                                           | Default                                                |
 |-------------------------------------------|-------------------------------------------------------|--------------------------------------------------------|
+| `driver.name`                             | alternative driver name                               | `nfs.csi.k8s.io`                                       |
 | `feature.enableFSGroupPolicy`             | enable `fsGroupPolicy` on a k8s 1.20+ cluster         | `false`                                                |
-| `image.nfs.repository`                    | csi-driver-nfs docker image                           | gcr.io/k8s-staging-sig-storage/nfsplugin               |
-| `image.nfs.tag`                           | csi-driver-nfs docker image tag                       | amd64-linux-canary                                     |
-| `image.nfs.pullPolicy`                    | csi-driver-nfs image pull policy                      | IfNotPresent                                           |
-| `image.csiProvisioner.repository`         | csi-provisioner docker image                          | k8s.gcr.io/sig-storage/csi-provisioner                 |
-| `image.csiProvisioner.tag`                | csi-provisioner docker image tag                      | v2.0.4                                                 |
-| `image.csiProvisioner.pullPolicy`         | csi-provisioner image pull policy                     | IfNotPresent                                           |
-| `image.livenessProbe.repository`          | liveness-probe docker image                           | k8s.gcr.io/sig-storage/livenessprobe                   |
-| `image.livenessProbe.tag`                 | liveness-probe docker image tag                       | v2.3.0                                                 |
-| `image.livenessProbe.pullPolicy`          | liveness-probe image pull policy                      | IfNotPresent                                           |
-| `image.nodeDriverRegistrar.repository`    | csi-node-driver-registrar docker image                | k8s.gcr.io/sig-storage/csi-node-driver-registrar       |
-| `image.nodeDriverRegistrar.tag`           | csi-node-driver-registrar docker image tag            | v2.2.0                                                 |
-| `image.nodeDriverRegistrar.pullPolicy`    | csi-node-driver-registrar image pull policy           | IfNotPresent                                           |
+| `image.nfs.repository`                    | csi-driver-nfs docker image                           | `gcr.io/k8s-staging-sig-storage/nfsplugin`             |
+| `image.nfs.tag`                           | csi-driver-nfs docker image tag                       | `amd64-linux-canary`                                   |
+| `image.nfs.pullPolicy`                    | csi-driver-nfs image pull policy                      | `IfNotPresent`                                         |
+| `image.csiProvisioner.repository`         | csi-provisioner docker image                          | `k8s.gcr.io/sig-storage/csi-provisioner`               |
+| `image.csiProvisioner.tag`                | csi-provisioner docker image tag                      | `v2.0.4`                                               |
+| `image.csiProvisioner.pullPolicy`         | csi-provisioner image pull policy                     | `IfNotPresent`                                         |
+| `image.livenessProbe.repository`          | liveness-probe docker image                           | `k8s.gcr.io/sig-storage/livenessprobe`                 |
+| `image.livenessProbe.tag`                 | liveness-probe docker image tag                       | `v2.3.0`                                               |
+| `image.livenessProbe.pullPolicy`          | liveness-probe image pull policy                      | `IfNotPresent`                                         |
+| `image.nodeDriverRegistrar.repository`    | csi-node-driver-registrar docker image                | `k8s.gcr.io/sig-storage/csi-node-driver-registrar`     |
+| `image.nodeDriverRegistrar.tag`           | csi-node-driver-registrar docker image tag            | `v2.2.0`                                               |
+| `image.nodeDriverRegistrar.pullPolicy`    | csi-node-driver-registrar image pull policy           | `IfNotPresent`                                         |
 | `imagePullSecrets`                        | Specify docker-registry secret names as an array      | [] (does not add image pull secrets to deployed pods)  |
-| `serviceAccount.create`                   | whether create service account of csi-nfs-controller  | true                                                   |
-| `rbac.create`                             | whether create rbac of csi-nfs-controller             | true                                                   |
-| `controller.replicas`                     | the replicas of csi-nfs-controller                    | 2                                                      |
-| `controller.runOnMaster`                  | run controller on master node                         | false                                                  |
+| `serviceAccount.create`                   | whether create service account of csi-nfs-controller  | `true`                                                 |
+| `rbac.create`                             | whether create rbac of csi-nfs-controller             | `true`                                                 |
+| `controller.replicas`                     | the replicas of csi-nfs-controller                    | `2`                                                    |
+| `controller.runOnMaster`                  | run controller on master node                         | `false`                                                |
 | `controller.logLevel`                     | controller driver log level                           | `5`                                                    |
 | `controller.tolerations`                  | controller pod tolerations                            |                                                        |
 | `node.logLevel`                           | node driver log level                                 | `5`                                                    |
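
As a usage sketch, the new `driver.name` parameter can be set at install time. The release name, chart path, and alternative driver name below are illustrative assumptions, not taken from this PR:

    # hypothetical install under an alternative driver name
    helm install csi-driver-nfs ./charts/latest/csi-driver-nfs \
        --namespace kube-system \
        --set driver.name=mynfs.csi.k8s.io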

View File

@@ -2,18 +2,18 @@
 kind: Deployment
 apiVersion: apps/v1
 metadata:
-  name: csi-nfs-controller
+  name: {{ .Values.controller.name }}
   namespace: {{ .Release.Namespace }}
 {{ include "nfs.labels" . | indent 2 }}
 spec:
   replicas: {{ .Values.controller.replicas }}
   selector:
     matchLabels:
-      app: csi-nfs-controller
+      app: {{ .Values.controller.name }}
   template:
     metadata:
 {{ include "nfs.labels" . | indent 6 }}
-        app: csi-nfs-controller
+        app: {{ .Values.controller.name }}
     spec:
       {{- if .Values.imagePullSecrets }}
       imagePullSecrets:
@@ -21,7 +21,7 @@ spec:
       {{- end }}
       hostNetwork: true # controller also needs to mount nfs to create dir
       dnsPolicy: ClusterFirstWithHostNet
-      serviceAccountName: csi-nfs-controller-sa
+      serviceAccountName: {{ .Values.serviceAccount.controller }}
       nodeSelector:
         kubernetes.io/os: linux
       {{- if .Values.controller.runOnMaster}}
@@ -58,7 +58,7 @@ spec:
           args:
             - --csi-address=/csi/csi.sock
            - --probe-timeout=3s
-            - --health-port=29652
+            - --health-port={{ .Values.controller.livenessProbe.healthPort }}
             - --v=2
           imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
           volumeMounts:
@@ -83,6 +83,7 @@ spec:
             - "--v={{ .Values.controller.logLevel }}"
             - "--nodeid=$(NODE_ID)"
             - "--endpoint=$(CSI_ENDPOINT)"
+            - "--drivername={{ .Values.driver.name }}"
           env:
             - name: NODE_ID
               valueFrom:
@@ -91,7 +92,7 @@ spec:
             - name: CSI_ENDPOINT
               value: unix:///csi/csi.sock
           ports:
-            - containerPort: 29652
+            - containerPort: {{ .Values.controller.livenessProbe.healthPort }}
              name: healthz
              protocol: TCP
          livenessProbe:
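
Since the controller name, liveness-probe port, and driver name are now all templated, they can be overridden together. A minimal sketch with arbitrary example values (chart path assumed):

    helm install csi-driver-nfs ./charts/latest/csi-driver-nfs \
        --set controller.name=csi-mynfs-controller \
        --set controller.livenessProbe.healthPort=39652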

View File

@@ -1,7 +1,7 @@
 apiVersion: storage.k8s.io/v1beta1
 kind: CSIDriver
 metadata:
-  name: nfs.csi.k8s.io
+  name: {{ .Values.driver.name }}
 spec:
   attachRequired: false
   volumeLifecycleModes:
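
To confirm the override actually lands on the CSIDriver object, the chart can be rendered locally; a hedged check (chart path and name are assumptions):

    helm template ./charts/latest/csi-driver-nfs --set driver.name=mynfs.csi.k8s.io \
        | grep -B2 -A2 'kind: CSIDriver'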

View File

@@ -3,17 +3,17 @@
 kind: DaemonSet
 apiVersion: apps/v1
 metadata:
-  name: csi-nfs-node
+  name: {{ .Values.node.name }}
   namespace: {{ .Release.Namespace }}
 {{ include "nfs.labels" . | indent 2 }}
 spec:
   selector:
     matchLabels:
-      app: csi-nfs-node
+      app: {{ .Values.node.name }}
   template:
     metadata:
 {{ include "nfs.labels" . | indent 6 }}
-        app: csi-nfs-node
+        app: {{ .Values.node.name }}
     spec:
       {{- if .Values.imagePullSecrets }}
       imagePullSecrets:
@@ -78,6 +78,7 @@ spec:
             - "--v={{ .Values.node.logLevel }}"
             - "--nodeid=$(NODE_ID)"
             - "--endpoint=$(CSI_ENDPOINT)"
+            - "--drivername={{ .Values.driver.name }}"
           env:
             - name: NODE_ID
               valueFrom:
@@ -86,7 +87,7 @@ spec:
             - name: CSI_ENDPOINT
               value: unix:///csi/csi.sock
           ports:
-            - containerPort: 29653
+            - containerPort: {{ .Values.node.livenessProbe.healthPort }}
              name: healthz
              protocol: TCP
          livenessProbe:

View File

@@ -3,7 +3,7 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: csi-nfs-controller-sa
+  name: csi-{{ .Values.rbac.name }}-controller-sa
   namespace: {{ .Release.Namespace }}
 {{ include "nfs.labels" . | indent 2 }}
 ---
@@ -13,7 +13,7 @@ metadata:
 kind: ClusterRole
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: nfs-external-provisioner-role
+  name: {{ .Values.rbac.name }}-external-provisioner-role
 {{ include "nfs.labels" . | indent 2 }}
 rules:
   - apiGroups: [""]
@@ -41,14 +41,14 @@ rules:
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
-  name: nfs-csi-provisioner-binding
+  name: {{ .Values.rbac.name }}-csi-provisioner-binding
 {{ include "nfs.labels" . | indent 2 }}
 subjects:
   - kind: ServiceAccount
-    name: csi-nfs-controller-sa
+    name: csi-{{ .Values.rbac.name }}-controller-sa
     namespace: {{ .Release.Namespace }}
 roleRef:
   kind: ClusterRole
-  name: nfs-external-provisioner-role
+  name: {{ .Values.rbac.name }}-external-provisioner-role
   apiGroup: rbac.authorization.k8s.io
 {{- end -}}
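
The `rbac.name` value is spliced into the service account, role, and binding names above; a quick render check (the value `mynfs` is an example):

    helm template ./charts/latest/csi-driver-nfs --set rbac.name=mynfs \
        | grep -E 'name: (csi-mynfs-controller-sa|mynfs-external-provisioner-role|mynfs-csi-provisioner-binding)'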

View File

@@ -15,26 +15,32 @@ image:
     repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
     tag: v2.2.0
     pullPolicy: IfNotPresent

 serviceAccount:
-  create: true
+  create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
+  controller: csi-nfs-controller-sa # Name of Service Account to be created or used

 rbac:
   create: true
+  name: nfs

 controller:
+  name: csi-nfs-controller
   replicas: 2
   runOnMaster: false
+  livenessProbe:
+    healthPort: 29652
   logLevel: 5
   tolerations:
     - key: "node-role.kubernetes.io/master"
-      operator: "Equal"
-      value: "true"
+      operator: "Exists"
       effect: "NoSchedule"
     - key: "node-role.kubernetes.io/controlplane"
-      operator: "Equal"
-      value: "true"
+      operator: "Exists"
       effect: "NoSchedule"

 node:
+  name: csi-nfs-node
   logLevel: 5
   livenessProbe:
     healthPort: 29653
@@ -44,6 +50,9 @@ node:
 feature:
   enableFSGroupPolicy: false

+driver:
+  name: nfs.csi.k8s.io
+
 ## Reference to one or more secrets to be used when pulling images
 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
 ##
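
Putting the new values together, a hypothetical override file (every name below is an example, not a default from this PR) might look like:

    # write an override file pairing the new knobs, then install with it
    cat > /tmp/alt-driver-values.yaml <<'EOF'
    driver:
      name: mynfs.csi.k8s.io
    controller:
      name: csi-mynfs-controller
    node:
      name: csi-mynfs-node
    rbac:
      name: mynfs
    serviceAccount:
      controller: csi-mynfs-controller-sa
    EOF
    helm install csi-driver-nfs ./charts/latest/csi-driver-nfs -f /tmp/alt-driver-values.yaml

Note that `serviceAccount.controller` is set to match the `csi-{rbac.name}-controller-sa` name the RBAC template generates, so the controller deployment references the service account that is actually created.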

View File

@@ -31,6 +31,7 @@ var (
     endpoint   = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
     nodeID     = flag.String("nodeid", "", "node id")
     perm       = flag.String("mount-permissions", "", "mounted folder permissions")
+    driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
 )

 func init() {
@@ -61,6 +62,6 @@ func handle() {
         parsedPerm = &permu32
     }

-    d := nfs.NewNFSdriver(*nodeID, *endpoint, parsedPerm)
+    d := nfs.NewNFSdriver(*nodeID, *driverName, *endpoint, parsedPerm)
     d.Run(false)
 }
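
With the new flag wired through, the plugin binary can be started under an alternative name; a sketch (the binary path is an assumption based on the usual build output, and the name is an example):

    # default name is nfs.csi.k8s.io when --drivername is omitted
    ./bin/nfsplugin --nodeid=node-1 \
        --endpoint=unix:///csi/csi.sock \
        --drivername=mynfs.csi.k8s.io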

View File

@@ -22,12 +22,10 @@ spec:
       priorityClassName: system-cluster-critical
       tolerations:
         - key: "node-role.kubernetes.io/master"
-          operator: "Equal"
-          value: "true"
+          operator: "Exists"
           effect: "NoSchedule"
         - key: "node-role.kubernetes.io/controlplane"
-          operator: "Equal"
-          value: "true"
+          operator: "Exists"
           effect: "NoSchedule"
       containers:
         - name: csi-provisioner

View File

@@ -15,25 +15,35 @@
 set -euo pipefail

+rollout_and_wait() {
+    echo "Applying config \"$1\""
+    trap "echo \"Failed to apply config \\\"$1\\\"\" >&2" err
+
+    APPNAME=$(kubectl apply -f $1 | grep -E "^(:?daemonset|deployment|statefulset|pod)" | awk '{printf $1}')
+    if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\)" || true) ]]; then
+        kubectl rollout status $APPNAME --watch --timeout=5m
+    else
+        kubectl wait "${APPNAME}" --for condition=ready --timeout=5m
+    fi
+}
+
 echo "begin to create deployment examples ..."
 kubectl apply -f ./deploy/example/storageclass-nfs.yaml
-kubectl apply -f ./deploy/example/deployment.yaml
-kubectl apply -f ./deploy/example/statefulset.yaml
+
+EXAMPLES+=(\
+    deploy/example/deployment.yaml \
+    deploy/example/statefulset.yaml \
+)

 if [[ "$#" -gt 0 ]]&&[[ "$1" == *"ephemeral"* ]]; then
-    kubectl apply -f ./deploy/example/daemonset-nfs-ephemeral.yaml
+    EXAMPLES+=(\
+        deploy/example/daemonset-nfs-ephemeral.yaml \
+    )
 fi

-echo "sleep 60s ..."
-sleep 60
-
-echo "begin to check pod status ..."
-kubectl get pods -o wide
-kubectl get pods --field-selector status.phase=Running | grep deployment-nfs
-kubectl get pods --field-selector status.phase=Running | grep statefulset-nfs-0
-
-if [[ "$#" -gt 0 ]]&&[[ "$1" == *"ephemeral"* ]]; then
-    kubectl get pods --field-selector status.phase=Running | grep daemonset-nfs-ephemeral
-fi
+for EXAMPLE in "${EXAMPLES[@]}"; do
+    rollout_and_wait $EXAMPLE
+done

 echo "deployment examples running completed."

View File

@@ -49,7 +49,7 @@ var (
 func initTestController(t *testing.T) *ControllerServer {
     var perm *uint32
     mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}}
-    driver := NewNFSdriver("", "", perm)
+    driver := NewNFSdriver("", "", "", perm)
     driver.ns = NewNodeServer(driver, mounter)
     cs := NewControllerServer(driver)
     cs.workingMountDir = "/tmp"

View File

@@ -42,7 +42,7 @@ type Driver struct {
 }

 const (
-    DriverName = "nfs.csi.k8s.io"
+    DefaultDriverName = "nfs.csi.k8s.io"
     // Address of the NFS server
     paramServer = "server"
     // Base directory of the NFS server to create volumes under.
@@ -56,11 +56,11 @@
     version = "3.0.0"
 )

-func NewNFSdriver(nodeID, endpoint string, perm *uint32) *Driver {
-    klog.Infof("Driver: %v version: %v", DriverName, version)
+func NewNFSdriver(nodeID, driverName, endpoint string, perm *uint32) *Driver {
+    klog.Infof("Driver: %v version: %v", driverName, version)

     n := &Driver{
-        name:     DriverName,
+        name:     driverName,
         version:  version,
         nodeID:   nodeID,
         endpoint: endpoint,
@@ -100,7 +100,7 @@ func NewNodeServer(n *Driver, mounter mount.Interface) *NodeServer {
 }

 func (n *Driver) Run(testMode bool) {
-    versionMeta, err := GetVersionYAML()
+    versionMeta, err := GetVersionYAML(n.name)
     if err != nil {
         klog.Fatalf("%v", err)
     }

View File

@@ -36,7 +36,7 @@ func NewEmptyDriver(emptyField string) *Driver {
     switch emptyField {
     case "version":
         d = &Driver{
-            name:    DriverName,
+            name:    DefaultDriverName,
             version: "",
             nodeID:  fakeNodeID,
             cap:     map[csi.VolumeCapability_AccessMode_Mode]bool{},
@@ -52,7 +52,7 @@ func NewEmptyDriver(emptyField string) *Driver {
         }
     default:
         d = &Driver{
-            name:    DriverName,
+            name:    DefaultDriverName,
             version: version,
             nodeID:  fakeNodeID,
             cap:     map[csi.VolumeCapability_AccessMode_Mode]bool{},

View File

@@ -43,9 +43,9 @@ type VersionInfo struct {
 }

 // GetVersion returns the version information of the driver
-func GetVersion() VersionInfo {
+func GetVersion(driverName string) VersionInfo {
     return VersionInfo{
-        DriverName:    DriverName,
+        DriverName:    driverName,
         DriverVersion: driverVersion,
         GitCommit:     gitCommit,
         BuildDate:     buildDate,
@@ -57,8 +57,8 @@ func GetVersion() VersionInfo {

 // GetVersionYAML returns the version information of the driver
 // in YAML format
-func GetVersionYAML() (string, error) {
-    info := GetVersion()
+func GetVersionYAML(driverName string) (string, error) {
+    info := GetVersion(driverName)
     marshalled, err := yaml.Marshal(&info)
     if err != nil {
         return "", err

View File

@@ -27,10 +27,10 @@ import (
 )

 func TestGetVersion(t *testing.T) {
-    version := GetVersion()
+    version := GetVersion(DefaultDriverName)

     expected := VersionInfo{
-        DriverName:    DriverName,
+        DriverName:    DefaultDriverName,
         DriverVersion: "N/A",
         GitCommit:     "N/A",
         BuildDate:     "N/A",
@@ -46,11 +46,11 @@ func TestGetVersion(t *testing.T) {
 }

 func TestGetVersionYAML(t *testing.T) {
-    resp, err := GetVersionYAML()
+    resp, err := GetVersionYAML("")
     if err != nil {
         t.Fatalf("Unexpected error: %v", err)
     }

-    versionInfo := GetVersion()
+    versionInfo := GetVersion("")
     marshalled, _ := yaml.Marshal(&versionInfo)
     expected := strings.TrimSpace(string(marshalled))

View File

@@ -42,7 +42,7 @@ type NFSDriver struct {
 func InitNFSDriver() PVTestDriver {
     driverName := os.Getenv(NFSDriverNameVar)
     if driverName == "" {
-        driverName = nfs.DriverName
+        driverName = nfs.DefaultDriverName
     }

     klog.Infof("Using nfs driver: %s", driverName)

View File

@@ -68,7 +68,7 @@ var _ = ginkgo.BeforeSuite(func() {
     handleFlags()
     framework.AfterReadingAllFlags(&framework.TestContext)

-    nfsDriver = nfs.NewNFSdriver(nodeID, fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), perm)
+    nfsDriver = nfs.NewNFSdriver(nodeID, nfs.DefaultDriverName, fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), perm)
     controllerServer = nfs.NewControllerServer(nfsDriver)

     // install nfs server
// install nfs server // install nfs server

View File

@@ -17,6 +17,7 @@
 set -xe

 PROJECT_ROOT=$(git rev-parse --show-toplevel)
+DRIVER="test"

 install_ginkgo () {
     apt update -y
@@ -29,8 +30,10 @@ setup_e2e_binaries() {
     tar -xvf e2e-tests.tar.gz && rm e2e-tests.tar.gz

     # enable fsGroupPolicy (only available from k8s 1.20)
-    export EXTRA_HELM_OPTIONS="--set feature.enableFSGroupPolicy=true"
+    export EXTRA_HELM_OPTIONS="--set feature.enableFSGroupPolicy=true --set driver.name=$DRIVER.csi.k8s.io --set controller.name=csi-$DRIVER-controller --set node.name=csi-$DRIVER-node"
+    # test on alternative driver name
+    sed -i "s/nfs.csi.k8s.io/$DRIVER.csi.k8s.io/g" deploy/example/storageclass-nfs.yaml

     # install csi driver
     mkdir -p /tmp/csi && cp deploy/example/storageclass-nfs.yaml /tmp/csi/storageclass.yaml
     make e2e-bootstrap
@@ -40,14 +43,14 @@ setup_e2e_binaries() {
 print_logs() {
     bash ./hack/verify-examples.sh ephemeral
     echo "print out driver logs ..."
-    bash ./test/utils/nfs_log.sh
+    bash ./test/utils/nfs_log.sh $DRIVER
 }

 install_ginkgo
 setup_e2e_binaries
 trap print_logs EXIT

-ginkgo -p --progress --v -focus='External.Storage.*nfs.csi.k8s.io' \
+ginkgo -p --progress --v -focus="External.Storage.*$DRIVER.csi.k8s.io" \
     -skip='\[Disruptive\]|\[Slow\]' kubernetes/test/bin/e2e.test -- \
     -storage.testdriver=$PROJECT_ROOT/test/external-e2e/testdriver.yaml \
     --kubeconfig=$KUBECONFIG
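
For reference, after the sed above the example storage class should advertise the alternative provisioner; a quick check (expected output shown as a comment):

    grep provisioner deploy/example/storageclass-nfs.yaml
    # provisioner: test.csi.k8s.io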

View File

@@ -18,6 +18,10 @@ set -e

 NS=kube-system
 CONTAINER=nfs
+DRIVER=nfs
+if [[ "$#" -gt 0 ]]; then
+    DRIVER=$1
+fi

 echo "print out all nodes status ..."
 kubectl get nodes -o wide
@@ -31,16 +35,16 @@ echo "print out all $NS namespace pods status ..."
 kubectl get pods -n${NS}

 echo "======================================================================================"
-echo "print out csi-nfs-controller logs ..."
+echo "print out csi-$DRIVER-controller logs ..."
 echo "======================================================================================"
-LABEL='app=csi-nfs-controller'
+LABEL="app=csi-$DRIVER-controller"
 kubectl get pods -n${NS} -l${LABEL} \
     | awk 'NR>1 {print $1}' \
     | xargs -I {} kubectl logs {} --prefix -c${CONTAINER} -n${NS}

-echo "print out csi-nfs-node logs ..."
+echo "print out csi-$DRIVER-node logs ..."
 echo "======================================================================================"
-LABEL='app=csi-nfs-node'
+LABEL="app=csi-$DRIVER-node"
 kubectl get pods -n${NS} -l${LABEL} \
     | awk 'NR>1 {print $1}' \
     | xargs -I {} kubectl logs {} --prefix -c${CONTAINER} -n${NS}
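
With the new optional argument the script can follow the renamed pods; for example:

    bash ./test/utils/nfs_log.sh         # defaults to DRIVER=nfs (csi-nfs-controller / csi-nfs-node)
    bash ./test/utils/nfs_log.sh test    # matches the e2e run above (csi-test-controller / csi-test-node)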