feat: support alternative driver name
fix driver name
This commit is contained in:
parent
43e5c57e00
commit
958efd92ca
@@ -36,24 +36,25 @@ The following table lists the configurable parameters of the latest NFS CSI Driv

| Parameter | Description | Default |
|---------------------------------------------------|------------------------------------------------------------|-------------------------------------------------------------------|
| `driver.name` | alternative driver name | `nfs.csi.k8s.io` |
| `feature.enableFSGroupPolicy` | enable `fsGroupPolicy` on a k8s 1.20+ cluster | `false` |
| `image.nfs.repository` | csi-driver-nfs docker image | gcr.io/k8s-staging-sig-storage/nfsplugin |
| `image.nfs.tag` | csi-driver-nfs docker image tag | amd64-linux-canary |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | IfNotPresent |
| `image.csiProvisioner.repository` | csi-provisioner docker image | k8s.gcr.io/sig-storage/csi-provisioner |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | v2.0.4 |
| `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | IfNotPresent |
| `image.livenessProbe.repository` | liveness-probe docker image | k8s.gcr.io/sig-storage/livenessprobe |
| `image.livenessProbe.tag` | liveness-probe docker image tag | v2.3.0 |
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | IfNotPresent |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | k8s.gcr.io/sig-storage/csi-node-driver-registrar |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | v2.2.0 |
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | IfNotPresent |
| `image.nfs.repository` | csi-driver-nfs docker image | `gcr.io/k8s-staging-sig-storage/nfsplugin` |
| `image.nfs.tag` | csi-driver-nfs docker image tag | `amd64-linux-canary` |
| `image.nfs.pullPolicy` | csi-driver-nfs image pull policy | `IfNotPresent` |
| `image.csiProvisioner.repository` | csi-provisioner docker image | `k8s.gcr.io/sig-storage/csi-provisioner` |
| `image.csiProvisioner.tag` | csi-provisioner docker image tag | `v2.0.4` |
| `image.csiProvisioner.pullPolicy` | csi-provisioner image pull policy | `IfNotPresent` |
| `image.livenessProbe.repository` | liveness-probe docker image | `k8s.gcr.io/sig-storage/livenessprobe` |
| `image.livenessProbe.tag` | liveness-probe docker image tag | `v2.3.0` |
| `image.livenessProbe.pullPolicy` | liveness-probe image pull policy | `IfNotPresent` |
| `image.nodeDriverRegistrar.repository` | csi-node-driver-registrar docker image | `k8s.gcr.io/sig-storage/csi-node-driver-registrar` |
| `image.nodeDriverRegistrar.tag` | csi-node-driver-registrar docker image tag | `v2.2.0` |
| `image.nodeDriverRegistrar.pullPolicy` | csi-node-driver-registrar image pull policy | `IfNotPresent` |
| `imagePullSecrets` | Specify docker-registry secret names as an array | [] (does not add image pull secrets to deployed pods) |
| `serviceAccount.create` | whether create service account of csi-nfs-controller | true |
| `rbac.create` | whether create rbac of csi-nfs-controller | true |
| `controller.replicas` | the replicas of csi-nfs-controller | 2 |
| `controller.runOnMaster` | run controller on master node | false |
| `serviceAccount.create` | whether create service account of csi-nfs-controller | `true` |
| `rbac.create` | whether create rbac of csi-nfs-controller | `true` |
| `controller.replicas` | the replicas of csi-nfs-controller | `2` |
| `controller.runOnMaster` | run controller on master node | `false` |
| `controller.logLevel` | controller driver log level | `5` |
| `controller.tolerations` | controller pod tolerations | |
| `node.logLevel` | node driver log level | `5` |
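The new `driver.name`, `controller.name`, and `node.name` values can be combined at install time. A minimal sketch, assuming the chart is installed from a local checkout; the release name, namespace, chart path, and the `alt.csi.k8s.io` example name are placeholders, not part of this commit:

```bash
# Install the chart with an alternative driver name and matching
# controller/node names (all names below are illustrative).
helm install csi-driver-nfs ./charts/latest/csi-driver-nfs \
  --namespace kube-system \
  --set driver.name=alt.csi.k8s.io \
  --set controller.name=csi-alt-controller \
  --set node.name=csi-alt-node
```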
Binary file not shown.
@@ -2,18 +2,18 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: csi-nfs-controller
name: {{ .Values.controller.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
replicas: {{ .Values.controller.replicas }}
selector:
matchLabels:
app: csi-nfs-controller
app: {{ .Values.controller.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: csi-nfs-controller
app: {{ .Values.controller.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
@@ -21,7 +21,7 @@ spec:
{{- end }}
hostNetwork: true # controller also needs to mount nfs to create dir
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: csi-nfs-controller-sa
serviceAccountName: {{ .Values.serviceAccount.controller }}
nodeSelector:
kubernetes.io/os: linux
{{- if .Values.controller.runOnMaster}}
@@ -58,7 +58,7 @@ spec:
args:
- --csi-address=/csi/csi.sock
- --probe-timeout=3s
- --health-port=29652
- --health-port={{ .Values.controller.livenessProbe.healthPort }}
- --v=2
imagePullPolicy: {{ .Values.image.livenessProbe.pullPolicy }}
volumeMounts:
@@ -83,6 +83,7 @@ spec:
- "--v={{ .Values.controller.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
env:
- name: NODE_ID
valueFrom:
@@ -91,7 +92,7 @@ spec:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29652
- containerPort: {{ .Values.controller.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
@@ -1,7 +1,7 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: nfs.csi.k8s.io
name: {{ .Values.driver.name }}
spec:
attachRequired: false
volumeLifecycleModes:
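With the CSIDriver object name now templated from `driver.name`, one quick way to confirm the rendered name after deployment is to list the registered CSIDriver objects. A hedged sketch; `alt.csi.k8s.io` is just the illustrative override used earlier:

```bash
# List registered CSI drivers, then inspect the one created from driver.name.
kubectl get csidriver
kubectl get csidriver alt.csi.k8s.io -o yaml
```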
@@ -3,17 +3,17 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: csi-nfs-node
name: {{ .Values.node.name }}
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
spec:
selector:
matchLabels:
app: csi-nfs-node
app: {{ .Values.node.name }}
template:
metadata:
{{ include "nfs.labels" . | indent 6 }}
app: csi-nfs-node
app: {{ .Values.node.name }}
spec:
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
@@ -78,6 +78,7 @@ spec:
- "--v={{ .Values.node.logLevel }}"
- "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)"
- "--drivername={{ .Values.driver.name }}"
env:
- name: NODE_ID
valueFrom:
@@ -86,7 +87,7 @@ spec:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
ports:
- containerPort: 29653
- containerPort: {{ .Values.node.livenessProbe.healthPort }}
name: healthz
protocol: TCP
livenessProbe:
@@ -3,7 +3,7 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-nfs-controller-sa
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
{{ include "nfs.labels" . | indent 2 }}
---
@@ -13,7 +13,7 @@ metadata:
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-external-provisioner-role
name: {{ .Values.rbac.name }}-external-provisioner-role
{{ include "nfs.labels" . | indent 2 }}
rules:
- apiGroups: [""]
@@ -41,14 +41,14 @@ rules:
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-csi-provisioner-binding
name: {{ .Values.rbac.name }}-csi-provisioner-binding
{{ include "nfs.labels" . | indent 2 }}
subjects:
- kind: ServiceAccount
name: csi-nfs-controller-sa
name: csi-{{ .Values.rbac.name }}-controller-sa
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: nfs-external-provisioner-role
name: {{ .Values.rbac.name }}-external-provisioner-role
apiGroup: rbac.authorization.k8s.io
{{- end -}}
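Since the ServiceAccount, ClusterRole, and ClusterRoleBinding names are now derived from `rbac.name`, while the controller Deployment references `serviceAccount.controller`, rendering the chart locally is a quick way to check that a given override keeps the two consistent. A sketch, assuming a local chart checkout; the `alt` value is illustrative:

```bash
# Render the templates without installing and inspect the generated RBAC names.
# rbac.name drives the ServiceAccount/ClusterRole names, so serviceAccount.controller
# is overridden to match the SA the templates will create.
helm template csi-driver-nfs ./charts/latest/csi-driver-nfs \
  --set rbac.name=alt \
  --set serviceAccount.controller=csi-alt-controller-sa \
  | grep -E "kind:|name:"
```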
@@ -15,26 +15,32 @@ image:
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
tag: v2.2.0
pullPolicy: IfNotPresent

serviceAccount:
create: true
create: true # When true, service accounts will be created for you. Set to false if you want to use your own.
controller: csi-nfs-controller-sa # Name of Service Account to be created or used

rbac:
create: true
name: nfs

controller:
name: csi-nfs-controller
replicas: 2
runOnMaster: false
livenessProbe:
healthPort: 29652
logLevel: 5
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: "true"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Equal"
value: "true"
operator: "Exists"
effect: "NoSchedule"

node:
name: csi-nfs-node
logLevel: 5
livenessProbe:
healthPort: 29653
@@ -44,6 +50,9 @@ node:
feature:
enableFSGroupPolicy: false

driver:
name: nfs.csi.k8s.io

## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
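The same overrides can also be kept in a small values file instead of repeated `--set` flags. A minimal sketch; the file name, release name, and chart path are placeholders, and the keys simply mirror the values shown above:

```bash
# Write an override file using keys from values.yaml, then install or
# upgrade the release with it (all names here are illustrative).
cat > custom-values.yaml <<'EOF'
driver:
  name: alt.csi.k8s.io
controller:
  name: csi-alt-controller
  livenessProbe:
    healthPort: 29652
node:
  name: csi-alt-node
  livenessProbe:
    healthPort: 29653
feature:
  enableFSGroupPolicy: false
EOF

helm upgrade --install csi-driver-nfs ./charts/latest/csi-driver-nfs \
  --namespace kube-system -f custom-values.yaml
```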
@@ -28,9 +28,10 @@ import (
)

var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id")
perm = flag.String("mount-permissions", "", "mounted folder permissions")
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
nodeID = flag.String("nodeid", "", "node id")
perm = flag.String("mount-permissions", "", "mounted folder permissions")
driverName = flag.String("drivername", nfs.DefaultDriverName, "name of the driver")
)

func init() {
@@ -61,6 +62,6 @@ func handle() {
parsedPerm = &permu32
}

d := nfs.NewNFSdriver(*nodeID, *endpoint, parsedPerm)
d := nfs.NewNFSdriver(*nodeID, *driverName, *endpoint, parsedPerm)
d.Run(false)
}
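Outside of Kubernetes, the new `--drivername` flag can also be passed straight to the plugin binary; when omitted it falls back to `nfs.DefaultDriverName`. A sketch, assuming the binary has been built to `bin/nfsplugin` (the path, node id, and socket are placeholders):

```bash
# Run the plugin locally with an alternative driver name; --drivername
# defaults to nfs.csi.k8s.io when not set.
./bin/nfsplugin \
  --nodeid=node-1 \
  --endpoint=unix:///tmp/csi.sock \
  --drivername=alt.csi.k8s.io
```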
@@ -22,12 +22,10 @@ spec:
priorityClassName: system-cluster-critical
tolerations:
- key: "node-role.kubernetes.io/master"
operator: "Equal"
value: "true"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/controlplane"
operator: "Equal"
value: "true"
operator: "Exists"
effect: "NoSchedule"
containers:
- name: csi-provisioner
@@ -15,25 +15,35 @@

set -euo pipefail

rollout_and_wait() {
echo "Applying config \"$1\""
trap "echo \"Failed to apply config \\\"$1\\\"\" >&2" err

APPNAME=$(kubectl apply -f $1 | grep -E "^(:?daemonset|deployment|statefulset|pod)" | awk '{printf $1}')
if [[ -n $(expr "${APPNAME}" : "\(daemonset\|deployment\|statefulset\)" || true) ]]; then
kubectl rollout status $APPNAME --watch --timeout=5m
else
kubectl wait "${APPNAME}" --for condition=ready --timeout=5m
fi
}

echo "begin to create deployment examples ..."

kubectl apply -f ./deploy/example/storageclass-nfs.yaml
kubectl apply -f ./deploy/example/deployment.yaml
kubectl apply -f ./deploy/example/statefulset.yaml

EXAMPLES+=(\
deploy/example/deployment.yaml \
deploy/example/statefulset.yaml \
)

if [[ "$#" -gt 0 ]]&&[[ "$1" == *"ephemeral"* ]]; then
kubectl apply -f ./deploy/example/daemonset-nfs-ephemeral.yaml
EXAMPLES+=(\
deploy/example/daemonset-nfs-ephemeral.yaml \
)
fi

echo "sleep 60s ..."
sleep 60

echo "begin to check pod status ..."
kubectl get pods -o wide

kubectl get pods --field-selector status.phase=Running | grep deployment-nfs
kubectl get pods --field-selector status.phase=Running | grep statefulset-nfs-0
if [[ "$#" -gt 0 ]]&&[[ "$1" == *"ephemeral"* ]]; then
kubectl get pods --field-selector status.phase=Running | grep daemonset-nfs-ephemeral
fi
for EXAMPLE in "${EXAMPLES[@]}"; do
rollout_and_wait $EXAMPLE
done

echo "deployment examples running completed."
@@ -49,7 +49,7 @@ var (
func initTestController(t *testing.T) *ControllerServer {
var perm *uint32
mounter := &mount.FakeMounter{MountPoints: []mount.MountPoint{}}
driver := NewNFSdriver("", "", perm)
driver := NewNFSdriver("", "", "", perm)
driver.ns = NewNodeServer(driver, mounter)
cs := NewControllerServer(driver)
cs.workingMountDir = "/tmp"
@@ -42,7 +42,7 @@ type Driver struct {
}

const (
DriverName = "nfs.csi.k8s.io"
DefaultDriverName = "nfs.csi.k8s.io"
// Address of the NFS server
paramServer = "server"
// Base directory of the NFS server to create volumes under.
@@ -56,11 +56,11 @@ var (
version = "3.0.0"
)

func NewNFSdriver(nodeID, endpoint string, perm *uint32) *Driver {
klog.Infof("Driver: %v version: %v", DriverName, version)
func NewNFSdriver(nodeID, driverName, endpoint string, perm *uint32) *Driver {
klog.Infof("Driver: %v version: %v", driverName, version)

n := &Driver{
name: DriverName,
name: driverName,
version: version,
nodeID: nodeID,
endpoint: endpoint,
@@ -100,7 +100,7 @@ func NewNodeServer(n *Driver, mounter mount.Interface) *NodeServer {
}

func (n *Driver) Run(testMode bool) {
versionMeta, err := GetVersionYAML()
versionMeta, err := GetVersionYAML(n.name)
if err != nil {
klog.Fatalf("%v", err)
}
@@ -36,7 +36,7 @@ func NewEmptyDriver(emptyField string) *Driver {
switch emptyField {
case "version":
d = &Driver{
name: DriverName,
name: DefaultDriverName,
version: "",
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
@@ -52,7 +52,7 @@ func NewEmptyDriver(emptyField string) *Driver {
}
default:
d = &Driver{
name: DriverName,
name: DefaultDriverName,
version: version,
nodeID: fakeNodeID,
cap: map[csi.VolumeCapability_AccessMode_Mode]bool{},
@@ -43,9 +43,9 @@ type VersionInfo struct {
}

// GetVersion returns the version information of the driver
func GetVersion() VersionInfo {
func GetVersion(driverName string) VersionInfo {
return VersionInfo{
DriverName: DriverName,
DriverName: driverName,
DriverVersion: driverVersion,
GitCommit: gitCommit,
BuildDate: buildDate,
@@ -57,8 +57,8 @@ func GetVersion() VersionInfo {

// GetVersionYAML returns the version information of the driver
// in YAML format
func GetVersionYAML() (string, error) {
info := GetVersion()
func GetVersionYAML(driverName string) (string, error) {
info := GetVersion(driverName)
marshalled, err := yaml.Marshal(&info)
if err != nil {
return "", err
@@ -27,10 +27,10 @@ import (
)

func TestGetVersion(t *testing.T) {
version := GetVersion()
version := GetVersion(DefaultDriverName)

expected := VersionInfo{
DriverName: DriverName,
DriverName: DefaultDriverName,
DriverVersion: "N/A",
GitCommit: "N/A",
BuildDate: "N/A",
@@ -46,11 +46,11 @@ func TestGetVersion(t *testing.T) {
}

func TestGetVersionYAML(t *testing.T) {
resp, err := GetVersionYAML()
resp, err := GetVersionYAML("")
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
versionInfo := GetVersion()
versionInfo := GetVersion("")
marshalled, _ := yaml.Marshal(&versionInfo)
expected := strings.TrimSpace(string(marshalled))
@@ -42,7 +42,7 @@ type NFSDriver struct {
func InitNFSDriver() PVTestDriver {
driverName := os.Getenv(NFSDriverNameVar)
if driverName == "" {
driverName = nfs.DriverName
driverName = nfs.DefaultDriverName
}

klog.Infof("Using nfs driver: %s", driverName)
@@ -68,7 +68,7 @@ var _ = ginkgo.BeforeSuite(func() {
handleFlags()
framework.AfterReadingAllFlags(&framework.TestContext)

nfsDriver = nfs.NewNFSdriver(nodeID, fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), perm)
nfsDriver = nfs.NewNFSdriver(nodeID, nfs.DefaultDriverName, fmt.Sprintf("unix:///tmp/csi-%s.sock", uuid.NewUUID().String()), perm)
controllerServer = nfs.NewControllerServer(nfsDriver)

// install nfs server
@@ -17,6 +17,7 @@
set -xe

PROJECT_ROOT=$(git rev-parse --show-toplevel)
DRIVER="test"

install_ginkgo () {
apt update -y
@@ -29,8 +30,10 @@ setup_e2e_binaries() {
tar -xvf e2e-tests.tar.gz && rm e2e-tests.tar.gz

# enable fsGroupPolicy (only available from k8s 1.20)
export EXTRA_HELM_OPTIONS="--set feature.enableFSGroupPolicy=true"
export EXTRA_HELM_OPTIONS="--set feature.enableFSGroupPolicy=true --set driver.name=$DRIVER.csi.k8s.io --set controller.name=csi-$DRIVER-controller --set node.name=csi-$DRIVER-node"

# test on alternative driver name
sed -i "s/nfs.csi.k8s.io/$DRIVER.csi.k8s.io/g" deploy/example/storageclass-nfs.yaml
# install csi driver
mkdir -p /tmp/csi && cp deploy/example/storageclass-nfs.yaml /tmp/csi/storageclass.yaml
make e2e-bootstrap
@@ -40,14 +43,14 @@ setup_e2e_binaries() {
print_logs() {
bash ./hack/verify-examples.sh ephemeral
echo "print out driver logs ..."
bash ./test/utils/nfs_log.sh
bash ./test/utils/nfs_log.sh $DRIVER
}

install_ginkgo
setup_e2e_binaries
trap print_logs EXIT

ginkgo -p --progress --v -focus='External.Storage.*nfs.csi.k8s.io' \
ginkgo -p --progress --v -focus="External.Storage.*$DRIVER.csi.k8s.io" \
-skip='\[Disruptive\]|\[Slow\]' kubernetes/test/bin/e2e.test -- \
-storage.testdriver=$PROJECT_ROOT/test/external-e2e/testdriver.yaml \
--kubeconfig=$KUBECONFIG
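With `DRIVER="test"`, the helm overrides and the rewritten example StorageClass both end up pointing at `test.csi.k8s.io`. A small sketch of how one might check the rewrite locally (the expectation is illustrative, not output captured from the script):

```bash
# Confirm the sed rewrite switched the example StorageClass to the
# alternative provisioner name used by this test run.
DRIVER=test
sed -i "s/nfs.csi.k8s.io/$DRIVER.csi.k8s.io/g" deploy/example/storageclass-nfs.yaml
grep "$DRIVER.csi.k8s.io" deploy/example/storageclass-nfs.yaml
```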
@@ -18,6 +18,10 @@ set -e

NS=kube-system
CONTAINER=nfs
DRIVER=nfs
if [[ "$#" -gt 0 ]]; then
DRIVER=$1
fi

echo "print out all nodes status ..."
kubectl get nodes -o wide
@@ -31,16 +35,16 @@ echo "print out all $NS namespace pods status ..."
kubectl get pods -n${NS}
echo "======================================================================================"

echo "print out csi-nfs-controller logs ..."
echo "print out csi-$DRIVER-controller logs ..."
echo "======================================================================================"
LABEL='app=csi-nfs-controller'
LABEL="app=csi-$DRIVER-controller"
kubectl get pods -n${NS} -l${LABEL} \
| awk 'NR>1 {print $1}' \
| xargs -I {} kubectl logs {} --prefix -c${CONTAINER} -n${NS}

echo "print out csi-nfs-node logs ..."
echo "print out csi-$DRIVER-node logs ..."
echo "======================================================================================"
LABEL='app=csi-nfs-node'
LABEL="app=csi-$DRIVER-node"
kubectl get pods -n${NS} -l${LABEL} \
| awk 'NR>1 {print $1}' \
| xargs -I {} kubectl logs {} --prefix -c${CONTAINER} -n${NS}
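Since the log helper now takes the driver prefix as its first argument (defaulting to `nfs`), it can be pointed at a deployment that used an alternative name. A short usage sketch:

```bash
# Dump controller and node logs for the default driver name ...
bash ./test/utils/nfs_log.sh
# ... or for pods labelled app=csi-test-controller / app=csi-test-node.
bash ./test/utils/nfs_log.sh test
```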