test: fix e2e test failure

andyzhangx 2023-12-03 04:00:38 +00:00
parent bafce444be
commit d0f1e29764
13 changed files with 196 additions and 154 deletions
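
In short: the e2e suite is migrated to context-aware Ginkgo v2 specs. Each It/BeforeEach body now receives a ginkgo.SpecContext, and that context is threaded through every test-suite Run method, setup helper, and cleanup callback, so API calls and waits can be cancelled when a spec times out or is interrupted. A minimal sketch of the pattern (illustrative names only, not the driver's real types):

package testsuites

import (
	"context"

	"github.com/onsi/ginkgo/v2"
)

// exampleTest stands in for the DynamicallyProvisioned* suites; Run now takes
// the spec's context so client calls and waits can be cancelled on timeout.
type exampleTest struct{}

func (t *exampleTest) Run(ctx context.Context) {
	_ = ctx // forwarded to client-go calls, polling waits, and cleanup funcs
}

var _ = ginkgo.It("threads the spec context through the suite", func(ctx ginkgo.SpecContext) {
	test := &exampleTest{}
	test.Run(ctx) // ginkgo.SpecContext satisfies context.Context
})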

go.mod (2 changed lines)

@@ -1,6 +1,6 @@
module github.com/kubernetes-csi/csi-driver-nfs
-go 1.20
+go 1.21
require (
github.com/container-storage-interface/spec v1.8.0

go.sum (26 changed lines)

@@ -22,7 +22,9 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -63,6 +65,7 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -93,6 +96,7 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/container-storage-interface/spec v1.8.0 h1:D0vhF3PLIZwlwZEf2eNbpujGCNwspwTYf2idJRJx4xI=
github.com/container-storage-interface/spec v1.8.0/go.mod h1:ROLik+GhPslwwWRNFF1KasPzroNARibH2rfz1rkg4H0=
@@ -111,6 +115,7 @@ github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
@@ -123,6 +128,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.m
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA=
+github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -150,6 +156,7 @@ github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ4
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A=
+github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -169,9 +176,11 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
+github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -205,6 +214,7 @@ github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.16.1 h1:3hZfSNiAU3KOiNtxuFXVp5WFy4hf/Ly3Sa4/7F8SXNo=
github.com/google/cel-go v0.16.1/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -249,6 +259,7 @@ github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0U
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
@@ -267,6 +278,7 @@ github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@@ -286,6 +298,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -361,12 +374,15 @@ github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPH
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
@@ -393,22 +409,29 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
+go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs=
go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE=
go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo=
+go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ=
go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E=
go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA=
go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ=
+go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY=
go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI=
+go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg=
go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0=
+go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -441,6 +464,7 @@ go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -490,6 +514,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
+golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -804,6 +829,7 @@ k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3Qv
k8s.io/controller-manager v0.28.4 h1:8uJmo1pD6fWYk4mC/JfZQU6zPvuCgEHf3pd5G39ldDU=
k8s.io/controller-manager v0.28.4/go.mod h1:pnO+UK2mcWNu1MxucqI8xHPD/8UBm04IUmp2u/3vbnM=
k8s.io/csi-translation-lib v0.28.4 h1:4TrU2zefZGU5HQCyPZvcPxkS6IowqZ/jBs2Qi/dPUpc=
+k8s.io/csi-translation-lib v0.28.4/go.mod h1:oxwDdx0hyVqViINOUF7TGrVt51eqsOkQ0BTI+A9QcQs=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=


@@ -28,7 +28,7 @@ import (
admissionapi "k8s.io/pod-security-admission/api"
)
-var _ = ginkgo.Describe("Dynamic Provisioning", func() {
+var _ = ginkgo.Describe("Dynamic Provisioning", func(ctx ginkgo.SpecContext) {
f := framework.NewDefaultFramework("nfs")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
@@ -38,7 +38,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
testDriver driver.PVTestDriver
)
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx ginkgo.SpecContext) {
checkPodsRestart := testCmd{
command: "sh",
args: []string{"test/utils/check_driver_pods_restart.sh"},
@@ -52,7 +52,7 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
})
testDriver = driver.InitNFSDriver()
-ginkgo.It("should create a volume on demand with mount options [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a volume on demand with mount options [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -72,10 +72,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with zero mountPermissions [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a volume on demand with zero mountPermissions [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -95,10 +95,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: storageClassParametersWithZeroMountPermisssions,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create multiple PV objects, bind to PVCs and attach all to different pods on the same node [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "while true; do echo $(date -u) >> /mnt/test-1/data; sleep 100; done",
@@ -131,11 +131,11 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
ColocatePods: true,
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
// Track issue https://github.com/kubernetes/kubernetes/issues/70505
-ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a volume on demand and mount it as readOnly in a pod [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "touch /mnt/test-1/data",
@@ -156,10 +156,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done",
Volumes: []testsuites.VolumeDetails{
@@ -185,10 +185,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
},
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("[subDir]should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func() {
+ginkgo.It("[subDir]should create a deployment object, write and read to it, delete the pod and write and read to it again [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pod := testsuites.PodDetails{
Cmd: "echo 'hello world' >> /mnt/test-1/data && while true; do sleep 100; done",
Volumes: []testsuites.VolumeDetails{
@@ -214,10 +214,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
},
StorageClassParameters: subDirStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimDelete), func() {
+ginkgo.It(fmt.Sprintf("should delete PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimDelete), func(ctx ginkgo.SpecContext) {
reclaimPolicy := v1.PersistentVolumeReclaimDelete
volumes := []testsuites.VolumeDetails{
{
@@ -231,10 +231,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
StorageClassParameters: defaultStorageClassParameters,
ControllerServer: *controllerServer,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimRetain), func() {
+ginkgo.It(fmt.Sprintf("should retain PV with reclaimPolicy %q [nfs.csi.k8s.io]", v1.PersistentVolumeReclaimRetain), func(ctx ginkgo.SpecContext) {
reclaimPolicy := v1.PersistentVolumeReclaimRetain
volumes := []testsuites.VolumeDetails{
{
@@ -248,10 +248,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
ControllerServer: *controllerServer,
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a pod with multiple volumes [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a pod with multiple volumes [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
volumes := []testsuites.VolumeDetails{}
for i := 1; i <= 6; i++ {
volume := testsuites.VolumeDetails{
@@ -275,10 +275,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: subDirStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a pod with volume mount subpath [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a pod with volume mount subpath [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
@@ -298,10 +298,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: defaultStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a CSI inline volume [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a CSI inline volume [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: convertToPowershellCommandIfNecessary("echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data"),
@@ -325,10 +325,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
MountOptions: "nconnect=8,nfsvers=4.1,sec=sys",
ReadOnly: false,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with retaining subdir on delete [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a volume on demand with retaining subdir on delete [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -348,10 +348,10 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: retainStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
-ginkgo.It("should create a volume on demand with archive subdir on delete [nfs.csi.k8s.io]", func() {
+ginkgo.It("should create a volume on demand with archive subdir on delete [nfs.csi.k8s.io]", func(ctx ginkgo.SpecContext) {
pods := []testsuites.PodDetails{
{
Cmd: "echo 'hello world' > /mnt/test-1/data && grep 'hello world' /mnt/test-1/data",
@@ -371,6 +371,6 @@ var _ = ginkgo.Describe("Dynamic Provisioning", func() {
Pods: pods,
StorageClassParameters: archiveStorageClassParameters,
}
-test.Run(cs, ns)
+test.Run(ctx, cs, ns)
})
})


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
@@ -33,18 +35,18 @@ type DynamicallyProvisionedCmdVolumeTest struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedCmdVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedCmdVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
-tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with no error")
-tpod.WaitForSuccess()
+tpod.WaitForSuccess(ctx)
}
}
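
The setup helpers below get the same reshaping: cleanup closures change from func() to func(ctx context.Context) and are still registered and deferred in order, so resources are not torn down before the test body uses them. A rough sketch of that shape (hypothetical helper, not code from this repo):

package testsuites

import "context"

// runWithCleanup illustrates the updated closure shape: setup returns
// context-aware cleanup funcs, which are deferred so teardown runs with the
// spec's context after the test body finishes.
func runWithCleanup(ctx context.Context, setup func(context.Context) []func(context.Context)) {
	cleanup := setup(ctx)
	for i := range cleanup {
		defer cleanup[i](ctx)
	}
	// ... exercise the provisioned volume here ...
}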


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
@@ -33,24 +35,24 @@ type DynamicallyProvisionedCollocatedPodTest struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedCollocatedPodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedCollocatedPodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
nodeName := ""
for _, pod := range t.Pods {
-tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
if t.ColocatePods && nodeName != "" {
tpod.SetNodeSelector(map[string]string{"name": nodeName})
}
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pod is running")
-tpod.WaitForRunning()
+tpod.WaitForRunning(ctx)
nodeName = tpod.pod.Spec.NodeName
}


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
@@ -38,18 +40,18 @@ type PodExecCheck struct {
ExpectedString string
}
-func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedDeletePodTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
-tDeployment, cleanup := t.Pod.SetupDeployment(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tDeployment, cleanup := t.Pod.SetupDeployment(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the deployment")
-tDeployment.Create()
+tDeployment.Create(ctx)
ginkgo.By("checking that the pod is running")
-tDeployment.WaitForPodReady()
+tDeployment.WaitForPodReady(ctx)
if t.PodCheck != nil {
ginkgo.By("checking pod exec")
@@ -57,10 +59,10 @@ func (t *DynamicallyProvisionedDeletePodTest) Run(client clientset.Interface, na
}
ginkgo.By("deleting the pod for deployment")
-tDeployment.DeletePodAndWait()
+tDeployment.DeletePodAndWait(ctx)
ginkgo.By("checking again that the pod is running")
-tDeployment.WaitForPodReady()
+tDeployment.WaitForPodReady(ctx)
if t.PodCheck != nil {
ginkgo.By("checking pod exec")


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
@@ -35,7 +37,7 @@ type DynamicallyProvisionedInlineVolumeTest struct {
ReadOnly bool
}
-func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedInlineVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
var tpod *TestPod
var cleanup []func()
@@ -46,9 +48,9 @@ func (t *DynamicallyProvisionedInlineVolumeTest) Run(client clientset.Interface,
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with no error")
-tpod.WaitForSuccess()
+tpod.WaitForSuccess(ctx)
}
}


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
@@ -34,18 +36,18 @@ type DynamicallyProvisionedPodWithMultiplePVsTest struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedPodWithMultiplePVsTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedPodWithMultiplePVsTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
-tpod, cleanup := pod.SetupWithDynamicMultipleVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpod, cleanup := pod.SetupWithDynamicMultipleVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with no error")
-tpod.WaitForSuccess()
+tpod.WaitForSuccess(ctx)
}
}


@@ -17,6 +17,7 @@ limitations under the License.
package testsuites
import (
+"context"
"fmt"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
@@ -36,23 +37,23 @@ type DynamicallyProvisionedReadOnlyVolumeTest struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedReadOnlyVolumeTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
expectedReadOnlyLog := "Read-only file system"
-tpod, cleanup := pod.SetupWithDynamicVolumes(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpod, cleanup := pod.SetupWithDynamicVolumes(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with an error")
-tpod.WaitForFailure()
+tpod.WaitForFailure(ctx)
ginkgo.By("checking that pod logs contain expected message")
-body, err := tpod.Logs()
+body, err := tpod.Logs(ctx)
framework.ExpectNoError(err, fmt.Sprintf("Error getting logs for pod %s: %v", tpod.pod.Name, err))
gomega.Expect(string(body)).To(gomega.ContainSubstring(expectedReadOnlyLog))
}


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/pkg/nfs"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
@@ -33,17 +35,17 @@ type DynamicallyProvisionedReclaimPolicyTest struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedReclaimPolicyTest) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedReclaimPolicyTest) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, volume := range t.Volumes {
-tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpvc, _ := volume.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// will delete the PVC
// will also wait for PV to be deleted when reclaimPolicy=Delete
-tpvc.Cleanup()
+tpvc.Cleanup(ctx)
// first check PV stills exists, then manually delete it
if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
-tpvc.WaitForPersistentVolumePhase(v1.VolumeReleased)
+tpvc.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased)
-tpvc.DeleteBoundPersistentVolume()
+tpvc.DeleteBoundPersistentVolume(ctx)
// The controller server cannot resolve the nfs server hosting inside the testing k8s cluster, skipping the cleanup step.
// tpvc.DeleteBackingVolume(&t.ControllerServer)
}


@@ -17,6 +17,8 @@ limitations under the License.
package testsuites
import (
+"context"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
"github.com/onsi/ginkgo/v2"
@@ -33,18 +35,18 @@ type DynamicallyProvisionedVolumeSubpathTester struct {
StorageClassParameters map[string]string
}
-func (t *DynamicallyProvisionedVolumeSubpathTester) Run(client clientset.Interface, namespace *v1.Namespace) {
+func (t *DynamicallyProvisionedVolumeSubpathTester) Run(ctx context.Context, client clientset.Interface, namespace *v1.Namespace) {
for _, pod := range t.Pods {
-tpod, cleanup := pod.SetupWithDynamicVolumesWithSubpath(client, namespace, t.CSIDriver, t.StorageClassParameters)
+tpod, cleanup := pod.SetupWithDynamicVolumesWithSubpath(ctx, client, namespace, t.CSIDriver, t.StorageClassParameters)
// defer must be called here for resources not get removed before using them
for i := range cleanup {
-defer cleanup[i]()
+defer cleanup[i](ctx)
}
ginkgo.By("deploying the pod")
-tpod.Create()
+tpod.Create(ctx)
-defer tpod.Cleanup()
+defer tpod.Cleanup(ctx)
ginkgo.By("checking that the pods command exits with no error")
-tpod.WaitForSuccess()
+tpod.WaitForSuccess(ctx)
}
}


@@ -17,6 +17,7 @@ limitations under the License.
package testsuites
import (
+"context"
"fmt"
"github.com/kubernetes-csi/csi-driver-nfs/test/e2e/driver"
@@ -78,12 +79,12 @@ type VolumeDetails struct {
NodeStageSecretRef string
}
-func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func()) {
+func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPersistentVolumeClaim, []func(ctx context.Context)) {
-cleanupFuncs := make([]func(), 0)
+cleanupFuncs := make([]func(ctx context.Context), 0)
ginkgo.By("setting up the StorageClass")
storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
tsc := NewTestStorageClass(client, namespace, storageClass)
-createdStorageClass := tsc.Create()
+createdStorageClass := tsc.Create(ctx)
cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
ginkgo.By("setting up the PVC and PV")
var tpvc *TestPersistentVolumeClaim
@@ -95,22 +96,22 @@ func (volume *VolumeDetails) SetupDynamicPersistentVolumeClaim(client clientset.
} else {
tpvc = NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
}
-tpvc.Create()
+tpvc.Create(ctx)
cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
// PV will not be ready until PVC is used in a pod when volumeBindingMode: WaitForFirstConsumer
if volume.VolumeBindingMode == nil || *volume.VolumeBindingMode == storagev1.VolumeBindingImmediate {
-tpvc.WaitForBound()
+tpvc.WaitForBound(ctx)
-tpvc.ValidateProvisionedPersistentVolume()
+tpvc.ValidateProvisionedPersistentVolume(ctx)
}
return tpvc, cleanupFuncs
}
-func (pod *PodDetails) SetupWithDynamicVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) {
+func (pod *PodDetails) SetupWithDynamicVolumes(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) {
tpod := NewTestPod(client, namespace, pod.Cmd)
-cleanupFuncs := make([]func(), 0)
+cleanupFuncs := make([]func(ctx context.Context), 0)
for n, v := range pod.Volumes {
-tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters)
+tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters)
cleanupFuncs = append(cleanupFuncs, funcs...)
tpod.SetupVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), v.VolumeMount.ReadOnly)
}
@@ -126,19 +127,19 @@ func (pod *PodDetails) SetupWithCSIInlineVolumes(client clientset.Interface, nam
return tpod, cleanupFuncs
}
-func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func()) {
+func (pod *PodDetails) SetupDeployment(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestDeployment, []func(ctx context.Context)) {
-cleanupFuncs := make([]func(), 0)
+cleanupFuncs := make([]func(ctx context.Context), 0)
volume := pod.Volumes[0]
ginkgo.By("setting up the StorageClass")
storageClass := csiDriver.GetDynamicProvisionStorageClass(storageClassParameters, volume.MountOptions, volume.ReclaimPolicy, volume.VolumeBindingMode, volume.AllowedTopologyValues, namespace.Name)
tsc := NewTestStorageClass(client, namespace, storageClass)
-createdStorageClass := tsc.Create()
+createdStorageClass := tsc.Create(ctx)
cleanupFuncs = append(cleanupFuncs, tsc.Cleanup)
ginkgo.By("setting up the PVC")
tpvc := NewTestPersistentVolumeClaim(client, namespace, volume.ClaimSize, volume.VolumeMode, &createdStorageClass)
-tpvc.Create()
+tpvc.Create(ctx)
-tpvc.WaitForBound()
+tpvc.WaitForBound(ctx)
-tpvc.ValidateProvisionedPersistentVolume()
+tpvc.ValidateProvisionedPersistentVolume(ctx)
cleanupFuncs = append(cleanupFuncs, tpvc.Cleanup)
ginkgo.By("setting up the Deployment")
tDeployment := NewTestDeployment(client, namespace, pod.Cmd, tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", volume.VolumeMount.NameGenerate, 1), fmt.Sprintf("%s%d", volume.VolumeMount.MountPathGenerate, 1), volume.VolumeMount.ReadOnly)
@@ -148,11 +149,11 @@ func (pod *PodDetails) SetupDeployment(client clientset.Interface, namespace *v1
}
// SetupWithDynamicMultipleVolumes each pod will be mounted with multiple volumes
-func (pod *PodDetails) SetupWithDynamicMultipleVolumes(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) {
+func (pod *PodDetails) SetupWithDynamicMultipleVolumes(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) {
tpod := NewTestPod(client, namespace, pod.Cmd)
-cleanupFuncs := make([]func(), 0)
+cleanupFuncs := make([]func(ctx context.Context), 0)
for n, v := range pod.Volumes {
-tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters)
+tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters)
cleanupFuncs = append(cleanupFuncs, funcs...)
if v.VolumeMode == Block {
tpod.SetupRawBlockVolume(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeDevice.NameGenerate, n+1), v.VolumeDevice.DevicePath)
@ -163,11 +164,11 @@ func (pod *PodDetails) SetupWithDynamicMultipleVolumes(client clientset.Interfac
return tpod, cleanupFuncs return tpod, cleanupFuncs
} }
func (pod *PodDetails) SetupWithDynamicVolumesWithSubpath(client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func()) { func (pod *PodDetails) SetupWithDynamicVolumesWithSubpath(ctx context.Context, client clientset.Interface, namespace *v1.Namespace, csiDriver driver.DynamicPVTestDriver, storageClassParameters map[string]string) (*TestPod, []func(ctx context.Context)) {
tpod := NewTestPod(client, namespace, pod.Cmd) tpod := NewTestPod(client, namespace, pod.Cmd)
cleanupFuncs := make([]func(), 0) cleanupFuncs := make([]func(ctx context.Context), 0)
for n, v := range pod.Volumes { for n, v := range pod.Volumes {
tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(client, namespace, csiDriver, storageClassParameters) tpvc, funcs := v.SetupDynamicPersistentVolumeClaim(ctx, client, namespace, csiDriver, storageClassParameters)
cleanupFuncs = append(cleanupFuncs, funcs...) cleanupFuncs = append(cleanupFuncs, funcs...)
tpod.SetupVolumeMountWithSubpath(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), "testSubpath", v.VolumeMount.ReadOnly) tpod.SetupVolumeMountWithSubpath(tpvc.persistentVolumeClaim, fmt.Sprintf("%s%d", v.VolumeMount.NameGenerate, n+1), fmt.Sprintf("%s%d", v.VolumeMount.MountPathGenerate, n+1), "testSubpath", v.VolumeMount.ReadOnly)
} }

@@ -94,17 +94,17 @@ type TestPod struct {
namespace *v1.Namespace
}
-func (t *TestStorageClass) Cleanup() {
+func (t *TestStorageClass) Cleanup(ctx context.Context) {
framework.Logf("deleting StorageClass %s", t.storageClass.Name)
-err := t.client.StorageV1().StorageClasses().Delete(context.TODO(), t.storageClass.Name, metav1.DeleteOptions{})
+err := t.client.StorageV1().StorageClasses().Delete(ctx, t.storageClass.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}
-func (t *TestStorageClass) Create() storagev1.StorageClass {
+func (t *TestStorageClass) Create(ctx context.Context) storagev1.StorageClass {
var err error
ginkgo.By("creating a StorageClass " + t.storageClass.Name)
-t.storageClass, err = t.client.StorageV1().StorageClasses().Create(context.TODO(), t.storageClass, metav1.CreateOptions{})
+t.storageClass, err = t.client.StorageV1().StorageClasses().Create(ctx, t.storageClass, metav1.CreateOptions{})
framework.ExpectNoError(err)
return *t.storageClass
}
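The practical effect of passing ctx instead of context.TODO() is that the underlying client-go requests become cancellable when the spec times out or is interrupted. A minimal sketch, assuming the caller already holds the test's ctx (the two-minute bound and the variable names are illustrative, not from this commit):

```go
// Bound a single API call with the test's context; with context.TODO() the
// request could outlive the spec and only give up at the client's own timeout.
createCtx, cancel := context.WithTimeout(ctx, 2*time.Minute)
defer cancel()
createdStorageClass := tsc.Create(createCtx)
_ = createdStorageClass
```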
@@ -168,7 +168,7 @@ func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.Pe
}
}
-func (t *TestPersistentVolumeClaim) Create() {
+func (t *TestPersistentVolumeClaim) Create(ctx context.Context) {
var err error
ginkgo.By("creating a PVC")
@@ -177,14 +177,14 @@ func (t *TestPersistentVolumeClaim) Create() {
storageClassName = t.storageClass.Name
}
t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
-t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(context.TODO(), t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
+t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(ctx, t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
// removeFinalizers is a workaround to solve the problem that PV is stuck at terminating after PVC is deleted.
// Related issue: https://github.com/kubernetes/kubernetes/issues/69697
-func (t *TestPersistentVolumeClaim) removeFinalizers() {
+func (t *TestPersistentVolumeClaim) removeFinalizers(ctx context.Context) {
-pv, err := t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolume.Name, metav1.GetOptions{})
+pv, err := t.client.CoreV1().PersistentVolumes().Get(ctx, t.persistentVolume.Name, metav1.GetOptions{})
// Because the pv might be deleted successfully, if so, ignore the error.
if err != nil && strings.Contains(err.Error(), "not found") {
return
@@ -204,7 +204,7 @@ func (t *TestPersistentVolumeClaim) removeFinalizers() {
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, pvClone)
framework.ExpectNoError(err)
-_, err = t.client.CoreV1().PersistentVolumes().Patch(context.TODO(), pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+_, err = t.client.CoreV1().PersistentVolumes().Patch(ctx, pvClone.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
// Because the pv might be deleted successfully before patched, if so, ignore the error.
if err != nil && strings.Contains(err.Error(), "not found") {
return
@@ -212,9 +212,9 @@ func (t *TestPersistentVolumeClaim) removeFinalizers() {
framework.ExpectNoError(err)
}
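The finalizer workaround itself is unchanged by this hunk: the PV is cloned, its finalizers cleared, and a strategic merge patch is computed from the before/after JSON. The hunk elides that middle step; a sketch of what it typically looks like (variable names match the hunk, but the body shown here is an illustration rather than the repository's exact code), feeding the strategicpatch.CreateTwoWayMergePatch call above:

```go
// Serialize the PV before and after clearing its finalizers so the two JSON
// documents can be diffed into a strategic merge patch.
oldData, err := json.Marshal(pv)
framework.ExpectNoError(err)
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = nil
newData, err := json.Marshal(pvClone)
framework.ExpectNoError(err)
```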
-func (t *TestPersistentVolumeClaim) Cleanup() {
+func (t *TestPersistentVolumeClaim) Cleanup(ctx context.Context) {
framework.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
-err := e2epv.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
+err := e2epv.DeletePersistentVolumeClaim(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
framework.ExpectNoError(err)
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
@@ -225,22 +225,22 @@ func (t *TestPersistentVolumeClaim) Cleanup() {
if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
if t.persistentVolume.Spec.CSI != nil {
// only workaround in CSI driver tests
-t.removeFinalizers()
+t.removeFinalizers(ctx)
}
ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
-err := e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+err := e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err)
}
// Wait for the PVC to be deleted
-err = waitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
+err = waitForPersistentVolumeClaimDeleted(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
framework.ExpectNoError(err)
}
// waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
-func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
+func waitForPersistentVolumeClaimDeleted(ctx context.Context, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
-_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
+_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
if apierrs.IsNotFound(err) {
framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
@@ -252,27 +252,27 @@ func waitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcNa
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}
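Note that the helper keeps its hand-rolled wall-clock loop and only threads ctx into the Get call. A fully context-aware formulation (a sketch of an alternative, not what this commit does) could lean on apimachinery's wait package, which also stops early if the context is cancelled:

```go
// Sketch: the same polling behaviour expressed with
// k8s.io/apimachinery/pkg/util/wait, assuming apierrs is
// k8s.io/apimachinery/pkg/api/errors.
return wait.PollUntilContextTimeout(ctx, Poll, timeout, true, func(ctx context.Context) (bool, error) {
	_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
	if apierrs.IsNotFound(err) {
		return true, nil // claim is gone
	}
	return false, err // keep polling while it exists, fail fast on other errors
})
```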
-func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
+func (t *TestPersistentVolumeClaim) WaitForBound(ctx context.Context) v1.PersistentVolumeClaim {
var err error
ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
-err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
ginkgo.By("checking the PVC")
// Get new copy of the claim
-t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(context.TODO(), t.persistentVolumeClaim.Name, metav1.GetOptions{})
+t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(ctx, t.persistentVolumeClaim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return *t.persistentVolumeClaim
}
-func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
+func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume(ctx context.Context) {
var err error
// Get the bound PersistentVolume
ginkgo.By("validating provisioned PV")
-t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(context.TODO(), t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
+t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(ctx, t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// Check sizes
@@ -312,8 +312,8 @@ func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) {
t.pod.Spec.NodeSelector = nodeSelector
}
-func (t *TestPod) WaitForFailure() {
+func (t *TestPod) WaitForFailure(ctx context.Context) {
-err := e2epod.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
+err := e2epod.WaitForPodCondition(ctx, t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
framework.ExpectNoError(err)
}
@@ -382,44 +382,44 @@ func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath str
t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}
-func (t *TestPod) Logs() ([]byte, error) {
+func (t *TestPod) Logs(ctx context.Context) ([]byte, error) {
-return podLogs(t.client, t.pod.Name, t.namespace.Name)
+return podLogs(ctx, t.client, t.pod.Name, t.namespace.Name)
}
-func cleanupPodOrFail(client clientset.Interface, name, namespace string) {
+func cleanupPodOrFail(ctx context.Context, client clientset.Interface, name, namespace string) {
framework.Logf("deleting Pod %q/%q", namespace, name)
-body, err := podLogs(client, name, namespace)
+body, err := podLogs(ctx, client, name, namespace)
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", name, err)
} else {
framework.Logf("Pod %s has the following logs: %s", name, body)
}
-e2epod.DeletePodOrFail(client, namespace, name)
+e2epod.DeletePodOrFail(ctx, client, namespace, name)
}
-func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) {
+func podLogs(ctx context.Context, client clientset.Interface, name, namespace string) ([]byte, error) {
-return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
+return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(ctx).Raw()
}
-func (t *TestPod) Create() {
+func (t *TestPod) Create(ctx context.Context) {
var err error
-t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(context.TODO(), t.pod, metav1.CreateOptions{})
+t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(ctx, t.pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
-func (t *TestPod) WaitForSuccess() {
+func (t *TestPod) WaitForSuccess(ctx context.Context) {
-err := e2epod.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name)
+err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, t.client, t.pod.Name, t.namespace.Name)
framework.ExpectNoError(err)
}
-func (t *TestPod) WaitForRunning() {
+func (t *TestPod) WaitForRunning(ctx context.Context) {
-err := e2epod.WaitForPodRunningInNamespace(t.client, t.pod)
+err := e2epod.WaitForPodRunningInNamespace(ctx, t.client, t.pod)
framework.ExpectNoError(err)
}
-func (t *TestPod) Cleanup() {
+func (t *TestPod) Cleanup(ctx context.Context) {
-cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name)
+cleanupPodOrFail(ctx, t.client, t.pod.Name, t.namespace.Name)
}
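The same ctx threading applies on the negative path, where a spec expects the pod to fail rather than succeed. A hypothetical sketch of how these TestPod helpers compose there (the failure scenario and log handling are illustrative, not from this commit):

```go
// Sketch: a spec that expects the pod to fail, e.g. because the volume cannot
// be mounted; all waits and the log dump observe the spec's context.
tpod.Create(ctx)
defer tpod.Cleanup(ctx)
tpod.WaitForFailure(ctx)
if logs, err := tpod.Logs(ctx); err == nil {
	framework.Logf("pod logs after expected failure: %s", string(logs))
}
```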
type TestDeployment struct {
@@ -486,25 +486,25 @@ func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string,
return testDeployment
}
-func (t *TestDeployment) Create() {
+func (t *TestDeployment) Create(ctx context.Context) {
var err error
-t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(context.TODO(), t.deployment, metav1.CreateOptions{})
+t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(ctx, t.deployment, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = deployment.WaitForDeploymentComplete(t.client, t.deployment)
framework.ExpectNoError(err)
-pods, err := deployment.GetPodsForDeployment(t.client, t.deployment)
+pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment)
framework.ExpectNoError(err)
// always get first pod as there should only be one
t.podName = pods.Items[0].Name
}
-func (t *TestDeployment) WaitForPodReady() {
+func (t *TestDeployment) WaitForPodReady(ctx context.Context) {
-pods, err := deployment.GetPodsForDeployment(t.client, t.deployment)
+pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment)
framework.ExpectNoError(err)
// always get first pod as there should only be one
pod := pods.Items[0]
t.podName = pod.Name
-err = e2epod.WaitForPodRunningInNamespace(t.client, &pod)
+err = e2epod.WaitForPodRunningInNamespace(ctx, t.client, &pod)
framework.ExpectNoError(err)
}
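Together with DeletePodAndWait below, these deployment helpers support the usual "restart the pod, expect the volume to survive" pattern. A hypothetical sketch of how they compose with the new signatures (tDeployment would come from pod.SetupDeployment(ctx, ...); the scenario itself is illustrative):

```go
// Sketch: deployment-backed volume test with the ctx-aware helpers.
tDeployment.Create(ctx)
defer tDeployment.Cleanup(ctx)
ginkgo.By("deleting the pod and waiting for the replacement to become ready")
tDeployment.DeletePodAndWait(ctx)
tDeployment.WaitForPodReady(ctx)
```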
@@ -542,9 +542,9 @@ func pollForStringWorker(namespace string, pod string, command []string, expecte
ch <- err
}
-func (t *TestDeployment) DeletePodAndWait() {
+func (t *TestDeployment) DeletePodAndWait(ctx context.Context) {
framework.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name)
-err := t.client.CoreV1().Pods(t.namespace.Name).Delete(context.TODO(), t.podName, metav1.DeleteOptions{})
+err := t.client.CoreV1().Pods(t.namespace.Name).Delete(ctx, t.podName, metav1.DeleteOptions{})
if err != nil {
if !apierrs.IsNotFound(err) {
framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err))
@@ -552,43 +552,43 @@ func (t *TestDeployment) DeletePodAndWait() {
return
}
framework.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name)
-err = e2epod.WaitForPodNotFoundInNamespace(t.client, t.podName, t.namespace.Name, e2epod.DefaultPodDeletionTimeout)
+err = e2epod.WaitForPodNotFoundInNamespace(ctx, t.client, t.podName, t.namespace.Name, e2epod.DefaultPodDeletionTimeout)
if err != nil {
framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %w", t.podName, err))
}
}
-func (t *TestDeployment) Cleanup() {
+func (t *TestDeployment) Cleanup(ctx context.Context) {
framework.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name)
-body, err := t.Logs()
+body, err := t.Logs(ctx)
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", t.podName, err)
} else {
framework.Logf("Pod %s has the following logs: %s", t.podName, body)
}
-err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(context.TODO(), t.deployment.Name, metav1.DeleteOptions{})
+err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(ctx, t.deployment.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
}
-func (t *TestDeployment) Logs() ([]byte, error) {
+func (t *TestDeployment) Logs(ctx context.Context) ([]byte, error) {
-return podLogs(t.client, t.podName, t.namespace.Name)
+return podLogs(ctx, t.client, t.podName, t.namespace.Name)
}
func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy {
return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy
}
-func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) {
+func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(ctx context.Context, phase v1.PersistentVolumePhase) {
-err := e2epv.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+err := e2epv.WaitForPersistentVolumePhase(ctx, phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err)
}
-func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() {
+func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume(ctx context.Context) {
ginkgo.By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
-err := e2epv.DeletePersistentVolume(t.client, t.persistentVolume.Name)
+err := e2epv.DeletePersistentVolume(ctx, t.client, t.persistentVolume.Name)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
-err = e2epv.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
+err = e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
framework.ExpectNoError(err)
}
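These last helpers cover the Retain reclaim-policy path, where the PV is not garbage-collected after the PVC goes away and the test must remove it explicitly. A hypothetical sketch of that flow with the ctx-aware signatures (the Released-phase wait is illustrative, not code from this commit):

```go
// Sketch: after the PVC is cleaned up, a Retain-policy PV lingers in the
// Released phase and has to be deleted by the test itself.
if tpvc.ReclaimPolicy() == v1.PersistentVolumeReclaimRetain {
	tpvc.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased)
	tpvc.DeleteBoundPersistentVolume(ctx)
}
```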