fix: CVE-2022-3294

commit 8e71777a30 (parent 6906f970c8)
Author: andyzhangx
Date: 2023-04-02 14:32:01 +00:00

40 changed files with 712 additions and 280 deletions
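
This commit bumps the k8s.io module family from v0.23.3 to v0.23.14 (and konnectivity-client from v0.0.27 to v0.0.33) so the vendored tree picks up the Kubernetes patch release that addresses CVE-2022-3294, where node address validation could be bypassed when the API server proxies requests to nodes. Everything below go.mod and go.sum is the resulting vendored diff between the two upstream releases.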

go.mod

@@ -12,12 +12,12 @@ require (
 	github.com/stretchr/testify v1.8.0
 	golang.org/x/net v0.7.0
 	google.golang.org/grpc v1.40.0
-	k8s.io/api v0.23.3
-	k8s.io/apimachinery v0.23.3
-	k8s.io/client-go v0.23.3
+	k8s.io/api v0.23.14
+	k8s.io/apimachinery v0.23.14
+	k8s.io/client-go v0.23.14
 	k8s.io/klog/v2 v2.80.1
-	k8s.io/kubernetes v1.23.3
-	k8s.io/mount-utils v0.23.3
+	k8s.io/kubernetes v1.23.14
+	k8s.io/mount-utils v0.23.14
 	k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
 	sigs.k8s.io/yaml v1.2.0
 )
@@ -87,44 +87,44 @@ require (
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/apiserver v0.23.3 // indirect
-	k8s.io/cloud-provider v0.23.3 // indirect
-	k8s.io/component-base v0.23.3 // indirect
-	k8s.io/component-helpers v0.23.3 // indirect
+	k8s.io/apiserver v0.23.14 // indirect
+	k8s.io/cloud-provider v0.23.14 // indirect
+	k8s.io/component-base v0.23.14 // indirect
+	k8s.io/component-helpers v0.23.14 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
 	k8s.io/kubelet v0.0.0 // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 // indirect
 	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
 )

 replace (
-	k8s.io/api => k8s.io/api v0.23.3
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
-	k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
-	k8s.io/apiserver => k8s.io/apiserver v0.23.3
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
-	k8s.io/client-go => k8s.io/client-go v0.23.3
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
-	k8s.io/code-generator => k8s.io/code-generator v0.23.3
-	k8s.io/component-base => k8s.io/component-base v0.23.3
-	k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
-	k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
-	k8s.io/cri-api => k8s.io/cri-api v0.23.3
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
-	k8s.io/kubectl => k8s.io/kubectl v0.23.3
-	k8s.io/kubelet => k8s.io/kubelet v0.23.3
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
-	k8s.io/metrics => k8s.io/metrics v0.23.3
+	k8s.io/api => k8s.io/api v0.23.14
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.14
+	k8s.io/apimachinery => k8s.io/apimachinery v0.23.14
+	k8s.io/apiserver => k8s.io/apiserver v0.23.14
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.14
+	k8s.io/client-go => k8s.io/client-go v0.23.14
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.14
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.14
+	k8s.io/code-generator => k8s.io/code-generator v0.23.14
+	k8s.io/component-base => k8s.io/component-base v0.23.14
+	k8s.io/component-helpers => k8s.io/component-helpers v0.23.14
+	k8s.io/controller-manager => k8s.io/controller-manager v0.23.14
+	k8s.io/cri-api => k8s.io/cri-api v0.23.14
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.14
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.14
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.14
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.14
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.14
+	k8s.io/kubectl => k8s.io/kubectl v0.23.14
+	k8s.io/kubelet => k8s.io/kubelet v0.23.14
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.14
+	k8s.io/metrics => k8s.io/metrics v0.23.14
 	k8s.io/mount-utils => k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2
-	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
-	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3
-	k8s.io/sample-controller => k8s.io/sample-controller v0.23.3
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.14
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.14
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.14
+	k8s.io/sample-controller => k8s.io/sample-controller v0.23.14
 )

go.sum

@@ -271,7 +271,7 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
+github.com/google/cadvisor v0.43.1/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ=
 github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
 github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -641,6 +641,8 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0H
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -1115,27 +1117,27 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
-k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
-k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEUDF+hrpIQJL38=
-k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk=
-k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
-k8s.io/apiserver v0.23.3 h1:gWY1DmA0AdAGR/H+Q/1FtyGkFq8xqSaZOw7oLopmO8k=
-k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4=
-k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc=
-k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs=
-k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE=
-k8s.io/cloud-provider v0.23.3 h1:/3hcsBAyG5VpeDAyJQvRqf5US4dHU0Hu57MJiq4zG/w=
-k8s.io/cloud-provider v0.23.3/go.mod h1:Ik+pKlpPOp0Zs906xyOpT3g2xB9A8VGNdejMTZS6EeA=
-k8s.io/cluster-bootstrap v0.23.3/go.mod h1:NwUIksUHKNOKIHg/AfLH4NxqylbfEVXUh9EX2NxHZII=
-k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
-k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY=
-k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg=
-k8s.io/component-helpers v0.23.3 h1:OzuQpfsJsjGvT2nYnp0JsyxpGbnsv0GSvRlIkMKx+I8=
-k8s.io/component-helpers v0.23.3/go.mod h1:SH+W/WPTaTenbWyDEeY7iytAQiMh45aqKxkvlqQ57cg=
-k8s.io/controller-manager v0.23.3/go.mod h1:E0ss6ogA93sZ+AuibQSa7H4xWIiICTYFjowkjellVeU=
-k8s.io/cri-api v0.23.3/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
-k8s.io/csi-translation-lib v0.23.3/go.mod h1:8J7hpeqMoCJWofd1lCs4vZrEshdbVYrqurFeB6GZ/+E=
+k8s.io/api v0.23.14 h1:1gRN0u72YpSEsnnyM5P16XNbRztFefV8pU+mb8aC4Io=
+k8s.io/api v0.23.14/go.mod h1:sWeFZzeE9lLU75pogeht6lIcAP+wkqzTlM8V/REymxs=
+k8s.io/apiextensions-apiserver v0.23.14/go.mod h1:p3aZ4Flyg0h7A0cfRSVn3B+32idn/qzc52l/RhVtOUE=
+k8s.io/apimachinery v0.23.14 h1:UNkDdlj663kkbFiayTMRr37YIE4b7LtIm9gFnVlpfqA=
+k8s.io/apimachinery v0.23.14/go.mod h1:qd/bZ1ajQ8MklCd0DlIwLhjcs1kr7QurGOQUS59A7XE=
+k8s.io/apiserver v0.23.14 h1:Ci2POk+ZAqEju/KdteVQD4kJKEFxMQ4ttKXQTpeW888=
+k8s.io/apiserver v0.23.14/go.mod h1:gjuGlqHbtC8NKIiEswwh6gglJHbQgTtP2fVZOq8u+nQ=
+k8s.io/cli-runtime v0.23.14/go.mod h1:1uLxnOWO7GRVIsyvSTMDgozrQUkldukBZPoFmfygz5Q=
+k8s.io/client-go v0.23.14 h1:nwrCHBj6hXHehOg3lSwGZYo+T/acwfIL6tMWkT2u+mQ=
+k8s.io/client-go v0.23.14/go.mod h1:nKdHtC7qNAkMP0seRU49pdP9tK0lobkM2n5Ljg4r+VI=
+k8s.io/cloud-provider v0.23.14 h1:M6e3+cJNAOvyPUSjsNH4kLVqchcbsvJ0RJXhCiHR7og=
+k8s.io/cloud-provider v0.23.14/go.mod h1:5X8oZsD5Q0u9uD1hwxClT2UVI7/eB/Kp1TflUHAfg84=
+k8s.io/cluster-bootstrap v0.23.14/go.mod h1:RX2htmL8/bO/MDK6N4F58k6wB7LinfHli96klkr2BKg=
+k8s.io/code-generator v0.23.14/go.mod h1:3pOEhV/pmJcwFtt3zfwaArTMflocYMgjYIgM0zUIjAs=
+k8s.io/component-base v0.23.14 h1:ghITiT9qmYms+XjrYVM2re5leCT+LrZ35fx3wIeiYHE=
+k8s.io/component-base v0.23.14/go.mod h1:sY0Bdtshi6Yab/KeVgkWKBqotIAuOGCKINBeFyazUeE=
+k8s.io/component-helpers v0.23.14 h1:WEUfpxuighRwHG7F2DqVx98lBpSrFLvipRo63dCKvWQ=
+k8s.io/component-helpers v0.23.14/go.mod h1:VzqGWNjq7L0O3Ivd/45R8DxrhhHxW6eMFjAWrpCpUtY=
+k8s.io/controller-manager v0.23.14/go.mod h1:5NKgyx6MNg5YhYWBJpON7WrPoMIBYIgqBuS7Uu7RmH4=
+k8s.io/cri-api v0.23.14/go.mod h1:WtUZS6AZ9GadZ3xXRbujziML9XcA1+5HBWPJe/AG0tU=
+k8s.io/csi-translation-lib v0.23.14/go.mod h1:VPxHAWjv5jQklh/zSYRh3zf7r5lsHBgOjpykuVEImeQ=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
@@ -1144,25 +1146,25 @@ k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
 k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-aggregator v0.23.3/go.mod h1:pt5QJ3QaIdhZzNlUvN5wndbM0LNT4BvhszGkzy2QdFo=
-k8s.io/kube-controller-manager v0.23.3/go.mod h1:e8m5dhjei67DlLZA/QTvenxiGyonG9UhgHtU1LMslJE=
+k8s.io/kube-aggregator v0.23.14/go.mod h1:bblLuI9g/DgJcZtVa5x6CTC1MtGvJ7GX5FBFOf0mCvU=
+k8s.io/kube-controller-manager v0.23.14/go.mod h1:srtedngV7XEEqYvmS49PPPQLArSFyhVHB4S1R93xnhU=
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
 k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
-k8s.io/kube-proxy v0.23.3/go.mod h1:XdvwqJkR9r0ddUAX4ruA4V22Kws3qzKvgL3rIq584Ko=
-k8s.io/kube-scheduler v0.23.3/go.mod h1:/thFQoAMv9/olDOEYVSQbUohmkJJyIPUmpVu0UealSM=
-k8s.io/kubectl v0.23.3 h1:gJsF7cahkWDPYlNvYKK+OrBZLAJUBzCym+Zsi+dfi1E=
-k8s.io/kubectl v0.23.3/go.mod h1:VBeeXNgLhSabu4/k0O7Q0YujgnA3+CLTUE0RcmF73yY=
-k8s.io/kubelet v0.23.3 h1:jYed8HoT0H2zXzf5Av+Ml8z5erN39uJfKh/yplYMgkg=
-k8s.io/kubelet v0.23.3/go.mod h1:RZxGSCsiwoWJ9z6mVla+jhiLfCFIKC16yAS38D7GQSE=
-k8s.io/kubernetes v1.23.3 h1:weuFJOkRP7+057uvhNUYbVTVCog/klquhbtKRD+UHUo=
-k8s.io/kubernetes v1.23.3/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io=
-k8s.io/legacy-cloud-providers v0.23.3/go.mod h1:s9vv59dUv4SU+HAm9C/YDdyw2OY9qmFYmcGEwr/ecDc=
-k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE=
+k8s.io/kube-proxy v0.23.14/go.mod h1:vl1BS3CWnkRjwsiUncL9ZteI1qdyhn+6W6GeBAQt27M=
+k8s.io/kube-scheduler v0.23.14/go.mod h1:mhxqa0sV6OTdYAE7Fg5/QwdgPEE3pSYK89LWs5qm5GY=
+k8s.io/kubectl v0.23.14 h1:1UuVZLdFaI0pX2kGEigu1VTlKqfkg2tPu3zG/YFaVHM=
+k8s.io/kubectl v0.23.14/go.mod h1:a4hkiz1G+fe+K4Tp15Xn49qj/HlIbHnDBysRipRyh1Q=
+k8s.io/kubelet v0.23.14 h1:cENAgyd+KhYchf/JRopKPUG0FafxrmcTaGP53LYoKSQ=
+k8s.io/kubelet v0.23.14/go.mod h1:YshK+1lzYbGD4bFLMVg0jYQkxISz+fbokNsStcu4R/k=
+k8s.io/kubernetes v1.23.14 h1:xqSVDhItGWCsCzZlnj2PHDkDAofaqvPHmKYW7kr8wUE=
+k8s.io/kubernetes v1.23.14/go.mod h1:Z0W84JU7x94gywsN90lP3jNosay9t86a5/qTuaeyMk0=
+k8s.io/legacy-cloud-providers v0.23.14/go.mod h1:6if2+guj6PmzTmXhwQ38yX88oh2Jznqjqu5jIyYaJww=
+k8s.io/metrics v0.23.14/go.mod h1:OySm7Obgl95KjBOjoGtr3is3gj/uxBPKwS6SlnZB5rw=
 k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2 h1:kfACKquxtsEA7XXDy+iC92lg/1stK0UtzAhf7R2Y8Fc=
 k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2/go.mod h1:au99w4FWU5ZWelLb3Yx6kJc8RZ387IyWVM9tN65Yhxo=
-k8s.io/pod-security-admission v0.23.3/go.mod h1:vULEGUgsujyrKBz3RRRZnvrJJt115gu0GICArDmgzqo=
-k8s.io/sample-apiserver v0.23.3/go.mod h1:5yDZRMfFvp7/2BOXBwk0AFNsD00iyuXeEsWZSoLFeGw=
+k8s.io/pod-security-admission v0.23.14/go.mod h1:HYKZp5EyDzZ79mcfoogbTQ6T7L9BWyxuLezsnb/iaL0=
+k8s.io/sample-apiserver v0.23.14/go.mod h1:LqP1Sz+qqPLNJ6PjpGPUy3MQ+ai6j2QcXETLfaI1OEg=
 k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@@ -1178,8 +1180,8 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27 h1:KQOkVzXrLNb0EP6W0FD6u3CCPAwgXFYwZitbj7K0P0Y=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33 h1:LYqFq+6Cj2D0gFfrJvL7iElD4ET6ir3VDdhDdTK7rgc=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33/go.mod h1:soWkSNf2tZC7aMibXEqVhCd73GOY5fJikn8qbdzemB0=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
 sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=


@@ -748,6 +748,7 @@ message StatefulSetStatus {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	optional int32 availableReplicas = 11;
 }

vendor/k8s.io/api/apps/v1/types.go

@@ -272,6 +272,7 @@ type StatefulSetStatus struct {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"`
 }


@@ -502,6 +502,7 @@ message StatefulSetStatus {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	optional int32 availableReplicas = 11;
 }


@@ -310,6 +310,7 @@ type StatefulSetStatus struct {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"`
 }


@@ -790,6 +790,7 @@ message StatefulSetStatus {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	optional int32 availableReplicas = 11;
 }


@@ -320,6 +320,7 @@ type StatefulSetStatus struct {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
 	// This is a beta field and enabled/disabled by StatefulSetMinReadySeconds feature gate.
+	// +optional
 	AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,11,opt,name=availableReplicas"`
 }


@@ -289,10 +289,9 @@ type UncountedTerminatedPods struct {
 	Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"`
 }

-// +enum
 type JobConditionType string

-// These are valid conditions of a job.
+// These are built-in conditions of a job.
 const (
 	// JobSuspended means the job has been suspended.
 	JobSuspended JobConditionType = "Suspended"


@@ -214,7 +214,6 @@ type CertificateSigningRequestStatus struct {
 }

 // RequestConditionType is the type of a CertificateSigningRequestCondition
-// +enum
 type RequestConditionType string

 // Well-known condition types for certificate requests.

vendor/k8s.io/api/core/v1/types.go

@@ -522,7 +522,6 @@ type PersistentVolumeClaimSpec struct {
 }

 // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
-// +enum
 type PersistentVolumeClaimConditionType string

 const (
@@ -2635,10 +2634,9 @@ const (
 )

 // PodConditionType is a valid value for PodCondition.Type
-// +enum
 type PodConditionType string

-// These are valid conditions of pod.
+// These are built-in conditions of pod. An application may use a custom condition not listed here.
 const (
 	// ContainersReady indicates whether all containers in the pod are ready.
 	ContainersReady PodConditionType = "ContainersReady"
@@ -5055,11 +5053,10 @@ const (
 	NodeTerminated NodePhase = "Terminated"
 )

-// +enum
 type NodeConditionType string

-// These are valid conditions of node. Currently, we don't have enough information to decide
-// node condition. In the future, we will add more. The proposed set of conditions are:
+// These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here.
+// The built-in set of conditions are:
 // NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
 const (
 	// NodeReady means kubelet is healthy and ready to accept pods.
@@ -5094,10 +5091,9 @@ type NodeCondition struct {
 	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
 }

-// +enum
 type NodeAddressType string

-// These are valid address type of node.
+// These are built-in addresses type of node. A cloud provider may set a type not listed here.
 const (
 	// NodeHostName identifies a name of the node. Although every node can be assumed
 	// to have a NodeAddress of this type, its exact syntax and semantics are not
@@ -5267,10 +5263,9 @@ const (
 	NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
 )

-// +enum
 type NamespaceConditionType string

-// These are valid conditions of a namespace.
+// These are built-in conditions of a namespace.
 const (
 	// NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery.
 	NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure"
@@ -5758,8 +5753,8 @@ type EventList struct {
 // List holds a list of objects, which may not be known by the server.
 type List metav1.List

-// LimitType is a type of object that is limited
-// +enum
+// LimitType is a type of object that is limited. It can be Pod, Container, PersistentVolumeClaim or
+// a fully qualified resource name.
 type LimitType string

 const (


@@ -144,7 +144,6 @@ message PodDisruptionBudgetSpec {
 	// A null selector selects no pods.
 	// An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods.
 	// In policy/v1, an empty selector will select all pods in the namespace.
-	// +patchStrategy=replace
 	// +optional
 	optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;


@@ -36,9 +36,8 @@ type PodDisruptionBudgetSpec struct {
 	// A null selector selects no pods.
 	// An empty selector ({}) also selects no pods, which differs from standard behavior of selecting all pods.
 	// In policy/v1, an empty selector will select all pods in the namespace.
-	// +patchStrategy=replace
 	// +optional
-	Selector *metav1.LabelSelector `json:"selector,omitempty" patchStrategy:"replace" protobuf:"bytes,2,opt,name=selector"`
+	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`

 	// An eviction is allowed if at most "maxUnavailable" pods selected by
 	// "selector" are unavailable after the eviction, i.e. even in absence of


@@ -27,9 +27,12 @@ func (m *MicroTime) ProtoMicroTime() *Timestamp {
 	if m == nil {
 		return &Timestamp{}
 	}
+
+	// truncate precision to microseconds to match JSON marshaling/unmarshaling
+	truncatedNanoseconds := time.Duration(m.Time.Nanosecond()).Truncate(time.Microsecond)
 	return &Timestamp{
 		Seconds: m.Time.Unix(),
-		Nanos:   int32(m.Time.Nanosecond()),
+		Nanos:   int32(truncatedNanoseconds),
 	}
 }

@@ -51,7 +54,10 @@ func (m *MicroTime) Unmarshal(data []byte) error {
 	if err := p.Unmarshal(data); err != nil {
 		return err
 	}
-	m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local()
+
+	// truncate precision to microseconds to match JSON marshaling/unmarshaling
+	truncatedNanoseconds := time.Duration(p.Nanos).Truncate(time.Microsecond)
+
+	m.Time = time.Unix(p.Seconds, int64(truncatedNanoseconds)).Local()
 	return nil
 }
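
The truncation above keeps protobuf round-trips consistent with JSON marshaling, which only preserves microsecond precision. A minimal, standard-library-only sketch of the arithmetic (not part of the patch):

package main

import (
	"fmt"
	"time"
)

func main() {
	// 123456789ns carries sub-microsecond digits that JSON marshaling drops.
	nanos := int32(123456789)
	truncated := time.Duration(nanos).Truncate(time.Microsecond)
	fmt.Println(truncated.Nanoseconds()) // 123456000: the trailing 789ns are discarded
}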


@@ -48,6 +48,7 @@ func (u union) ProcessEvents(events ...*auditinternal.Event) bool {
 func (u union) Run(stopCh <-chan struct{}) error {
 	var funcs []func() error
 	for _, backend := range u.backends {
+		backend := backend
 		funcs = append(funcs, func() error {
 			return backend.Run(stopCh)
 		})
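
The added `backend := backend` is the standard pre-Go 1.22 fix for closures capturing a shared loop variable. A self-contained sketch of the bug it prevents (illustrative, not the audit code):

package main

import "fmt"

func main() {
	var funcs []func()
	for _, v := range []int{1, 2, 3} {
		v := v // without this shadowing, all closures share one variable (pre-Go 1.22 semantics)
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints 1, 2, 3; dropping the shadow line prints 3, 3, 3 on Go < 1.22
	}
}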


@@ -413,6 +413,10 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
 func NewForConfig(c *rest.Config) (*Clientset, error) {
 	configShallowCopy := *c
+
+	if configShallowCopy.UserAgent == "" {
+		configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+
 	// share the transport between all clients
 	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
 	if err != nil {


@@ -1 +0,0 @@
-base.go export-subst


@@ -201,7 +201,6 @@ func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecC
 		now:     time.Now,
 		environ: os.Environ,
-		defaultDialer: defaultDialer,
 		connTracker: connTracker,
 	}

@@ -209,6 +208,11 @@
 		a.env = append(a.env, env.Name+"="+env.Value)
 	}

+	// these functions are made comparable and stored in the cache so that repeated clientset
+	// construction with the same rest.Config results in a single TLS cache and Authenticator
+	a.getCert = &transport.GetCertHolder{GetCert: a.cert}
+	a.dial = &transport.DialHolder{Dial: defaultDialer.DialContext}
+
 	return c.put(key, a), nil
 }

@@ -263,8 +267,6 @@ type Authenticator struct {
 	now     func() time.Time
 	environ func() []string

-	// defaultDialer is used for clients which don't specify a custom dialer
-	defaultDialer *connrotation.Dialer
-
 	// connTracker tracks all connections opened that we need to close when rotating a client certificate
 	connTracker *connrotation.ConnectionTracker

@@ -275,6 +277,12 @@ type Authenticator struct {
 	mu          sync.Mutex
 	cachedCreds *credentials
 	exp         time.Time
+
+	// getCert makes Authenticator.cert comparable to support TLS config caching
+	getCert *transport.GetCertHolder
+	// dial is used for clients which do not specify a custom dialer
+	// it is comparable to support TLS config caching
+	dial *transport.DialHolder
 }

 type credentials struct {

@@ -302,17 +310,19 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
 	if c.TLS.GetCert != nil {
 		return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set")
 	}
-	c.TLS.GetCert = a.cert
+	c.TLS.GetCert = a.getCert.GetCert
+	c.TLS.GetCertHolder = a.getCert // comparable for TLS config caching

-	var d *connrotation.Dialer
 	if c.Dial != nil {
 		// if c has a custom dialer, we have to wrap it
-		d = connrotation.NewDialerWithTracker(c.Dial, a.connTracker)
-	} else {
-		d = a.defaultDialer
-	}
+		// TLS config caching is not supported for this config
+		d := connrotation.NewDialerWithTracker(c.Dial, a.connTracker)
 		c.Dial = d.DialContext
+		c.DialHolder = nil
+	} else {
+		c.Dial = a.dial.Dial
+		c.DialHolder = a.dial // comparable for TLS config caching
+	}

 	return nil
 }


@@ -82,6 +82,12 @@ func (r *RequestConstructionError) Error() string {
 var noBackoff = &NoBackoff{}

+type requestRetryFunc func(maxRetries int) WithRetry
+
+func defaultRequestRetryFn(maxRetries int) WithRetry {
+	return &withRetry{maxRetries: maxRetries}
+}
+
 // Request allows for building up a request to a server in a chained fashion.
 // Any errors are stored until the end of your call, so you only have to
 // check once.

@@ -93,6 +99,7 @@ type Request struct {
 	rateLimiter flowcontrol.RateLimiter
 	backoff     BackoffManager
 	timeout     time.Duration
+	maxRetries  int

 	// generic components accessible via method setters
 	verb string

@@ -111,7 +118,8 @@ type Request struct {
 	// output
 	err  error
 	body io.Reader
-	retry WithRetry
+
+	retryFn requestRetryFunc
 }

@@ -142,7 +150,8 @@ func NewRequest(c *RESTClient) *Request {
 		backoff:    backoff,
 		timeout:    timeout,
 		pathPrefix: pathPrefix,
-		retry:      &withRetry{maxRetries: 10},
+		maxRetries: 10,
+		retryFn:    defaultRequestRetryFn,
 		warningHandler: c.warningHandler,
 	}

@@ -408,7 +417,10 @@
 // function is specifically called with a different value.
 // A zero maxRetries prevent it from doing retires and return an error immediately.
 func (r *Request) MaxRetries(maxRetries int) *Request {
-	r.retry.SetMaxRetries(maxRetries)
+	if maxRetries < 0 {
+		maxRetries = 0
+	}
+	r.maxRetries = maxRetries
 	return r
 }

@@ -688,8 +700,10 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
 		}
 		return false
 	}

 	var retryAfter *RetryAfter
 	url := r.URL().String()
+	withRetry := r.retryFn(r.maxRetries)
 	for {
 		req, err := r.newHTTPRequest(ctx)
 		if err != nil {

@@ -724,9 +738,9 @@
 		defer readAndCloseResponseBody(resp)

 		var retry bool
-		retryAfter, retry = r.retry.NextRetry(req, resp, err, isErrRetryableFunc)
+		retryAfter, retry = withRetry.NextRetry(req, resp, err, isErrRetryableFunc)
 		if retry {
-			err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
+			err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
 			if err == nil {
 				return false, nil
 			}

@@ -817,6 +831,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
 	}

 	var retryAfter *RetryAfter
+	withRetry := r.retryFn(r.maxRetries)
 	url := r.URL().String()
 	for {
 		req, err := r.newHTTPRequest(ctx)

@@ -862,9 +877,9 @@
 		defer resp.Body.Close()

 		var retry bool
-		retryAfter, retry = r.retry.NextRetry(req, resp, err, neverRetryError)
+		retryAfter, retry = withRetry.NextRetry(req, resp, err, neverRetryError)
 		if retry {
-			err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
+			err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
 			if err == nil {
 				return false, nil
 			}

@@ -961,6 +976,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
 	// Right now we make about ten retry attempts if we get a Retry-After response.
 	var retryAfter *RetryAfter
+	withRetry := r.retryFn(r.maxRetries)

 	for {
 		req, err := r.newHTTPRequest(ctx)
 		if err != nil {

@@ -997,7 +1013,7 @@
 		}

 		var retry bool
-		retryAfter, retry = r.retry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
+		retryAfter, retry = withRetry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
 			// "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
 			// Thus in case of "GET" operations, we simply retry it.
 			// We are not automatically retrying "write" operations, as they are not idempotent.

@@ -1011,7 +1027,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
 			return false
 		})
 		if retry {
-			err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
+			err := withRetry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
 			if err == nil {
 				return false
 			}
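
The refactor stops storing one mutable retry object on the Request; it keeps a maxRetries count plus a factory instead, so Watch, Stream, and the main request loop each build fresh retry state per invocation. A simplified sketch of the pattern (illustrative names, not the client-go API):

package main

import "fmt"

type retrier struct{ max, attempts int }

type request struct {
	maxRetries int
	retryFn    func(max int) *retrier // factory: fresh state per call
}

func (r *request) do() {
	w := r.retryFn(r.maxRetries) // no state shared across invocations
	fmt.Printf("new retrier: %+v\n", *w)
}

func main() {
	r := &request{maxRetries: 10, retryFn: func(max int) *retrier { return &retrier{max: max} }}
	r.do()
	r.do() // the second call does not see attempt counts from the first
}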


@@ -284,18 +284,15 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
 			c.indices[name] = index
 		}

+		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+			// We optimize for the most common case where indexFunc returns a single value which has not been changed
+			continue
+		}
+
 		for _, value := range oldIndexValues {
-			// We optimize for the most common case where index returns a single value.
-			if len(indexValues) == 1 && value == indexValues[0] {
-				continue
-			}
 			c.deleteKeyFromIndex(key, value, index)
 		}
 		for _, value := range indexValues {
-			// We optimize for the most common case where index returns a single value.
-			if len(oldIndexValues) == 1 && value == oldIndexValues[0] {
-				continue
-			}
 			c.addKeyToIndex(key, value, index)
 		}
 	}
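
The rewritten fast path only skips index maintenance when the old and new objects both map to the same single value; the previous per-value shortcuts could leave stale entries when an index function returned multiple values. A compact sketch of the corrected control flow (hypothetical helper, not the client-go code):

package main

import "fmt"

// updateIndex is a simplified stand-in for threadSafeMap.updateIndices.
func updateIndex(key string, oldVals, newVals []string) {
	// Fast path: one unchanged value on both sides, nothing to do.
	if len(newVals) == 1 && len(oldVals) == 1 && newVals[0] == oldVals[0] {
		return
	}
	// Otherwise do a full delete-then-add so multi-value indices stay consistent.
	for _, v := range oldVals {
		fmt.Println("delete", key, "from bucket", v)
	}
	for _, v := range newVals {
		fmt.Println("add", key, "to bucket", v)
	}
}

func main() {
	updateIndex("pod-a", []string{"ns1"}, []string{"ns1"})         // fast path: no output
	updateIndex("pod-b", []string{"ns1", "ns2"}, []string{"ns1"}) // full update
}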


@@ -51,10 +51,10 @@ func (a *PromptingAuthLoader) LoadAuth(path string) (*clientauth.Info, error) {
 	// Prompt for user/pass and write a file if none exists.
 	if _, err := os.Stat(path); os.IsNotExist(err) {
 		authPtr, err := a.Prompt()
-		auth := *authPtr
 		if err != nil {
 			return nil, err
 		}
+		auth := *authPtr
 		data, err := json.Marshal(auth)
 		if err != nil {
 			return &auth, err


@@ -17,6 +17,7 @@ limitations under the License.
 package transport

 import (
+	"context"
 	"fmt"
 	"net"
 	"net/http"

@@ -50,6 +51,9 @@ type tlsCacheKey struct {
 	serverName         string
 	nextProtos         string
 	disableCompression bool
+	// these functions are wrapped to allow them to be used as map keys
+	getCert *GetCertHolder
+	dial    *DialHolder
 }

 func (t tlsCacheKey) String() string {

@@ -57,7 +61,8 @@
 	if len(t.keyData) > 0 {
 		keyText = "<redacted>"
 	}
-	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t", t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression)
+	return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, serverName:%s, disableCompression:%t, getCert:%p, dial:%p",
+		t.insecure, t.caData, t.certData, keyText, t.serverName, t.disableCompression, t.getCert, t.dial)
 }

 func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {

@@ -87,8 +92,10 @@ func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
 		return http.DefaultTransport, nil
 	}

-	dial := config.Dial
-	if dial == nil {
+	var dial func(ctx context.Context, network, address string) (net.Conn, error)
+	if config.Dial != nil {
+		dial = config.Dial
+	} else {
 		dial = (&net.Dialer{
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,

@@ -133,10 +140,18 @@ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
 		return tlsCacheKey{}, false, err
 	}

-	if c.TLS.GetCert != nil || c.Dial != nil || c.Proxy != nil {
+	if c.Proxy != nil {
 		// cannot determine equality for functions
 		return tlsCacheKey{}, false, nil
 	}
+	if c.Dial != nil && c.DialHolder == nil {
+		// cannot determine equality for dial function that doesn't have non-nil DialHolder set as well
+		return tlsCacheKey{}, false, nil
+	}
+	if c.TLS.GetCert != nil && c.TLS.GetCertHolder == nil {
+		// cannot determine equality for getCert function that doesn't have non-nil GetCertHolder set as well
+		return tlsCacheKey{}, false, nil
+	}

 	k := tlsCacheKey{
 		insecure: c.TLS.Insecure,

@@ -144,6 +159,8 @@ func tlsConfigKey(c *Config) (tlsCacheKey, bool, error) {
 		serverName:         c.TLS.ServerName,
 		nextProtos:         strings.Join(c.TLS.NextProtos, ","),
 		disableCompression: c.DisableCompression,
+		getCert:            c.TLS.GetCertHolder,
+		dial:               c.DialHolder,
 	}

 	if c.TLS.ReloadTLSFiles {


@@ -68,7 +68,11 @@ type Config struct {
 	WrapTransport WrapperFunc

 	// Dial specifies the dial function for creating unencrypted TCP connections.
+	// If specified, this transport will be non-cacheable unless DialHolder is also set.
 	Dial func(ctx context.Context, network, address string) (net.Conn, error)

+	// DialHolder can be populated to make transport configs cacheable.
+	// If specified, DialHolder.Dial must be equal to Dial.
+	DialHolder *DialHolder
+
 	// Proxy is the proxy func to be used for all requests made by this
 	// transport. If Proxy is nil, http.ProxyFromEnvironment is used. If Proxy

@@ -78,6 +82,11 @@ type Config struct {
 	Proxy func(*http.Request) (*url.URL, error)
 }

+// DialHolder is used to make the wrapped function comparable so that it can be used as a map key.
+type DialHolder struct {
+	Dial func(ctx context.Context, network, address string) (net.Conn, error)
+}
+
 // ImpersonationConfig has all the available impersonation options
 type ImpersonationConfig struct {
 	// UserName matches user.Info.GetName()

@@ -143,5 +152,15 @@ type TLSConfig struct {
 	// To use only http/1.1, set to ["http/1.1"].
 	NextProtos []string

-	GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field.
+	// Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field.
+	// If specified, this transport is non-cacheable unless CertHolder is populated.
+	GetCert func() (*tls.Certificate, error)
+
+	// CertHolder can be populated to make transport configs that set GetCert cacheable.
+	// If set, CertHolder.GetCert must be equal to GetCert.
+	GetCertHolder *GetCertHolder
+}
+
+// GetCertHolder is used to make the wrapped function comparable so that it can be used as a map key.
+type GetCertHolder struct {
+	GetCert func() (*tls.Certificate, error)
 }
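
Go function values are not comparable, so they cannot appear in a struct used as a map key; wrapping the function in a struct and keying on the struct pointer restores identity-based comparability, which is what GetCertHolder and DialHolder provide. A minimal demonstration with illustrative names:

package main

import "fmt"

// Holder mirrors the GetCertHolder/DialHolder pattern: a func value itself
// cannot be part of a comparable cache key, but a pointer to a struct that
// wraps it can.
type Holder struct {
	Fn func() string
}

func main() {
	h := &Holder{Fn: func() string { return "hello" }}
	cache := map[*Holder]string{} // pointer keys are comparable
	cache[h] = "cached transport"
	fmt.Println(cache[h])
	// Reusing the same holder (not merely an equivalent func) is what lets
	// repeated clientset construction hit the same cache entry.
}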


@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io/ioutil"
 	"net/http"
+	"reflect"
 	"sync"
 	"time"

@@ -39,6 +40,10 @@ func New(config *Config) (http.RoundTripper, error) {
 		return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed")
 	}

+	if !isValidHolders(config) {
+		return nil, fmt.Errorf("misconfigured holder for dialer or cert callback")
+	}
+
 	var (
 		rt  http.RoundTripper
 		err error

@@ -56,6 +61,26 @@
 	return HTTPWrappersForConfig(config, rt)
 }

+func isValidHolders(config *Config) bool {
+	if config.TLS.GetCertHolder != nil {
+		if config.TLS.GetCertHolder.GetCert == nil ||
+			config.TLS.GetCert == nil ||
+			reflect.ValueOf(config.TLS.GetCertHolder.GetCert).Pointer() != reflect.ValueOf(config.TLS.GetCert).Pointer() {
+			return false
+		}
+	}
+
+	if config.DialHolder != nil {
+		if config.DialHolder.Dial == nil ||
+			config.Dial == nil ||
+			reflect.ValueOf(config.DialHolder.Dial).Pointer() != reflect.ValueOf(config.Dial).Pointer() {
+			return false
+		}
+	}
+
+	return true
+}
+
 // TLSConfigFor returns a tls.Config that will provide the transport level security defined
 // by the provided Config. Will return nil if no transport level security is requested.
 func TLSConfigFor(c *Config) (*tls.Config, error) {


@@ -1 +0,0 @@
-base.go export-subst


@@ -301,12 +301,28 @@ func IsPodReady(pod *v1.Pod) bool {
 	return IsPodReadyConditionTrue(pod.Status)
 }

+// IsPodTerminal returns true if a pod is terminal, all containers are stopped and cannot ever regress.
+func IsPodTerminal(pod *v1.Pod) bool {
+	return IsPodPhaseTerminal(pod.Status.Phase)
+}
+
+// IsPhaseTerminal returns true if the pod's phase is terminal.
+func IsPodPhaseTerminal(phase v1.PodPhase) bool {
+	return phase == v1.PodFailed || phase == v1.PodSucceeded
+}
+
 // IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
 func IsPodReadyConditionTrue(status v1.PodStatus) bool {
 	condition := GetPodReadyCondition(status)
 	return condition != nil && condition.Status == v1.ConditionTrue
 }

+// IsContainersReadyConditionTrue returns true if a pod is ready; false otherwise.
+func IsContainersReadyConditionTrue(status v1.PodStatus) bool {
+	condition := GetContainersReadyCondition(status)
+	return condition != nil && condition.Status == v1.ConditionTrue
+}
+
 // GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
 // Returns nil if the condition is not present.
 func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {

@@ -314,6 +330,13 @@ func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
 	return condition
 }

+// GetContainersReadyCondition extracts the containers ready condition from the given status and returns that.
+// Returns nil if the condition is not present.
+func GetContainersReadyCondition(status v1.PodStatus) *v1.PodCondition {
+	_, condition := GetPodCondition(&status, v1.ContainersReady)
+	return condition
+}
+
 // GetPodCondition extracts the provided condition from the given status and returns that.
 // Returns nil and -1 if the condition is not present, and the index of the located condition.
 func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {


@@ -246,6 +246,7 @@ type StatefulSetStatus struct {
 	// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
 	// This is a beta field and requires enabling StatefulSetMinReadySeconds feature gate.
+	// +optional
 	AvailableReplicas int32
 }


@@ -95,7 +95,18 @@ func ValidateEventUpdate(newEvent, oldEvent *core.Event, requestVersion schema.G
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.Count, oldEvent.Count, field.NewPath("count"))...)
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.Reason, oldEvent.Reason, field.NewPath("reason"))...)
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.Type, oldEvent.Type, field.NewPath("type"))...)
-	allErrs = append(allErrs, ValidateImmutableField(newEvent.EventTime, oldEvent.EventTime, field.NewPath("eventTime"))...)
+
+	// Disallow changes to eventTime greater than microsecond-level precision.
+	// Tolerating sub-microsecond changes is required to tolerate updates
+	// from clients that correctly truncate to microsecond-precision when serializing,
+	// or from clients built with incorrect nanosecond-precision protobuf serialization.
+	// See https://github.com/kubernetes/kubernetes/issues/111928
+	newTruncated := newEvent.EventTime.Truncate(time.Microsecond).UTC()
+	oldTruncated := oldEvent.EventTime.Truncate(time.Microsecond).UTC()
+	if newTruncated != oldTruncated {
+		allErrs = append(allErrs, ValidateImmutableField(newEvent.EventTime, oldEvent.EventTime, field.NewPath("eventTime"))...)
+	}
+
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.Action, oldEvent.Action, field.NewPath("action"))...)
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.Related, oldEvent.Related, field.NewPath("related"))...)
 	allErrs = append(allErrs, ValidateImmutableField(newEvent.ReportingController, oldEvent.ReportingController, field.NewPath("reportingController"))...)


@@ -1127,9 +1127,14 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str
 // PatchNodeTaints patches node's taints.
 func PatchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
-	oldData, err := json.Marshal(oldNode)
+	// Strip base diff node from RV to ensure that our Patch request will set RV to check for conflicts over .spec.taints.
+	// This is needed because .spec.taints does not specify patchMergeKey and patchStrategy and adding them is no longer an option for compatibility reasons.
+	// Using other Patch strategy works for adding new taints, however will not resolve problem with taint removal.
+	oldNodeNoRV := oldNode.DeepCopy()
+	oldNodeNoRV.ResourceVersion = ""
+	oldDataNoRV, err := json.Marshal(&oldNodeNoRV)
 	if err != nil {
-		return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
+		return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNodeNoRV, nodeName, err)
 	}

 	newTaints := newNode.Spec.Taints

@@ -1140,7 +1145,7 @@
 		return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
 	}

-	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldDataNoRV, newData, v1.Node{})
 	if err != nil {
 		return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
 	}


@@ -910,7 +910,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	ProcMountType:      {Default: false, PreRelease: featuregate.Alpha},
 	TTLAfterFinished:   {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
 	IndexedJob:         {Default: true, PreRelease: featuregate.Beta},
-	JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.Beta},
+	JobTrackingWithFinalizers: {Default: false, PreRelease: featuregate.Beta}, // Disabled due to #109485
 	JobReadyPods:       {Default: false, PreRelease: featuregate.Alpha},
 	KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
 	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},


@@ -97,7 +97,7 @@ func IsProxyableIP(ip string) error {
 }

 func isProxyableIP(ip net.IP) error {
-	if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() {
+	if !ip.IsGlobalUnicast() {
 		return ErrAddressNotAllowed
 	}
 	return nil
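
Note: IsGlobalUnicast returns false for loopback, link-local unicast and multicast, interface-local multicast, the unspecified address, and every other multicast range, so the single negated call rejects strictly more than the old four-way condition did. A quick sketch of the behavior:

    package main

    import (
    	"fmt"
    	"net"
    )

    func main() {
    	// Only the last address is a proxyable global unicast address.
    	for _, s := range []string{"127.0.0.1", "169.254.0.1", "ff02::1", "0.0.0.0", "10.0.0.1"} {
    		ip := net.ParseIP(s)
    		fmt.Printf("%-12s proxyable=%v\n", s, ip.IsGlobalUnicast())
    	}
    }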


@@ -561,18 +561,23 @@ func doSafeOpen(pathname string, base string) (int, error) {
 	// Follow the segments one by one using openat() to make
 	// sure the user cannot change already existing directories into symlinks.
 	for _, seg := range segments {
+		var deviceStat unix.Stat_t
+
 		currentPath = filepath.Join(currentPath, seg)
 		if !mount.PathWithinBase(currentPath, base) {
 			return -1, fmt.Errorf("path %s is outside of allowed base %s", currentPath, base)
 		}
+
+		// Trigger auto mount if it's an auto-mounted directory, ignore error if not a directory.
+		// Notice the trailing slash is mandatory, see "automount" in openat(2) and open_by_handle_at(2).
+		unix.Fstatat(parentFD, seg+"/", &deviceStat, unix.AT_SYMLINK_NOFOLLOW)
+
 		klog.V(5).Infof("Opening path %s", currentPath)
 		childFD, err = syscall.Openat(parentFD, seg, openFDFlags|unix.O_CLOEXEC, 0)
 		if err != nil {
 			return -1, fmt.Errorf("cannot open %s: %s", currentPath, err)
 		}
-		var deviceStat unix.Stat_t
 		err := unix.Fstat(childFD, &deviceStat)
 		if err != nil {
 			return -1, fmt.Errorf("error running fstat on %s with %v", currentPath, err)


@@ -248,6 +248,9 @@ func (f *Framework) BeforeEach() {
 		ginkgo.By("Waiting for a default service account to be provisioned in namespace")
 		err = WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
 		ExpectNoError(err)
+		ginkgo.By("Waiting for kube-root-ca.crt to be provisioned in namespace")
+		err = WaitForKubeRootCAInNamespace(f.ClientSet, namespace.Name)
+		ExpectNoError(err)
 	} else {
 		Logf("Skipping waiting for service account")
 	}


@@ -34,7 +34,7 @@ import (
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 )

-const etcdImage = "3.5.1-0"
+const etcdImage = "3.5.5-0"

 // EtcdUpgrade upgrades etcd on GCE.
 func EtcdUpgrade(targetStorage, targetVersion string) error {


@@ -316,7 +316,8 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.BoolVar(&TestContext.DumpSystemdJournal, "dump-systemd-journal", false, "Whether to dump the full systemd journal.")
 	flags.StringVar(&TestContext.ImageServiceEndpoint, "image-service-endpoint", "", "The image service endpoint of cluster VM instances.")
 	flags.StringVar(&TestContext.DockershimCheckpointDir, "dockershim-checkpoint-dir", "/var/lib/dockershim/sandbox", "The directory for dockershim to store sandbox checkpoints.")
-	flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests.")
+	// Expect the test suite to work with both the new and legacy non-blocking control plane taints by default
+	flags.StringVar(&TestContext.NonblockingTaints, "non-blocking-taints", `node-role.kubernetes.io/control-plane,node-role.kubernetes.io/master`, "Nodes with taints in this comma-delimited list will not block the test framework from starting tests. The default taint 'node-role.kubernetes.io/master' is DEPRECATED and will be removed from the list in a future release.")
 	flags.BoolVar(&TestContext.ListImages, "list-images", false, "If true, will show list of images used for running tests.")
 	flags.BoolVar(&TestContext.ListConformanceTests, "list-conformance-tests", false, "If true, will show list of conformance tests.")


@@ -282,6 +282,32 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
 	})
 }

+func waitForConfigMapInNamespace(c clientset.Interface, ns, name string, timeout time.Duration) error {
+	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
+	lw := &cache.ListWatch{
+		ListFunc: func(options metav1.ListOptions) (object runtime.Object, e error) {
+			options.FieldSelector = fieldSelector
+			return c.CoreV1().ConfigMaps(ns).List(context.TODO(), options)
+		},
+		WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
+			options.FieldSelector = fieldSelector
+			return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), options)
+		},
+	}
+	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
+	defer cancel()
+	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ConfigMap{}, nil, func(event watch.Event) (bool, error) {
+		switch event.Type {
+		case watch.Deleted:
+			return false, apierrors.NewNotFound(schema.GroupResource{Resource: "configmaps"}, name)
+		case watch.Added, watch.Modified:
+			return true, nil
+		}
+		return false, nil
+	})
+	return err
+}
+
 func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
 	fieldSelector := fields.OneTermEqualSelector("metadata.name", serviceAccountName).String()
 	lw := &cache.ListWatch{
@@ -296,22 +322,16 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
 	}
 	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
 	defer cancel()
-	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ServiceAccount{}, nil, serviceAccountHasSecrets)
-	return err
-}
-
-// serviceAccountHasSecrets returns true if the service account has at least one secret,
-// false if it does not, or an error.
-func serviceAccountHasSecrets(event watch.Event) (bool, error) {
+	_, err := watchtools.UntilWithSync(ctx, lw, &v1.ServiceAccount{}, nil, func(event watch.Event) (bool, error) {
 		switch event.Type {
 		case watch.Deleted:
-			return false, apierrors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, "")
-		}
-		switch t := event.Object.(type) {
-		case *v1.ServiceAccount:
-			return len(t.Secrets) > 0, nil
+			return false, apierrors.NewNotFound(schema.GroupResource{Resource: "serviceaccounts"}, serviceAccountName)
+		case watch.Added, watch.Modified:
+			return true, nil
 		}
 		return false, nil
+	})
+	return err
 }

 // WaitForDefaultServiceAccountInNamespace waits for the default service account to be provisioned
@@ -321,6 +341,13 @@ func WaitForDefaultServiceAccountInNamespace(c clientset.Interface, namespace st
 	return waitForServiceAccountInNamespace(c, namespace, "default", ServiceAccountProvisionTimeout)
 }

+// WaitForKubeRootCAInNamespace waits for the configmap kube-root-ca.crt containing the service account
+// CA trust bundle to be provisioned in the specified namespace so that pods do not have to retry mounting
+// the config map (which creates noise that hides other issues in the Kubelet).
+func WaitForKubeRootCAInNamespace(c clientset.Interface, namespace string) error {
+	return waitForConfigMapInNamespace(c, namespace, "kube-root-ca.crt", ServiceAccountProvisionTimeout)
+}
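
Note: a hypothetical call site (helper name assumed, sketch only) mirroring what BeforeEach now does, blocking on both the default ServiceAccount and the root CA ConfigMap before a test schedules pods:

    package framework // sketch; assumes this package's existing helpers

    import clientset "k8s.io/client-go/kubernetes"

    func ensureNamespaceReady(c clientset.Interface, ns string) error {
    	if err := WaitForDefaultServiceAccountInNamespace(c, ns); err != nil {
    		return err
    	}
    	// Without this wait, pods can start before kube-root-ca.crt exists and
    	// log spurious mount retries.
    	return WaitForKubeRootCAInNamespace(c, ns)
    }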
 // CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
 // Please see NewFramework instead of using this directly.
 func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {


@@ -17,10 +17,13 @@ limitations under the License.
 package image

 import (
+	"bufio"
+	"bytes"
 	"crypto/sha256"
 	"encoding/base64"
 	"fmt"
-	"io/ioutil"
+	"io"
+	"net/http"
 	"os"
 	"regexp"
 	"strings"
@@ -73,9 +76,20 @@ func initReg() RegistryList {
 		return registry
 	}

-	fileContent, err := ioutil.ReadFile(repoList)
-	if err != nil {
-		panic(fmt.Errorf("Error reading '%v' file contents: %v", repoList, err))
+	var fileContent []byte
+	var err error
+	if strings.HasPrefix(repoList, "https://") || strings.HasPrefix(repoList, "http://") {
+		var b bytes.Buffer
+		err = readFromURL(repoList, bufio.NewWriter(&b))
+		if err != nil {
+			panic(fmt.Errorf("error reading '%v' url contents: %v", repoList, err))
+		}
+		fileContent = b.Bytes()
+	} else {
+		fileContent, err = os.ReadFile(repoList)
+		if err != nil {
+			panic(fmt.Errorf("error reading '%v' file contents: %v", repoList, err))
+		}
 	}

 	err = yaml.Unmarshal(fileContent, &registry)
@@ -85,6 +99,27 @@ func initReg() RegistryList {
 	return registry
 }

+// Essentially curl url | writer
+func readFromURL(url string, writer io.Writer) error {
+	httpTransport := new(http.Transport)
+	httpTransport.Proxy = http.ProxyFromEnvironment
+	c := &http.Client{Transport: httpTransport}
+	r, err := c.Get(url)
+	if err != nil {
+		return err
+	}
+	defer r.Body.Close()
+	if r.StatusCode >= 400 {
+		return fmt.Errorf("%v returned %d", url, r.StatusCode)
+	}
+	_, err = io.Copy(writer, r.Body)
+	if err != nil {
+		return err
+	}
+	return nil
+}
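
Note: one caveat when feeding readFromURL a bufio.Writer, as initReg does above: buffered bytes only reach the underlying bytes.Buffer once the bufio buffer fills or Flush is called, so a standalone caller should flush explicitly before consuming the result. A hedged usage sketch (function name and URL are placeholders):

    package main

    import (
    	"bufio"
    	"bytes"
    	"fmt"
    	"io"
    	"net/http"
    )

    // fetch mirrors readFromURL: stream an HTTP body into any io.Writer.
    func fetch(url string, w io.Writer) error {
    	resp, err := http.Get(url)
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()
    	if resp.StatusCode >= 400 {
    		return fmt.Errorf("%v returned %d", url, resp.StatusCode)
    	}
    	_, err = io.Copy(w, resp.Body)
    	return err
    }

    func main() {
    	var b bytes.Buffer
    	w := bufio.NewWriter(&b)
    	if err := fetch("https://example.com/repo-list.yaml", w); err != nil {
    		fmt.Println(err)
    		return
    	}
    	w.Flush() // small responses stay in the bufio buffer until flushed
    	fmt.Printf("read %d bytes\n", b.Len())
    }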
 var (
 	initRegistry = RegistryList{
 		GcAuthenticatedRegistry: "gcr.io/authenticated-image-pulling",
@@ -198,7 +233,7 @@ const (
 func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
 	configs := map[int]Config{}
-	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.33"}
+	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.39"}
 	configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
 	configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
 	configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@@ -210,7 +245,7 @@ func initImageConfigs(list RegistryList) (map[int]Config, map[int]Config) {
 	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.2"}
 	configs[DebianIptables] = Config{list.BuildImageRegistry, "debian-iptables", "bullseye-v1.1.0"}
 	configs[EchoServer] = Config{list.PromoterE2eRegistry, "echoserver", "2.4"}
-	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.1-0"}
+	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.5-0"}
 	configs[GlusterDynamicProvisioner] = Config{list.PromoterE2eRegistry, "glusterdynamic-provisioner", "v1.3"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-2"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-2"}

vendor/modules.txt

@@ -466,7 +466,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.23.3 => k8s.io/api v0.23.3
+# k8s.io/api v0.23.14 => k8s.io/api v0.23.14
 ## explicit; go 1.16
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -515,7 +515,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apimachinery v0.23.3 => k8s.io/apimachinery v0.23.3
+# k8s.io/apimachinery v0.23.14 => k8s.io/apimachinery v0.23.14
 ## explicit; go 1.16
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -571,7 +571,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.23.3 => k8s.io/apiserver v0.23.3
+# k8s.io/apiserver v0.23.14 => k8s.io/apiserver v0.23.14
 ## explicit; go 1.16
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/configuration
@@ -612,7 +612,7 @@ k8s.io/apiserver/pkg/util/feature
 k8s.io/apiserver/pkg/util/webhook
 k8s.io/apiserver/pkg/util/x509metrics
 k8s.io/apiserver/pkg/warning
-# k8s.io/client-go v0.23.3 => k8s.io/client-go v0.23.3
+# k8s.io/client-go v0.23.14 => k8s.io/client-go v0.23.14
 ## explicit; go 1.16
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -852,10 +852,10 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.23.3 => k8s.io/cloud-provider v0.23.3
+# k8s.io/cloud-provider v0.23.14 => k8s.io/cloud-provider v0.23.14
 ## explicit; go 1.16
 k8s.io/cloud-provider
-# k8s.io/component-base v0.23.3 => k8s.io/component-base v0.23.3
+# k8s.io/component-base v0.23.14 => k8s.io/component-base v0.23.14
 ## explicit; go 1.16
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@@ -865,7 +865,7 @@ k8s.io/component-base/metrics/legacyregistry
 k8s.io/component-base/metrics/testutil
 k8s.io/component-base/traces
 k8s.io/component-base/version
-# k8s.io/component-helpers v0.23.3 => k8s.io/component-helpers v0.23.3
+# k8s.io/component-helpers v0.23.14 => k8s.io/component-helpers v0.23.14
 ## explicit; go 1.16
 k8s.io/component-helpers/node/util/sysctl
 k8s.io/component-helpers/scheduling/corev1
@@ -882,14 +882,14 @@ k8s.io/klog/v2/internal/severity
 ## explicit; go 1.16
 k8s.io/kube-openapi/pkg/schemaconv
 k8s.io/kube-openapi/pkg/util/proto
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.3
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.23.14
 ## explicit; go 1.16
 k8s.io/kubectl/pkg/scale
 k8s.io/kubectl/pkg/util/podutils
-# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.3
+# k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.23.14
 ## explicit; go 1.16
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.23.3
+# k8s.io/kubernetes v1.23.14
 ## explicit; go 1.16
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/service
@@ -952,7 +952,7 @@ k8s.io/kubernetes/test/e2e/storage/podlogs
 k8s.io/kubernetes/test/e2e/storage/utils
 k8s.io/kubernetes/test/utils
 k8s.io/kubernetes/test/utils/image
-# k8s.io/mount-utils v0.23.3 => k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2
+# k8s.io/mount-utils v0.23.14 => k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2
 ## explicit; go 1.19
 k8s.io/mount-utils
 # k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
@@ -972,7 +972,7 @@ k8s.io/utils/nsenter
 k8s.io/utils/path
 k8s.io/utils/pointer
 k8s.io/utils/trace
-# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27
+# sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.33
 ## explicit; go 1.17
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
@@ -989,30 +989,30 @@ sigs.k8s.io/structured-merge-diff/v4/value
 # sigs.k8s.io/yaml v1.2.0
 ## explicit; go 1.12
 sigs.k8s.io/yaml
-# k8s.io/api => k8s.io/api v0.23.3
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3
-# k8s.io/apimachinery => k8s.io/apimachinery v0.23.3
-# k8s.io/apiserver => k8s.io/apiserver v0.23.3
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3
-# k8s.io/client-go => k8s.io/client-go v0.23.3
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3
-# k8s.io/code-generator => k8s.io/code-generator v0.23.3
-# k8s.io/component-base => k8s.io/component-base v0.23.3
-# k8s.io/component-helpers => k8s.io/component-helpers v0.23.3
-# k8s.io/controller-manager => k8s.io/controller-manager v0.23.3
-# k8s.io/cri-api => k8s.io/cri-api v0.23.3
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3
-# k8s.io/kubectl => k8s.io/kubectl v0.23.3
-# k8s.io/kubelet => k8s.io/kubelet v0.23.3
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3
-# k8s.io/metrics => k8s.io/metrics v0.23.3
+# k8s.io/api => k8s.io/api v0.23.14
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.14
+# k8s.io/apimachinery => k8s.io/apimachinery v0.23.14
+# k8s.io/apiserver => k8s.io/apiserver v0.23.14
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.14
+# k8s.io/client-go => k8s.io/client-go v0.23.14
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.14
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.14
+# k8s.io/code-generator => k8s.io/code-generator v0.23.14
+# k8s.io/component-base => k8s.io/component-base v0.23.14
+# k8s.io/component-helpers => k8s.io/component-helpers v0.23.14
+# k8s.io/controller-manager => k8s.io/controller-manager v0.23.14
+# k8s.io/cri-api => k8s.io/cri-api v0.23.14
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.14
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.14
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.14
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.14
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.14
+# k8s.io/kubectl => k8s.io/kubectl v0.23.14
+# k8s.io/kubelet => k8s.io/kubelet v0.23.14
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.14
+# k8s.io/metrics => k8s.io/metrics v0.23.14
 # k8s.io/mount-utils => k8s.io/mount-utils v0.0.0-20230103133730-1df1a57439e2
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3
-# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.3
-# k8s.io/sample-controller => k8s.io/sample-controller v0.23.3
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.14
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.14
+# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.23.14
+# k8s.io/sample-controller => k8s.io/sample-controller v0.23.14


@@ -24,6 +24,7 @@ import (
 	"math/rand"
 	"net"
 	"sync"
+	"sync/atomic"
 	"time"

 	"google.golang.org/grpc"
@@ -35,25 +36,101 @@ import (
 type Tunnel interface {
 	// Dial connects to the address on the named network, similar to
 	// what net.Dial does. The only supported protocol is tcp.
-	DialContext(ctx context.Context, protocol, address string) (net.Conn, error)
+	DialContext(requestCtx context.Context, protocol, address string) (net.Conn, error)
+
+	// Done returns a channel that is closed when the tunnel is no longer serving any connections,
+	// and can no longer be used.
+	Done() <-chan struct{}
 }

 type dialResult struct {
-	err    string
+	err    *dialFailure
 	connid int64
 }

+type pendingDial struct {
+	// resultCh is the channel to send the dial result to
+	resultCh chan<- dialResult
+	// cancelCh is the channel closed when resultCh no longer has a receiver
+	cancelCh <-chan struct{}
+}
+
+// TODO: Replace with a generic implementation once it is safe to assume the client is built with go1.18+
+type pendingDialManager struct {
+	pendingDials map[int64]pendingDial
+	mutex        sync.RWMutex
+}
+
+func (p *pendingDialManager) add(dialID int64, pd pendingDial) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	p.pendingDials[dialID] = pd
+}
+
+func (p *pendingDialManager) remove(dialID int64) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	delete(p.pendingDials, dialID)
+}
+
+func (p *pendingDialManager) get(dialID int64) (pendingDial, bool) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	pd, ok := p.pendingDials[dialID]
+	return pd, ok
+}
+
+// TODO: Replace with a generic implementation once it is safe to assume the client is built with go1.18+
+type connectionManager struct {
+	conns map[int64]*conn
+	mutex sync.RWMutex
+}
+
+func (cm *connectionManager) add(connID int64, c *conn) {
+	cm.mutex.Lock()
+	defer cm.mutex.Unlock()
+	cm.conns[connID] = c
+}
+
+func (cm *connectionManager) remove(connID int64) {
+	cm.mutex.Lock()
+	defer cm.mutex.Unlock()
+	delete(cm.conns, connID)
+}
+
+func (cm *connectionManager) get(connID int64) (*conn, bool) {
+	cm.mutex.RLock()
+	defer cm.mutex.RUnlock()
+	c, ok := cm.conns[connID]
+	return c, ok
+}
+
+func (cm *connectionManager) closeAll() {
+	cm.mutex.Lock()
+	defer cm.mutex.Unlock()
+	for _, conn := range cm.conns {
+		close(conn.readCh)
+	}
+}
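
Note: both managers carry the same TODO about go1.18 generics. A sketch of the generic replacement they anticipate (assumes go1.18+; type and method names are illustrative, not part of this commit):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // syncMap is an RWMutex-guarded map, the common shape behind both
    // pendingDialManager and connectionManager above.
    type syncMap[K comparable, V any] struct {
    	m     map[K]V
    	mutex sync.RWMutex
    }

    func newSyncMap[K comparable, V any]() *syncMap[K, V] {
    	return &syncMap[K, V]{m: make(map[K]V)}
    }

    func (s *syncMap[K, V]) add(k K, v V) {
    	s.mutex.Lock()
    	defer s.mutex.Unlock()
    	s.m[k] = v
    }

    func (s *syncMap[K, V]) remove(k K) {
    	s.mutex.Lock()
    	defer s.mutex.Unlock()
    	delete(s.m, k)
    }

    func (s *syncMap[K, V]) get(k K) (V, bool) {
    	s.mutex.RLock()
    	defer s.mutex.RUnlock()
    	v, ok := s.m[k]
    	return v, ok
    }

    func main() {
    	dials := newSyncMap[int64, string]()
    	dials.add(1, "pending")
    	if v, ok := dials.get(1); ok {
    		fmt.Println(v)
    	}
    }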
 // grpcTunnel implements Tunnel
 type grpcTunnel struct {
 	stream client.ProxyService_ProxyClient
-	pendingDial     map[int64]chan<- dialResult
-	conns           map[int64]*conn
-	pendingDialLock sync.RWMutex
-	connsLock       sync.RWMutex
+	clientConn  clientConn
+	pendingDial pendingDialManager
+	conns       connectionManager

 	// The tunnel will be closed if the caller fails to read via conn.Read()
 	// more than readTimeoutSeconds after a packet has been received.
 	readTimeoutSeconds int
+
+	// The done channel is closed after the tunnel has cleaned up all connections and is no longer
+	// serving.
+	done chan struct{}
+
+	// closing is an atomic bool represented as a 0 or 1, and set to true when the tunnel is being closed.
+	// closing should only be accessed through atomic methods.
+	// TODO: switch this to an atomic.Bool once the client is exclusively built with go1.19+
+	closing uint32
 }
 type clientConn interface {
@@ -66,37 +143,65 @@ var _ clientConn = &grpc.ClientConn{}
 // gRPC based proxy service.
 // Currently, a single tunnel supports a single connection, and the tunnel is closed when the connection is terminated
 // The Dial() method of the returned tunnel should only be called once
-func CreateSingleUseGrpcTunnel(ctx context.Context, address string, opts ...grpc.DialOption) (Tunnel, error) {
-	c, err := grpc.DialContext(ctx, address, opts...)
+// Deprecated 2022-06-07: use CreateSingleUseGrpcTunnelWithContext
+func CreateSingleUseGrpcTunnel(tunnelCtx context.Context, address string, opts ...grpc.DialOption) (Tunnel, error) {
+	return CreateSingleUseGrpcTunnelWithContext(context.TODO(), tunnelCtx, address, opts...)
+}
+
+// CreateSingleUseGrpcTunnelWithContext creates a Tunnel to dial to a remote server through a
+// gRPC based proxy service.
+// Currently, a single tunnel supports a single connection.
+// The tunnel is normally closed when the connection is terminated.
+// If createCtx is cancelled before tunnel creation, an error will be returned.
+// If tunnelCtx is cancelled while the tunnel is still in use, the tunnel (and any in flight connections) will be closed.
+// The Dial() method of the returned tunnel should only be called once
+func CreateSingleUseGrpcTunnelWithContext(createCtx, tunnelCtx context.Context, address string, opts ...grpc.DialOption) (Tunnel, error) {
+	c, err := grpc.DialContext(createCtx, address, opts...)
 	if err != nil {
 		return nil, err
 	}

 	grpcClient := client.NewProxyServiceClient(c)

-	stream, err := grpcClient.Proxy(ctx)
+	stream, err := grpcClient.Proxy(tunnelCtx)
 	if err != nil {
+		c.Close()
 		return nil, err
 	}

-	tunnel := &grpcTunnel{
-		stream:             stream,
-		pendingDial:        make(map[int64]chan<- dialResult),
-		conns:              make(map[int64]*conn),
-		readTimeoutSeconds: 10,
-	}
+	tunnel := newUnstartedTunnel(stream, c)

-	go tunnel.serve(c)
+	go tunnel.serve(tunnelCtx)

 	return tunnel, nil
 }

-func (t *grpcTunnel) serve(c clientConn) {
-	defer c.Close()
+func newUnstartedTunnel(stream client.ProxyService_ProxyClient, c clientConn) *grpcTunnel {
+	return &grpcTunnel{
+		stream:             stream,
+		clientConn:         c,
+		pendingDial:        pendingDialManager{pendingDials: make(map[int64]pendingDial)},
+		conns:              connectionManager{conns: make(map[int64]*conn)},
+		readTimeoutSeconds: 10,
+		done:               make(chan struct{}),
+	}
+}
+
+func (t *grpcTunnel) serve(tunnelCtx context.Context) {
+	defer func() {
+		t.clientConn.Close()
+
+		// A connection in t.conns after serve() returns means
+		// we never received a CLOSE_RSP for it, so we need to
+		// close any channels remaining for these connections.
+		t.conns.closeAll()
+
+		close(t.done)
+	}()
+
 	for {
 		pkt, err := t.stream.Recv()
-		if err == io.EOF {
+		if err == io.EOF || t.isClosing() {
 			return
 		}
 		if err != nil || pkt == nil {
@@ -109,25 +214,35 @@ func (t *grpcTunnel) serve(c clientConn) {
 		switch pkt.Type {
 		case client.PacketType_DIAL_RSP:
 			resp := pkt.GetDialResponse()
-			t.pendingDialLock.RLock()
-			ch, ok := t.pendingDial[resp.Random]
-			t.pendingDialLock.RUnlock()
+			pendingDial, ok := t.pendingDial.get(resp.Random)

 			if !ok {
+				// If the DIAL_RSP does not match a pending dial, it means one of two things:
+				// 1. There was a second DIAL_RSP for the connection request (this is very unlikely but possible)
+				// 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context
+				//
+				// In either scenario, we should return here and close the tunnel as it is no longer needed.
 				klog.V(1).InfoS("DialResp not recognized; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random)
 				return
-			} else {
-				result := dialResult{
-					err:    resp.Error,
-					connid: resp.ConnectID,
-				}
-				select {
-				case ch <- result:
-				default:
-					klog.ErrorS(fmt.Errorf("blocked pending channel"), "Received second dial response for connection request", "connectionID", resp.ConnectID, "dialID", resp.Random)
-					// On multiple dial responses, avoid leaking serve goroutine.
-					return
-				}
+			}
+
+			result := dialResult{connid: resp.ConnectID}
+			if resp.Error != "" {
+				result.err = &dialFailure{resp.Error, DialFailureEndpoint}
+			}
+			select {
+			// try to send to the result channel
+			case pendingDial.resultCh <- result:
+			// unblock if the cancel channel is closed
+			case <-pendingDial.cancelCh:
+				// Note: this condition can only be hit by a race condition where the
+				// DialContext() returns early (timeout) after the pendingDial is already
+				// fetched here, but before the result is sent.
+				klog.V(1).InfoS("Pending dial has been cancelled; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random)
+				return
+			case <-tunnelCtx.Done():
+				klog.V(1).InfoS("Tunnel has been closed; dropped", "connectionID", resp.ConnectID, "dialID", resp.Random)
+				return
 			}

 			if resp.Error != "" {
@@ -135,14 +250,41 @@ func (t *grpcTunnel) serve(c clientConn) {
 				return
 			}

+		case client.PacketType_DIAL_CLS:
+			resp := pkt.GetCloseDial()
+			pendingDial, ok := t.pendingDial.get(resp.Random)
+
+			if !ok {
+				// If the DIAL_CLS does not match a pending dial, it means one of two things:
+				// 1. There was a DIAL_CLS received after a DIAL_RSP (unlikely but possible)
+				// 2. grpcTunnel.DialContext() returned early due to a dial timeout or the client canceling the context
+				//
+				// In either scenario, we should return here and close the tunnel as it is no longer needed.
+				klog.V(1).InfoS("DIAL_CLS after dial finished", "dialID", resp.Random)
+			} else {
+				result := dialResult{
+					err: &dialFailure{"dial closed", DialFailureDialClosed},
+				}
+				select {
+				case pendingDial.resultCh <- result:
+				case <-pendingDial.cancelCh:
+					// Note: this condition can only be hit by a race condition where the
+					// DialContext() returns early (timeout) after the pendingDial is already
+					// fetched here, but before the result is sent.
+				case <-tunnelCtx.Done():
+				}
+			}
+			return // Stop serving & close the tunnel.
+
 		case client.PacketType_DATA:
 			resp := pkt.GetData()
 			// TODO: flow control
-			t.connsLock.RLock()
-			conn, ok := t.conns[resp.ConnectID]
-			t.connsLock.RUnlock()
+			conn, ok := t.conns.get(resp.ConnectID)

-			if ok {
+			if !ok {
+				klog.V(1).InfoS("Connection not recognized", "connectionID", resp.ConnectID)
+				continue
+			}
 			timer := time.NewTimer((time.Duration)(t.readTimeoutSeconds) * time.Second)
 			select {
 			case conn.readCh <- resp.Data:
@@ -150,47 +292,51 @@ func (t *grpcTunnel) serve(c clientConn) {
 			case <-timer.C:
 				klog.ErrorS(fmt.Errorf("timeout"), "readTimeout has been reached, the grpc connection to the proxy server will be closed", "connectionID", conn.connID, "readTimeoutSeconds", t.readTimeoutSeconds)
 				return
+			case <-tunnelCtx.Done():
+				klog.V(1).InfoS("Tunnel has been closed, the grpc connection to the proxy server will be closed", "connectionID", conn.connID)
 			}
-			} else {
-				klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID)
-			}

 		case client.PacketType_CLOSE_RSP:
 			resp := pkt.GetCloseResponse()
-			t.connsLock.RLock()
-			conn, ok := t.conns[resp.ConnectID]
-			t.connsLock.RUnlock()
+			conn, ok := t.conns.get(resp.ConnectID)

-			if ok {
+			if !ok {
+				klog.V(1).InfoS("Connection not recognized", "connectionID", resp.ConnectID)
+				continue
+			}
 			close(conn.readCh)
 			conn.closeCh <- resp.Error
 			close(conn.closeCh)
-			t.connsLock.Lock()
-			delete(t.conns, resp.ConnectID)
-			t.connsLock.Unlock()
+			t.conns.remove(resp.ConnectID)
 			return
-			}
-			klog.V(1).InfoS("connection not recognized", "connectionID", resp.ConnectID)
 		}
 	}
 }
 // Dial connects to the address on the named network, similar to
 // what net.Dial does. The only supported protocol is tcp.
-func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string) (net.Conn, error) {
+func (t *grpcTunnel) DialContext(requestCtx context.Context, protocol, address string) (net.Conn, error) {
+	select {
+	case <-t.done:
+		return nil, errors.New("tunnel is closed")
+	default: // Tunnel is open, carry on.
+	}
+
 	if protocol != "tcp" {
 		return nil, errors.New("protocol not supported")
 	}

 	random := rand.Int63() /* #nosec G404 */
-	resCh := make(chan dialResult, 1)
-	t.pendingDialLock.Lock()
-	t.pendingDial[random] = resCh
-	t.pendingDialLock.Unlock()
-	defer func() {
-		t.pendingDialLock.Lock()
-		delete(t.pendingDial, random)
-		t.pendingDialLock.Unlock()
-	}()
+
+	// This channel is closed once we're returning and no longer waiting on resultCh
+	cancelCh := make(chan struct{})
+	defer close(cancelCh)
+
+	// This channel MUST NOT be buffered. The sender needs to know when we are not receiving things, so they can abort.
+	resCh := make(chan dialResult)
+
+	t.pendingDial.add(random, pendingDial{resultCh: resCh, cancelCh: cancelCh})
+	defer t.pendingDial.remove(random)

 	req := &client.Packet{
 		Type: client.PacketType_DIAL_REQ,
@@ -211,24 +357,98 @@ func (t *grpcTunnel) DialContext(ctx context.Context, protocol, address string)
 	klog.V(5).Infoln("DIAL_REQ sent to proxy server")

-	c := &conn{stream: t.stream, random: random}
+	c := &conn{
+		stream:      t.stream,
+		random:      random,
+		closeTunnel: t.closeTunnel,
+	}

 	select {
 	case res := <-resCh:
-		if res.err != "" {
-			return nil, errors.New(res.err)
+		if res.err != nil {
+			return nil, res.err
 		}
 		c.connID = res.connid
 		c.readCh = make(chan []byte, 10)
 		c.closeCh = make(chan string, 1)
-		t.connsLock.Lock()
-		t.conns[res.connid] = c
-		t.connsLock.Unlock()
+		t.conns.add(res.connid, c)
 	case <-time.After(30 * time.Second):
-		return nil, errors.New("dial timeout, backstop")
+		klog.V(5).InfoS("Timed out waiting for DialResp", "dialID", random)
+		go t.closeDial(random)
+		return nil, &dialFailure{"dial timeout, backstop", DialFailureTimeout}
-	case <-ctx.Done():
-		return nil, errors.New("dial timeout, context")
+	case <-requestCtx.Done():
+		klog.V(5).InfoS("Context canceled waiting for DialResp", "ctxErr", requestCtx.Err(), "dialID", random)
+		go t.closeDial(random)
+		return nil, &dialFailure{"dial timeout, context", DialFailureContext}
+	case <-t.done:
+		klog.V(5).InfoS("Tunnel closed while waiting for DialResp", "dialID", random)
+		return nil, &dialFailure{"tunnel closed", DialFailureTunnelClosed}
 	}

 	return c, nil
 }
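
Note: a hypothetical caller (address and credentials are placeholders, and grpc.WithInsecure is used only because it exists in the grpc version pinned here) showing the lifecycle the new API implies: a creation context for dialing the proxy, a long-lived tunnel context, one DialContext call, and Done() to observe teardown:

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"google.golang.org/grpc"
    	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client"
    )

    func main() {
    	createCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	// tunnelCtx outlives creation; cancelling it tears down the tunnel and
    	// any in-flight connections.
    	tunnel, err := client.CreateSingleUseGrpcTunnelWithContext(
    		createCtx, context.Background(), "konnectivity-server:8090", grpc.WithInsecure())
    	if err != nil {
    		log.Fatal(err)
    	}

    	conn, err := tunnel.DialContext(context.Background(), "tcp", "10.0.0.1:443")
    	if err != nil {
    		log.Fatal(err)
    	}

    	// ... use conn ...

    	conn.Close() // closing the single connection also closes the single-use tunnel
    	<-tunnel.Done()
    }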
+func (t *grpcTunnel) Done() <-chan struct{} {
+	return t.done
+}
+
+// Send a best-effort DIAL_CLS request for the given dial ID.
+func (t *grpcTunnel) closeDial(dialID int64) {
+	req := &client.Packet{
+		Type: client.PacketType_DIAL_CLS,
+		Payload: &client.Packet_CloseDial{
+			CloseDial: &client.CloseDial{
+				Random: dialID,
+			},
+		},
+	}
+	if err := t.stream.Send(req); err != nil {
+		klog.V(5).InfoS("Failed to send DIAL_CLS", "err", err, "dialID", dialID)
+	}
+	t.closeTunnel()
+}
+
+func (t *grpcTunnel) closeTunnel() {
+	atomic.StoreUint32(&t.closing, 1)
+	t.clientConn.Close()
+}
+
+func (t *grpcTunnel) isClosing() bool {
+	return atomic.LoadUint32(&t.closing) != 0
+}
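
Note: the go1.19 form the struct's TODO points at, where atomic.Bool removes the manual 0/1 encoding (sketch with assumed names):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type tunnelState struct {
    	closing atomic.Bool // replaces the uint32 plus Store/Load pairs
    }

    func (t *tunnelState) closeTunnel()    { t.closing.Store(true) }
    func (t *tunnelState) isClosing() bool { return t.closing.Load() }

    func main() {
    	var t tunnelState
    	fmt.Println(t.isClosing()) // false
    	t.closeTunnel()
    	fmt.Println(t.isClosing()) // true
    }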
+func GetDialFailureReason(err error) (isDialFailure bool, reason DialFailureReason) {
+	var df *dialFailure
+	if errors.As(err, &df) {
+		return true, df.reason
+	}
+	return false, DialFailureUnknown
+}
+
+type dialFailure struct {
+	msg    string
+	reason DialFailureReason
+}
+
+func (df *dialFailure) Error() string {
+	return df.msg
+}
+
+type DialFailureReason string
+
+const (
+	DialFailureUnknown DialFailureReason = "unknown"
+	// DialFailureTimeout indicates the hard 30 second timeout was hit.
+	DialFailureTimeout DialFailureReason = "timeout"
+	// DialFailureContext indicates that the context was cancelled or reached its deadline before
+	// the dial response was returned.
+	DialFailureContext DialFailureReason = "context"
+	// DialFailureEndpoint indicates that the konnectivity-agent was unable to reach the backend endpoint.
+	DialFailureEndpoint DialFailureReason = "endpoint"
+	// DialFailureDialClosed indicates that the client received a CloseDial response, indicating the
+	// connection was closed before the dial could complete.
+	DialFailureDialClosed DialFailureReason = "dialclosed"
+	// DialFailureTunnelClosed indicates that the client connection was closed before the dial could
+	// complete.
+	DialFailureTunnelClosed DialFailureReason = "tunnelclosed"
+)
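
Note: a hedged example of classifying dial errors with GetDialFailureReason; the retry policy shown is illustrative, not prescribed by the package:

    package main

    import (
    	"errors"
    	"fmt"

    	"sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client"
    )

    // shouldRetryDial treats timeouts and cancelled contexts as retryable and
    // everything else as a signal to rebuild the tunnel.
    func shouldRetryDial(err error) bool {
    	ok, reason := client.GetDialFailureReason(err)
    	if !ok {
    		return false
    	}
    	switch reason {
    	case client.DialFailureTimeout, client.DialFailureContext:
    		return true
    	default:
    		return false
    	}
    }

    func main() {
    	fmt.Println(shouldRetryDial(errors.New("not a dial failure"))) // false
    }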


@@ -30,6 +30,8 @@ import (
 // successful delivery of CLOSE_REQ.
 const CloseTimeout = 10 * time.Second

+var errConnCloseTimeout = errors.New("close timeout")
+
 // conn is an implementation of net.Conn, where the data is transported
 // over an established tunnel defined by a gRPC service ProxyService.
 type conn struct {
@@ -39,6 +41,9 @@ type conn struct {
 	readCh  chan []byte
 	closeCh chan string
 	rdata   []byte
+
+	// closeTunnel is an optional callback to close the underlying grpc connection.
+	closeTunnel func()
 }

 var _ net.Conn = &conn{}
@@ -114,6 +119,10 @@ func (c *conn) SetWriteDeadline(t time.Time) error {
 // proxy service to notify remote to drop the connection.
 func (c *conn) Close() error {
 	klog.V(4).Infoln("closing connection")
+	if c.closeTunnel != nil {
+		defer c.closeTunnel()
+	}
+
 	var req *client.Packet
 	if c.connID != 0 {
 		req = &client.Packet{
@@ -151,5 +160,5 @@ func (c *conn) Close() error {
 	case <-time.After(CloseTimeout):
 	}

-	return errors.New("close timeout")
+	return errConnCloseTimeout
 }
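
Note: since errConnCloseTimeout is unexported, the sentinel mainly benefits code inside the package (including its tests), which can now match the timeout case precisely instead of comparing error strings. An in-package sketch (helper name assumed; relies on the file's existing errors import):

    // isCloseTimeout reports whether Close failed because no CLOSE_RSP arrived in time.
    func isCloseTimeout(err error) bool {
    	return errors.Is(err, errConnCloseTimeout)
    }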