fix: CVE-2024-3177

parent f929432461
commit b1fec8d160
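
This commit addresses CVE-2024-3177, a Kubernetes issue in which the mountable-secrets policy enforced by the ServiceAccount admission plugin could be bypassed through the envFrom field; the fix shipped upstream in Kubernetes v1.28.9. The diff below accordingly bumps the k8s.io staging modules from v0.28.4/v0.28.8 to v0.28.9 and k8s.io/kubernetes from v1.28.7 to v1.28.9, pulling in the vendored gomega v1.33.0 and golang.org/x/exp updates as a side effect. A sketch of the presumed workflow (the exact commands are not recorded in the commit; the replace block in go.mod is edited by hand first):

    go get k8s.io/kubernetes@v1.28.9 k8s.io/api@v0.28.9 k8s.io/apimachinery@v0.28.9 k8s.io/client-go@v0.28.9
    go mod tidy
    go mod vendor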
go.mod (62 lines changed)
@@ -6,21 +6,21 @@ require (
 	github.com/container-storage-interface/spec v1.8.0
 	github.com/kubernetes-csi/csi-lib-utils v0.9.0
 	github.com/onsi/ginkgo/v2 v2.17.1
-	github.com/onsi/gomega v1.32.0
+	github.com/onsi/gomega v1.33.0
 	github.com/pborman/uuid v1.2.1
 	github.com/stretchr/testify v1.9.0
 	golang.org/x/net v0.24.0
 	google.golang.org/grpc v1.63.2
 	google.golang.org/protobuf v1.33.0
-	k8s.io/api v0.28.8
-	k8s.io/apimachinery v0.28.8
-	k8s.io/client-go v0.28.8
+	k8s.io/api v0.28.9
+	k8s.io/apimachinery v0.28.9
+	k8s.io/client-go v0.28.9
 	k8s.io/klog/v2 v2.120.1
-	k8s.io/kubernetes v1.28.7
+	k8s.io/kubernetes v1.28.9
 	k8s.io/mount-utils v0.29.4
 	k8s.io/pod-security-admission v0.0.0
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
-	sigs.k8s.io/cloud-provider-azure v1.28.4
+	sigs.k8s.io/cloud-provider-azure v1.28.9
 	sigs.k8s.io/yaml v1.4.0
 )

@@ -37,7 +37,7 @@ require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
-	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
+	github.com/evanphx/json-patch v5.9.0+incompatible // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
@@ -95,9 +95,9 @@ require (
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.19.0 // indirect
 	golang.org/x/crypto v0.22.0 // indirect
-	golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 // indirect
+	golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect
 	golang.org/x/oauth2 v0.17.0 // indirect
-	golang.org/x/sync v0.6.0 // indirect
+	golang.org/x/sync v0.7.0 // indirect
 	golang.org/x/sys v0.19.0 // indirect
 	golang.org/x/term v0.19.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
@@ -112,34 +112,34 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.0.0 // indirect
-	k8s.io/apiserver v0.28.4 // indirect
-	k8s.io/cloud-provider v0.28.4 // indirect
-	k8s.io/component-base v0.28.4 // indirect
-	k8s.io/component-helpers v0.28.4 // indirect
-	k8s.io/controller-manager v0.28.4 // indirect
-	k8s.io/kms v0.28.4 // indirect
+	k8s.io/apiserver v0.28.9 // indirect
+	k8s.io/cloud-provider v0.28.9 // indirect
+	k8s.io/component-base v0.28.9 // indirect
+	k8s.io/component-helpers v0.28.9 // indirect
+	k8s.io/controller-manager v0.28.9 // indirect
+	k8s.io/kms v0.28.9 // indirect
 	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
-	k8s.io/kubelet v0.28.4 // indirect
+	k8s.io/kubelet v0.28.9 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )
 
 replace (
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4
-	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4
-	k8s.io/endpointslice => k8s.io/endpointslice v0.28.4
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4
-	k8s.io/kubectl => k8s.io/kubectl v0.28.4
-	k8s.io/kubelet => k8s.io/kubelet v0.28.4
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4
-	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.9
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.9
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.9
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.9
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.9
+	k8s.io/endpointslice => k8s.io/endpointslice v0.28.9
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.9
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.9
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.9
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.9
+	k8s.io/kubectl => k8s.io/kubectl v0.28.9
+	k8s.io/kubelet => k8s.io/kubelet v0.28.9
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.9
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.9
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.9
 )

go.sum (80 lines changed)
@@ -115,8 +115,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
 github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
-github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls=
+github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -297,8 +297,8 @@ github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8
 github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/onsi/gomega v1.33.0 h1:snPCflnZrpMsy94p4lXVEkHo12lmPnc3vY5XBbreexE=
+github.com/onsi/gomega v1.33.0/go.mod h1:+925n5YtiFsLzzafLUHzVMBpvvRAzrydIBiSIxjX3wY=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
@@ -440,8 +440,8 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20220827204233-334a2380cb91 h1:tnebWN09GYg9OLPss1KXj8txwZc6X6uMr6VFdcGNbHw=
-golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
+golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -496,8 +496,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -657,57 +657,57 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
-k8s.io/api v0.28.8 h1:G0/G7yX1puRAcon/+XPLsKXZ9A5L7Ds6oKbDIe027xw=
-k8s.io/api v0.28.8/go.mod h1:rU8f1t9CNUAXlk/1j/wMJ7XnaxkR1g1AlZGQAOOL+sw=
-k8s.io/apiextensions-apiserver v0.28.4 h1:AZpKY/7wQ8n+ZYDtNHbAJBb+N4AXXJvyZx6ww6yAJvU=
-k8s.io/apiextensions-apiserver v0.28.4/go.mod h1:pgQIZ1U8eJSMQcENew/0ShUTlePcSGFq6dxSxf2mwPM=
+k8s.io/api v0.28.9 h1:E7VEXXCAlSrp+08zq4zgd+ko6Ttu0Mw+XoXlIkDTVW0=
+k8s.io/api v0.28.9/go.mod h1:AnCsDYf3SHjfa8mPG5LGYf+iF4mie+3peLQR51MMCgw=
+k8s.io/apiextensions-apiserver v0.28.9 h1:yzPHp+4IASHeu7XIPkAKJrY4UjWdjiAjOcQMd6oNKj0=
+k8s.io/apiextensions-apiserver v0.28.9/go.mod h1:Rjhvq5y3JESdZgV2UOByldyefCfRrUguVpBLYOAIbVs=
 k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
-k8s.io/apimachinery v0.28.8 h1:hi/nrxHwk4QLV+W/SHve1bypTE59HCDorLY1stBIxKQ=
-k8s.io/apimachinery v0.28.8/go.mod h1:cBnwIM3fXoRo28SqbV/Ihxf/iviw85KyXOrzxvZQ83U=
-k8s.io/apiserver v0.28.4 h1:BJXlaQbAU/RXYX2lRz+E1oPe3G3TKlozMMCZWu5GMgg=
-k8s.io/apiserver v0.28.4/go.mod h1:Idq71oXugKZoVGUUL2wgBCTHbUR+FYTWa4rq9j4n23w=
+k8s.io/apimachinery v0.28.9 h1:aXz4Zxsw+Pk4KhBerAtKRxNN1uSMWKfciL/iOdBfXvA=
+k8s.io/apimachinery v0.28.9/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
+k8s.io/apiserver v0.28.9 h1:koPXvgSXRBDxKJQjJGdZNgPsT9lQv6scJJFipd1m86E=
+k8s.io/apiserver v0.28.9/go.mod h1:D51I37WBZojJhmLcjNVE4GSVrjiUHP+yq+N5KvKn2wY=
 k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
-k8s.io/client-go v0.28.8 h1:TE59Tjd87WKvS2FPBTfIKLFX0nQJ4SSHsnDo5IHjgOw=
-k8s.io/client-go v0.28.8/go.mod h1:uDVQ/rPzWpWIy40c6lZ4mUwaEvRWGnpoqSO4FM65P3o=
-k8s.io/cloud-provider v0.28.4 h1:7obmeuJJ5CYTO9HANDqemf/d2v95U+F0t8aeH4jNOsQ=
-k8s.io/cloud-provider v0.28.4/go.mod h1:xbhmGZ7wRHgXFP3SNsvdmFRO87KJIvirDYQA5ydMgGA=
+k8s.io/client-go v0.28.9 h1:mmMvejwc/KDjMLmDpyaxkWNzlWRCJ6ht7Qsbsnwn39Y=
+k8s.io/client-go v0.28.9/go.mod h1:GFDy3rUNId++WGrr0hRaBrs+y1eZz5JtVZODEalhRMo=
+k8s.io/cloud-provider v0.28.9 h1:FBW4Ii1NdXCHKprzkM8/s5BpxvLgJmYrZTNJABsVX7Y=
+k8s.io/cloud-provider v0.28.9/go.mod h1:7tFyiftAlSARvJS6mzZQQKKDQA81asNQ2usg35R3Exo=
 k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y=
-k8s.io/component-base v0.28.4 h1:c/iQLWPdUgI90O+T9TeECg8o7N3YJTiuz2sKxILYcYo=
-k8s.io/component-base v0.28.4/go.mod h1:m9hR0uvqXDybiGL2nf/3Lf0MerAfQXzkfWhUY58JUbU=
-k8s.io/component-helpers v0.28.4 h1:+X9VXT5+jUsRdC26JyMZ8Fjfln7mSjgumafocE509C4=
-k8s.io/component-helpers v0.28.4/go.mod h1:8LzMalOQ0K10tkBJWBWq8h0HTI9HDPx4WT3QvTFn9Ro=
-k8s.io/controller-manager v0.28.4 h1:8uJmo1pD6fWYk4mC/JfZQU6zPvuCgEHf3pd5G39ldDU=
-k8s.io/controller-manager v0.28.4/go.mod h1:pnO+UK2mcWNu1MxucqI8xHPD/8UBm04IUmp2u/3vbnM=
-k8s.io/csi-translation-lib v0.28.4 h1:4TrU2zefZGU5HQCyPZvcPxkS6IowqZ/jBs2Qi/dPUpc=
-k8s.io/csi-translation-lib v0.28.4/go.mod h1:oxwDdx0hyVqViINOUF7TGrVt51eqsOkQ0BTI+A9QcQs=
+k8s.io/component-base v0.28.9 h1:ySM2PR8Z/xaUSG1Akd3yM6dqUezTltI7S5aV41MMuuc=
+k8s.io/component-base v0.28.9/go.mod h1:QtWzscEhCKRfHV24/S+11BwWjVxhC6fd3RYoEgZcWFU=
+k8s.io/component-helpers v0.28.9 h1:knX9F2nRoxF4wplgXO4C5tE4/k7HGszK3177Tm4+CUc=
+k8s.io/component-helpers v0.28.9/go.mod h1:TdAkLbywEDE2CB5h8LbM/W03T3k8wvqAaoPcEZrr6Z4=
+k8s.io/controller-manager v0.28.9 h1:muAtmO2mDN7pDkAJQMknvWy+WQhkvvi/jK1V82+qbLw=
+k8s.io/controller-manager v0.28.9/go.mod h1:RYP65K6GWLRWYZR7PRRaStfvgeXkhCGZwJsxRPuaDV0=
+k8s.io/csi-translation-lib v0.28.9 h1:zl93l7wk0iwKInyRJfaodvsWf1z8QtWCN9a5OqHeT3o=
+k8s.io/csi-translation-lib v0.28.9/go.mod h1:eOniPQitdkuyVh+gtktg3yeDJQu/IidIUSMadDPLhak=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
 k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.28.4 h1:PMgY/3CQTWP9eIKmNQiTgjLIZ0ns6O+voagzD2/4mSg=
-k8s.io/kms v0.28.4/go.mod h1:HL4/lR/bhjAJPbqycKtfhWiKh1Sp21cpHOL8P4oo87w=
+k8s.io/kms v0.28.9 h1:ApCWJulBl+uFRTr2jtTpG1lffmqqMuLnOH/RUbtO4UY=
+k8s.io/kms v0.28.9/go.mod h1:VgyAIRMFqZX9lHyixecU/JTI0wnPD1wCIlquvlXRJ+Y=
 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
-k8s.io/kubectl v0.28.4 h1:gWpUXW/T7aFne+rchYeHkyB8eVDl5UZce8G4X//kjUQ=
-k8s.io/kubectl v0.28.4/go.mod h1:CKOccVx3l+3MmDbkXtIUtibq93nN2hkDR99XDCn7c/c=
-k8s.io/kubelet v0.28.4 h1:Ypxy1jaFlSXFXbg/yVtFOU2ZxErBVRJfLu8+t4s7Dtw=
-k8s.io/kubelet v0.28.4/go.mod h1:w1wPI12liY/aeC70nqKYcNNkr6/nbyvdMB7P7wmww2o=
-k8s.io/kubernetes v1.28.7 h1:XV7AiCqtuOLaERR7uWcJnVpGG090lzJ9A37ftQuJhN4=
-k8s.io/kubernetes v1.28.7/go.mod h1:0qpyGJTR3blkbQOmZA3Z0u1VDZJNxJM8ifLUVNJN0X8=
+k8s.io/kubectl v0.28.9 h1:FTf/aapuuFxPmt8gYUeqUmcsgG0gKC2ei6n+TO5sGOw=
+k8s.io/kubectl v0.28.9/go.mod h1:ip/zTUr1MM/H2M+YbPHnSKLt0x6kb85SJtRSjwEGDfs=
+k8s.io/kubelet v0.28.9 h1:76v00fFLeniz27kXhGGUIxONdwa9LKcD2Jd5cXYAZko=
+k8s.io/kubelet v0.28.9/go.mod h1:46P39DFjI+E59nU2OgpatyS3oWy58ClulKO6riZ/97o=
+k8s.io/kubernetes v1.28.9 h1:I4sYGQJOuxEo4/QWoY7M8kDB7O0HcH266t6o6mR6ogg=
+k8s.io/kubernetes v1.28.9/go.mod h1:chlmcCDBnOA/y+572cw8dO0Rci1wiA8bm5+zhPdFLCk=
 k8s.io/mount-utils v0.29.4 h1:tW/URea4gtXlaVW7VObr52NQhS+z3SXTg1GUaFZjRL4=
 k8s.io/mount-utils v0.29.4/go.mod h1:SHUMR9n3b6tLgEmlyT36cL6fV6Sjwa5CJhc0guCXvb0=
-k8s.io/pod-security-admission v0.28.4 h1:b9d6zfKNjkawrO2gF7rBr5XoSZqPfE6UjKLNjgXYrr0=
-k8s.io/pod-security-admission v0.28.4/go.mod h1:MVYrZx0Q6ewsZ05Ml2+Ox03HQMAVjO60oombQNmJ44E=
+k8s.io/pod-security-admission v0.28.9 h1:towoNqSp7aU7gF8T89zftCuQUfliyib3ds20Kz/hysg=
+k8s.io/pod-security-admission v0.28.9/go.mod h1:mfEhECQ+AvP+zehqxemSq1pDL4YLoWCP7liL0YmkpZY=
 k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
 k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2/go.mod h1:+qG7ISXqCDVVcyO8hLn12AKVYYUjM7ftlqsqmrhMZE0=
-sigs.k8s.io/cloud-provider-azure v1.28.4 h1:TG/N0fjnZT+T53ymdoHvl3ft6QXvHcx8U7b6lcC1tC0=
-sigs.k8s.io/cloud-provider-azure v1.28.4/go.mod h1:xtm6ROi1sIRLF8otWohSfrwAkVHCOk+dJ9xvB4QAXUU=
+sigs.k8s.io/cloud-provider-azure v1.28.9 h1:OAF8cQubrNUEiMNbnDFowRl6jciWTt3DqI9FhWGcnpE=
+sigs.k8s.io/cloud-provider-azure v1.28.9/go.mod h1:63ByXruYF4XWLdOIRxtSz6RYel5PpdKRsCPKIj4Io58=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=

vendor/github.com/onsi/gomega/CHANGELOG.md (11 lines changed; generated, vendored)
@@ -1,3 +1,14 @@
+## 1.33.0
+
+### Features
+
+`Receive` now accepts `Receive(<POINTER>, <MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer.
+
+### Maintenance
+- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb]
+- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21]
+- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596]
+
 ## 1.32.0
 
 ### Maintenance

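The new matcher form from this changelog entry can be exercised as follows; a minimal sketch assuming a standard Gomega test, with thingChan and the channel values purely illustrative:

    package example_test

    import (
    	"testing"

    	. "github.com/onsi/gomega"
    )

    func TestReceivePointerAndMatcher(t *testing.T) {
    	g := NewWithT(t)

    	thingChan := make(chan string, 2)
    	thingChan <- "foo"
    	thingChan <- "a bar value"

    	var got string
    	// Receives repeatedly until a value satisfies the matcher, then stores it in got.
    	g.Eventually(thingChan).Should(Receive(&got, ContainSubstring("bar")))
    	g.Expect(got).To(Equal("a bar value"))
    }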
vendor/github.com/onsi/gomega/gomega_dsl.go (2 lines changed; generated, vendored)
@@ -22,7 +22,7 @@ import (
 	"github.com/onsi/gomega/types"
 )
 
-const GOMEGA_VERSION = "1.32.0"
+const GOMEGA_VERSION = "1.33.0"
 
 const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler.
 If you're using Ginkgo then you probably forgot to put your assertion in an It().

vendor/github.com/onsi/gomega/matchers.go (15 lines changed; generated, vendored)
@@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher {
 //
 // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received.
 //
-// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
+// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type:
 //
 //	var myThing thing
 //	Eventually(thingChan).Should(Receive(&myThing))
 //	Expect(myThing.Sprocket).Should(Equal("foo"))
 //	Expect(myThing.IsValid()).Should(BeTrue())
+//
+// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received,
+// you can pass a pointer to a variable of the appropriate type first, and second a matcher:
+//
+//	var myThing thing
+//	Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar")))
 func Receive(args ...interface{}) types.GomegaMatcher {
-	var arg interface{}
-	if len(args) > 0 {
-		arg = args[0]
-	}
-
 	return &matchers.ReceiveMatcher{
-		Arg: arg,
+		Args: args,
 	}
 }
 
vendor/github.com/onsi/gomega/matchers/receive_matcher.go (70 lines changed; generated, vendored)
@@ -3,6 +3,7 @@
 package matchers
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 
@@ -10,7 +11,7 @@ import (
 )
 
 type ReceiveMatcher struct {
-	Arg           interface{}
+	Args          []interface{}
 	receivedValue reflect.Value
 	channelClosed bool
 }
@@ -29,16 +30,39 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
 
 	var subMatcher omegaMatcher
 	var hasSubMatcher bool
+	var resultReference interface{}
 
-	if matcher.Arg != nil {
-		subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher)
-		if !hasSubMatcher {
-			argType := reflect.TypeOf(matcher.Arg)
-			if argType.Kind() != reflect.Ptr {
-				return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1))
-			}
-		}
-	}
+	// Valid arg formats are as follows, always with optional POINTER before
+	// optional MATCHER:
+	//  - Receive()
+	//  - Receive(POINTER)
+	//  - Receive(MATCHER)
+	//  - Receive(POINTER, MATCHER)
+	args := matcher.Args
+	if len(args) > 0 {
+		arg := args[0]
+		_, isSubMatcher := arg.(omegaMatcher)
+		if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr {
+			// Consume optional POINTER arg first, if it ain't no matcher ;)
+			resultReference = arg
+			args = args[1:]
+		}
+	}
+	if len(args) > 0 {
+		arg := args[0]
+		subMatcher, hasSubMatcher = arg.(omegaMatcher)
+		if !hasSubMatcher {
+			// At this point we assume the dev user wanted to assign a received
+			// value, so [POINTER,]MATCHER.
+			return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1))
+		}
+		// Consume optional MATCHER arg.
+		args = args[1:]
+	}
+	if len(args) > 0 {
+		// If there are still args present, reject all.
+		return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher")
+	}
 
 	winnerIndex, value, open := reflect.Select([]reflect.SelectCase{
 		{Dir: reflect.SelectRecv, Chan: channelValue},
@@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
 	}
 
 	if hasSubMatcher {
-		if didReceive {
-			matcher.receivedValue = value
-			return subMatcher.Match(matcher.receivedValue.Interface())
+		if !didReceive {
+			return false, nil
 		}
-		return false, nil
+		matcher.receivedValue = value
+		if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match {
+			return match, err
+		}
+		// if we received a match, then fall through in order to handle an
+		// optional assignment of the received value to the specified reference.
 	}
 
 	if didReceive {
-		if matcher.Arg != nil {
-			outValue := reflect.ValueOf(matcher.Arg)
+		if resultReference != nil {
+			outValue := reflect.ValueOf(resultReference)
 
 			if value.Type().AssignableTo(outValue.Elem().Type()) {
 				outValue.Elem().Set(value)
@@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
 				outValue.Elem().Set(value.Elem())
 				return true, nil
 			} else {
-				return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1))
+				return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1))
 			}
 
 		}
@@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
 }
 
 func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
-	subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+	var matcherArg interface{}
+	if len(matcher.Args) > 0 {
+		matcherArg = matcher.Args[len(matcher.Args)-1]
+	}
+	subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
 
 	closedAddendum := ""
 	if matcher.channelClosed {
@@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
 }
 
 func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
-	subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher)
+	var matcherArg interface{}
+	if len(matcher.Args) > 0 {
+		matcherArg = matcher.Args[len(matcher.Args)-1]
+	}
+	subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher)
 
 	closedAddendum := ""
 	if matcher.channelClosed {

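Taken together, Match, FailureMessage, and NegatedFailureMessage now treat the last element of Args as the optional matcher. The four accepted shapes listed in the comment above can be exercised like this (a hedged sketch; names and values are illustrative):

    package example_test

    import (
    	"testing"

    	. "github.com/onsi/gomega"
    )

    func TestReceiveArgShapes(t *testing.T) {
    	g := NewWithT(t)
    	ch := make(chan string, 1)

    	ch <- "x"
    	g.Expect(ch).To(Receive()) // Receive()

    	var v string
    	ch <- "y"
    	g.Expect(ch).To(Receive(&v)) // Receive(POINTER)

    	ch <- "z"
    	g.Expect(ch).To(Receive(Equal("z"))) // Receive(MATCHER)

    	ch <- "zz"
    	g.Expect(ch).To(Receive(&v, HaveLen(2))) // Receive(POINTER, MATCHER)
    }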
vendor/golang.org/x/exp/slices/cmp.go (new file, 44 lines; generated, vendored)
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+	if a < b || isNaN(a) {
+		return a
+	}
+	return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+	if a > b || isNaN(a) {
+		return a
+	}
+	return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+	return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+	xNaN := isNaN(x)
+	yNaN := isNaN(y)
+	if xNaN && yNaN {
+		return 0
+	}
+	if xNaN || x < y {
+		return -1
+	}
+	if yNaN || x > y {
+		return +1
+	}
+	return 0
+}
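All four helpers lean on the IEEE-754 property that NaN is the only value unequal to itself, which is what the isNaN helper (defined in sort.go below) checks. A standalone sketch of that trick, with the generic constraint narrowed for illustration:

    package main

    import (
    	"fmt"
    	"math"
    )

    // For floating-point x, x != x holds exactly when x is NaN;
    // for any other ordered type the comparison is always false.
    func isNaN[T int | float64](x T) bool { return x != x }

    func main() {
    	fmt.Println(isNaN(math.NaN())) // true
    	fmt.Println(isNaN(1.5))        // false
    	fmt.Println(isNaN(42))         // false
    }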
vendor/golang.org/x/exp/slices/slices.go (408 lines changed; generated, vendored)
@@ -3,23 +3,20 @@
 // license that can be found in the LICENSE file.
 
 // Package slices defines various functions useful with slices of any type.
-// Unless otherwise specified, these functions all apply to the elements
-// of a slice at index 0 <= i < len(s).
-//
-// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a
-// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings),
-// or the sorting may fail to sort correctly. A common case is when sorting slices of
-// floating-point numbers containing NaN values.
 package slices
 
-import "golang.org/x/exp/constraints"
+import (
+	"unsafe"
+
+	"golang.org/x/exp/constraints"
+)
 
 // Equal reports whether two slices are equal: the same length and all
 // elements equal. If the lengths are different, Equal returns false.
 // Otherwise, the elements are compared in increasing index order, and the
 // comparison stops at the first unequal pair.
 // Floating point NaNs are not considered equal.
-func Equal[E comparable](s1, s2 []E) bool {
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
 	if len(s1) != len(s2) {
 		return false
 	}
@@ -31,12 +28,12 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
 	return true
 }
 
-// EqualFunc reports whether two slices are equal using a comparison
+// EqualFunc reports whether two slices are equal using an equality
 // function on each pair of elements. If the lengths are different,
 // EqualFunc returns false. Otherwise, the elements are compared in
 // increasing index order, and the comparison stops at the first index
 // for which eq returns false.
-func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
 	if len(s1) != len(s2) {
 		return false
 	}
@@ -49,45 +46,37 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
 	return true
 }
 
-// Compare compares the elements of s1 and s2.
-// The elements are compared sequentially, starting at index 0,
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
 // until one element is not equal to the other.
 // The result of comparing the first non-matching elements is returned.
 // If both slices are equal until one of them ends, the shorter slice is
 // considered less than the longer one.
 // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-// Comparisons involving floating point NaNs are ignored.
-func Compare[E constraints.Ordered](s1, s2 []E) int {
-	s2len := len(s2)
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
 	for i, v1 := range s1 {
-		if i >= s2len {
+		if i >= len(s2) {
 			return +1
 		}
 		v2 := s2[i]
-		switch {
-		case v1 < v2:
-			return -1
-		case v1 > v2:
-			return +1
+		if c := cmpCompare(v1, v2); c != 0 {
+			return c
 		}
 	}
-	if len(s1) < s2len {
+	if len(s1) < len(s2) {
 		return -1
 	}
 	return 0
 }
 
-// CompareFunc is like Compare but uses a comparison function
-// on each pair of elements. The elements are compared in increasing
-// index order, and the comparisons stop after the first time cmp
-// returns non-zero.
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
 // The result is the first non-zero result of cmp; if cmp always
 // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
 // and +1 if len(s1) > len(s2).
-func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int {
-	s2len := len(s2)
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
 	for i, v1 := range s1 {
-		if i >= s2len {
+		if i >= len(s2) {
 			return +1
 		}
 		v2 := s2[i]
@@ -95,7 +84,7 @@ func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
 			return c
 		}
 	}
-	if len(s1) < s2len {
+	if len(s1) < len(s2) {
 		return -1
 	}
 	return 0
@@ -103,9 +92,9 @@ func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
 
 // Index returns the index of the first occurrence of v in s,
 // or -1 if not present.
-func Index[E comparable](s []E, v E) int {
-	for i, vs := range s {
-		if v == vs {
+func Index[S ~[]E, E comparable](s S, v E) int {
+	for i := range s {
+		if v == s[i] {
 			return i
 		}
 	}
@@ -114,9 +103,9 @@ func Index[S ~[]E, E comparable](s S, v E) int {
 
 // IndexFunc returns the first index i satisfying f(s[i]),
 // or -1 if none do.
-func IndexFunc[E any](s []E, f func(E) bool) int {
-	for i, v := range s {
-		if f(v) {
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+	for i := range s {
+		if f(s[i]) {
 			return i
 		}
 	}
@@ -124,33 +113,104 @@ func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
 }
 
 // Contains reports whether v is present in s.
-func Contains[E comparable](s []E, v E) bool {
+func Contains[S ~[]E, E comparable](s S, v E) bool {
 	return Index(s, v) >= 0
 }
 
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+	return IndexFunc(s, f) >= 0
+}
+
 // Insert inserts the values v... into s at index i,
 // returning the modified slice.
-// In the returned slice r, r[i] == v[0].
-// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
 // Insert panics if i is out of range.
 // This function is O(len(s) + len(v)).
 func Insert[S ~[]E, E any](s S, i int, v ...E) S {
-	tot := len(s) + len(v)
-	if tot <= cap(s) {
-		s2 := s[:tot]
-		copy(s2[i+len(v):], s[i:])
+	m := len(v)
+	if m == 0 {
+		return s
+	}
+	n := len(s)
+	if i == n {
+		return append(s, v...)
+	}
+	if n+m > cap(s) {
+		// Use append rather than make so that we bump the size of
+		// the slice up to the next storage class.
+		// This is what Grow does but we don't call Grow because
+		// that might copy the values twice.
+		s2 := append(s[:i], make(S, n+m-i)...)
 		copy(s2[i:], v)
+		copy(s2[i+m:], s[i:])
 		return s2
 	}
-	s2 := make(S, tot)
-	copy(s2, s[:i])
-	copy(s2[i:], v)
-	copy(s2[i+len(v):], s[i:])
-	return s2
+	s = s[:n+m]
+
+	// before:
+	// s: aaaaaaaabbbbccccccccdddd
+	//            ^   ^       ^   ^
+	//            i  i+m      n  n+m
+	// after:
+	// s: aaaaaaaavvvvbbbbcccccccc
+	//            ^   ^       ^   ^
+	//            i  i+m      n  n+m
+	//
+	// a are the values that don't move in s.
+	// v are the values copied in from v.
+	// b and c are the values from s that are shifted up in index.
+	// d are the values that get overwritten, never to be seen again.
+
+	if !overlaps(v, s[i+m:]) {
+		// Easy case - v does not overlap either the c or d regions.
+		// (It might be in some of a or b, or elsewhere entirely.)
+		// The data we copy up doesn't write to v at all, so just do it.
+
+		copy(s[i+m:], s[i:])
+
+		// Now we have
+		// s: aaaaaaaabbbbbbbbcccccccc
+		//            ^   ^       ^   ^
+		//            i  i+m      n  n+m
+		// Note the b values are duplicated.
+
+		copy(s[i:], v)
+
+		// Now we have
+		// s: aaaaaaaavvvvbbbbcccccccc
+		//            ^   ^       ^   ^
+		//            i  i+m      n  n+m
+		// That's the result we want.
+		return s
+	}
+
+	// The hard case - v overlaps c or d. We can't just shift up
+	// the data because we'd move or clobber the values we're trying
+	// to insert.
+	// So instead, write v on top of d, then rotate.
+	copy(s[n:], v)
+
+	// Now we have
+	// s: aaaaaaaabbbbccccccccvvvv
+	//            ^   ^       ^   ^
+	//            i  i+m      n  n+m
+
+	rotateRight(s[i:], m)
+
+	// Now we have
+	// s: aaaaaaaavvvvbbbbcccccccc
+	//            ^   ^       ^   ^
+	//            i  i+m      n  n+m
+	// That's the result we want.
+	return s
 }
 
 // Delete removes the elements s[i:j] from s, returning the modified slice.
 // Delete panics if s[i:j] is not a valid slice of s.
 // Delete modifies the contents of the slice s; it does not create a new slice.
 // Delete is O(len(s)-j), so if many items must be deleted, it is better to
 // make a single call deleting them all together than to delete one at a time.
 // Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those
@@ -162,6 +222,115 @@ func Delete[S ~[]E, E any](s S, i, j int) S {
 	return append(s[:i], s[j:]...)
 }
 
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// When DeleteFunc removes m elements, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage
+// collected.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+	i := IndexFunc(s, del)
+	if i == -1 {
+		return s
+	}
+	// Don't start copying elements until we find one to delete.
+	for j := i + 1; j < len(s); j++ {
+		if v := s[j]; !del(v) {
+			s[i] = v
+			i++
+		}
+	}
+	return s[:i]
+}
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+	_ = s[i:j] // verify that i:j is a valid subslice
+
+	if i == j {
+		return Insert(s, i, v...)
+	}
+	if j == len(s) {
+		return append(s[:i], v...)
+	}
+
+	tot := len(s[:i]) + len(v) + len(s[j:])
+	if tot > cap(s) {
+		// Too big to fit, allocate and copy over.
+		s2 := append(s[:i], make(S, tot-i)...) // See Insert
+		copy(s2[i:], v)
+		copy(s2[i+len(v):], s[j:])
+		return s2
+	}
+
+	r := s[:tot]
+
+	if i+len(v) <= j {
+		// Easy, as v fits in the deleted portion.
+		copy(r[i:], v)
+		if i+len(v) != j {
+			copy(r[i+len(v):], s[j:])
+		}
+		return r
+	}
+
+	// We are expanding (v is bigger than j-i).
+	// The situation is something like this:
+	// (example has i=4,j=8,len(s)=16,len(v)=6)
+	// s: aaaaxxxxbbbbbbbbyy
+	//        ^   ^       ^ ^
+	//        i   j  len(s) tot
+	// a: prefix of s
+	// x: deleted range
+	// b: more of s
+	// y: area to expand into
+
+	if !overlaps(r[i+len(v):], v) {
+		// Easy, as v is not clobbered by the first copy.
+		copy(r[i+len(v):], s[j:])
+		copy(r[i:], v)
+		return r
+	}
+
+	// This is a situation where we don't have a single place to which
+	// we can copy v. Parts of it need to go to two different places.
+	// We want to copy the prefix of v into y and the suffix into x, then
+	// rotate |y| spots to the right.
+	//
+	//        v[2:]         v[:2]
+	//         |             |
+	// s: aaaavvvvbbbbbbbbvv
+	//        ^   ^       ^ ^
+	//        i   j  len(s) tot
+	//
+	// If either of those two destinations don't alias v, then we're good.
+	y := len(v) - (j - i) // length of y portion
+
+	if !overlaps(r[i:j], v) {
+		copy(r[i:j], v[y:])
+		copy(r[len(s):], v[:y])
+		rotateRight(r[i:], y)
+		return r
+	}
+	if !overlaps(r[len(s):], v) {
+		copy(r[len(s):], v[:y])
+		copy(r[i:j], v[y:])
+		rotateRight(r[i:], y)
+		return r
+	}
+
+	// Now we know that v overlaps both x and y.
+	// That means that the entirety of b is *inside* v.
+	// So we don't need to preserve b at all; instead we
+	// can copy v first, then copy the b part of v out of
+	// v to the right destination.
+	k := startIdx(v, s[j:])
+	copy(r[i:], v)
+	copy(r[i+len(v):], r[i+k:])
+	return r
+}
+
 // Clone returns a copy of the slice.
 // The elements are copied using assignment, so this is a shallow clone.
 func Clone[S ~[]E, E any](s S) S {
@@ -174,35 +343,40 @@ func Clone[S ~[]E, E any](s S) S {
 
 // Compact replaces consecutive runs of equal elements with a single copy.
 // This is like the uniq command found on Unix.
-// Compact modifies the contents of the slice s; it does not create a new slice.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// When Compact discards m elements in total, it might not modify the elements
+// s[len(s)-m:len(s)]. If those elements contain pointers you might consider
+// zeroing those elements so that objects they reference can be garbage collected.
 func Compact[S ~[]E, E comparable](s S) S {
-	if len(s) == 0 {
+	if len(s) < 2 {
 		return s
 	}
 	i := 1
-	last := s[0]
-	for _, v := range s[1:] {
-		if v != last {
-			s[i] = v
+	for k := 1; k < len(s); k++ {
+		if s[k] != s[k-1] {
+			if i != k {
+				s[i] = s[k]
+			}
 			i++
-			last = v
 		}
 	}
 	return s[:i]
 }
 
-// CompactFunc is like Compact but uses a comparison function.
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
 func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
-	if len(s) == 0 {
+	if len(s) < 2 {
 		return s
 	}
 	i := 1
-	last := s[0]
-	for _, v := range s[1:] {
-		if !eq(v, last) {
-			s[i] = v
+	for k := 1; k < len(s); k++ {
+		if !eq(s[k], s[k-1]) {
+			if i != k {
+				s[i] = s[k]
+			}
 			i++
-			last = v
 		}
 	}
 	return s[:i]
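The rewritten Compact keeps the first element of each equal run and returns a possibly shorter slice; a usage sketch (the case-insensitive CompactFunc comparison is an assumption chosen for illustration):

    package main

    import (
    	"fmt"
    	"strings"

    	"golang.org/x/exp/slices"
    )

    func main() {
    	s := []int{1, 1, 2, 2, 2, 3}
    	fmt.Println(slices.Compact(s)) // [1 2 3]

    	// CompactFunc keeps the first of each run that compares equal.
    	words := []string{"go", "GO", "gopher"}
    	words = slices.CompactFunc(words, func(a, b string) bool {
    		return strings.EqualFold(a, b)
    	})
    	fmt.Println(words) // [go gopher]
    }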
@@ -210,14 +384,116 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
 
 // Grow increases the slice's capacity, if necessary, to guarantee space for
 // another n elements. After Grow(n), at least n elements can be appended
-// to the slice without another allocation. Grow may modify elements of the
-// slice between the length and the capacity. If n is negative or too large to
+// to the slice without another allocation. If n is negative or too large to
 // allocate the memory, Grow panics.
 func Grow[S ~[]E, E any](s S, n int) S {
-	return append(s, make(S, n)...)[:len(s)]
+	if n < 0 {
+		panic("cannot be negative")
+	}
+	if n -= cap(s) - len(s); n > 0 {
+		// TODO(https://go.dev/issue/53888): Make using []E instead of S
+		// to workaround a compiler bug where the runtime.growslice optimization
+		// does not take effect. Revert when the compiler is fixed.
+		s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+	}
+	return s
 }
 
 // Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
 func Clip[S ~[]E, E any](s S) S {
 	return s[:len(s):len(s)]
 }
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+//   0123456789
+// split up like this
+//   01 234567 89
+// swap first 2 and last 2
+//   89 234567 01
+// join first parts
+//   89234567 01
+// recursively rotate first left part by 2
+//   23456789 01
+// join at the end
+//   2345678901
+//
+// rotate left by 8
+// start with
+//   0123456789
+// split up like this
+//   01 234567 89
+// swap first 2 and last 2
+//   89 234567 01
+// join last parts
+//   89 23456701
+// recursively rotate second part left by 6
+//   89 01234567
+// join at the end
+//   8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates b left by n spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+	for r != 0 && r != len(s) {
+		if r*2 <= len(s) {
+			swap(s[:r], s[len(s)-r:])
+			s = s[:len(s)-r]
+		} else {
+			swap(s[:len(s)-r], s[r:])
+			s, r = s[len(s)-r:], r*2-len(s)
+		}
+	}
+}
+func rotateRight[E any](s []E, r int) {
+	rotateLeft(s, len(s)-r)
+}
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+	for i := 0; i < len(x); i++ {
+		x[i], y[i] = y[i], x[i]
+	}
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+	if len(a) == 0 || len(b) == 0 {
+		return false
+	}
+	elemSize := unsafe.Sizeof(a[0])
+	if elemSize == 0 {
+		return false
+	}
+	// TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+	// Also see crypto/internal/alias/alias.go:AnyOverlap
+	return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+		uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+	p := &needle[0]
+	for i := range haystack {
+		if p == &haystack[i] {
+			return i
+		}
+	}
+	// TODO: what if the overlap is by a non-integral number of Es?
+	panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+		s[i], s[j] = s[j], s[i]
+	}
+}

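A short sketch of the Insert contract documented above (r[i] == v[0], and the old r[i] moves to r[i+len(v)]), including the self-overlap case the rotate machinery exists for; the values are illustrative:

    package main

    import (
    	"fmt"

    	"golang.org/x/exp/slices"
    )

    func main() {
    	s := []int{0, 1, 2, 3, 4, 5}
    	s = slices.Insert(s, 2, 9, 9)
    	fmt.Println(s) // [0 1 9 9 2 3 4 5]

    	// When v aliases s itself and there is spare capacity, the in-place
    	// copy-then-rotateRight path runs instead of a fresh allocation.
    	t := make([]int, 0, 8)
    	t = append(t, 0, 1, 2, 3, 4, 5)
    	t = slices.Insert(t, 1, t[3:5]...)
    	fmt.Println(t) // [0 3 4 1 2 3 4 5]
    }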
vendor/golang.org/x/exp/slices/sort.go (176 lines changed; generated, vendored)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
 package slices
 
 import (
@@ -11,97 +13,157 @@ import (
 )
 
 // Sort sorts a slice of any ordered type in ascending order.
-// Sort may fail to sort correctly when sorting slices of floating-point
-// numbers containing Not-a-number (NaN) values.
-// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))})
-// instead if the input may contain NaNs.
-func Sort[E constraints.Ordered](x []E) {
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
 	n := len(x)
 	pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
 }
 
-// SortFunc sorts the slice x in ascending order as determined by the less function.
-// This sort is not guaranteed to be stable.
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b.
 //
-// SortFunc requires that less is a strict weak ordering.
+// SortFunc requires that cmp is a strict weak ordering.
 // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
-func SortFunc[E any](x []E, less func(a, b E) bool) {
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
 	n := len(x)
-	pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less)
+	pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
 }
 
-// SortStable sorts the slice x while keeping the original order of equal
-// elements, using less to compare elements.
-func SortStableFunc[E any](x []E, less func(a, b E) bool) {
-	stableLessFunc(x, len(x), less)
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+	stableCmpFunc(x, len(x), cmp)
 }
 
 // IsSorted reports whether x is sorted in ascending order.
-func IsSorted[E constraints.Ordered](x []E) bool {
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
 	for i := len(x) - 1; i > 0; i-- {
-		if x[i] < x[i-1] {
+		if cmpLess(x[i], x[i-1]) {
 			return false
 		}
 	}
 	return true
 }
 
-// IsSortedFunc reports whether x is sorted in ascending order, with less as the
-// comparison function.
-func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
 	for i := len(x) - 1; i > 0; i-- {
-		if less(x[i], x[i-1]) {
+		if cmp(x[i], x[i-1]) < 0 {
 			return false
 		}
 	}
 	return true
 }
 
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+	if len(x) < 1 {
+		panic("slices.Min: empty list")
+	}
+	m := x[0]
+	for i := 1; i < len(x); i++ {
+		m = min(m, x[i])
+	}
+	return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+	if len(x) < 1 {
+		panic("slices.MinFunc: empty list")
+	}
+	m := x[0]
+	for i := 1; i < len(x); i++ {
+		if cmp(x[i], m) < 0 {
+			m = x[i]
+		}
+	}
+	return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+	if len(x) < 1 {
+		panic("slices.Max: empty list")
+	}
+	m := x[0]
+	for i := 1; i < len(x); i++ {
+		m = max(m, x[i])
+	}
+	return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+	if len(x) < 1 {
+		panic("slices.MaxFunc: empty list")
+	}
+	m := x[0]
+	for i := 1; i < len(x); i++ {
+		if cmp(x[i], m) > 0 {
+			m = x[i]
+		}
+	}
+	return m
+}
+
 // BinarySearch searches for target in a sorted slice and returns the position
 // where target is found, or the position where target would appear in the
 // sort order; it also returns a bool saying whether the target is really found
 // in the slice. The slice must be sorted in increasing order.
-func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
-	// search returns the leftmost position where f returns true, or len(x) if f
-	// returns false for all x. This is the insertion position for target in x,
-	// and could point to an element that's either == target or not.
-	pos := search(len(x), func(i int) bool { return x[i] >= target })
-	if pos >= len(x) || x[pos] != target {
-		return pos, false
-	} else {
-		return pos, true
-	}
-}
-
-// BinarySearchFunc works like BinarySearch, but uses a custom comparison
-// function. The slice must be sorted in increasing order, where "increasing" is
-// defined by cmp. cmp(a, b) is expected to return an integer comparing the two
-// parameters: 0 if a == b, a negative number if a < b and a positive number if
-// a > b.
-func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) {
-	pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 })
-	if pos >= len(x) || cmp(x[pos], target) != 0 {
-		return pos, false
-	} else {
-		return pos, true
-	}
-}
-
-func search(n int, f func(int) bool) int {
-	// Define f(-1) == false and f(n) == true.
-	// Invariant: f(i-1) == false, f(j) == true.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+	// Inlining is faster than calling BinarySearchFunc with a lambda.
+	n := len(x)
+	// Define x[-1] < target and x[n] >= target.
+	// Invariant: x[i-1] < target, x[j] >= target.
 	i, j := 0, n
 	for i < j {
 		h := int(uint(i+j) >> 1) // avoid overflow when computing h
 		// i ≤ h < j
-		if !f(h) {
-			i = h + 1 // preserves f(i-1) == false
+		if cmpLess(x[h], target) {
+			i = h + 1 // preserves x[i-1] < target
 		} else {
-			j = h // preserves f(j) == true
+			j = h // preserves x[j] >= target
 		}
 	}
-	// i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
-	return i
+	// i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+	return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
 }
 
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+	n := len(x)
+	// Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+	// Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+	i, j := 0, n
+	for i < j {
+		h := int(uint(i+j) >> 1) // avoid overflow when computing h
+		// i ≤ h < j
+		if cmp(x[h], target) < 0 {
+			i = h + 1 // preserves cmp(x[i - 1], target) < 0
+		} else {
+			j = h // preserves cmp(x[j], target) >= 0
+		}
+	}
+	// i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+	return i, i < n && cmp(x[i], target) == 0
+}
+
 type sortedHint int // hint for pdqsort when choosing the pivot
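The widened BinarySearchFunc signature (E and T may differ) allows searching a sorted slice of structs by a key of another type; a hedged sketch with made-up data:

    package main

    import (
    	"fmt"
    	"strings"

    	"golang.org/x/exp/slices"
    )

    type person struct {
    	name string
    	age  int
    }

    func main() {
    	// Must already be sorted consistently with cmp (here: by name).
    	people := []person{{"alice", 30}, {"bob", 25}, {"carol", 41}}

    	i, found := slices.BinarySearchFunc(people, "bob",
    		func(p person, name string) int { return strings.Compare(p.name, name) })
    	fmt.Println(i, found) // 1 true
    }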
@@ -125,3 +187,9 @@ func (r *xorshift) Next() uint64 {
 func nextPowerOfTwo(length int) uint {
 	return 1 << bits.Len(uint(length))
 }
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+	return x != x
+}

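The less-to-cmp migration above changes every *Func caller: a predicate returning bool becomes a three-way comparison returning an int. A small sketch of both the new NaN ordering and the new cmp style (data is illustrative):

    package main

    import (
    	"fmt"
    	"math"

    	"golang.org/x/exp/slices"
    )

    func main() {
    	xs := []float64{3, math.NaN(), 1, 2}
    	slices.Sort(xs)
    	fmt.Println(xs) // [NaN 1 2 3]: NaNs now order before other values

    	// Previously: slices.SortFunc(ys, func(a, b string) bool { return len(a) < len(b) })
    	ys := []string{"bb", "a", "ccc"}
    	slices.SortFunc(ys, func(a, b string) int { return len(a) - len(b) })
    	fmt.Println(ys) // [a bb ccc]
    }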
vendor/golang.org/x/exp/slices/zsortfunc.go → vendor/golang.org/x/exp/slices/zsortanyfunc.go (renamed, 154 lines changed; generated, vendored)
@ -6,28 +6,28 @@
|
||||
|
||||
package slices

// insertionSortLessFunc sorts data[a:b] using insertion sort.
func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
// insertionSortCmpFunc sorts data[a:b] using insertion sort.
func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    for i := a + 1; i < b; i++ {
        for j := i; j > a && less(data[j], data[j-1]); j-- {
        for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
            data[j], data[j-1] = data[j-1], data[j]
        }
    }
}

// siftDownLessFunc implements the heap property on data[lo:hi].
// siftDownCmpFunc implements the heap property on data[lo:hi].
// first is an offset into the array where the root of the heap lies.
func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) {
func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
    root := lo
    for {
        child := 2*root + 1
        if child >= hi {
            break
        }
        if child+1 < hi && less(data[first+child], data[first+child+1]) {
        if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
            child++
        }
        if !less(data[first+root], data[first+child]) {
        if !(cmp(data[first+root], data[first+child]) < 0) {
            return
        }
        data[first+root], data[first+child] = data[first+child], data[first+root]
@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool
    }
}

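Editor's note: the whole file follows one mechanical rule — every less(a, b) call becomes cmp(a, b) < 0. For intuition, a boolean comparator can be adapted to the three-way contract like this (an assumed sketch, not part of the vendored code):

// lessToCmp adapts a less-style comparator to the cmp-style contract used
// above: negative when a sorts before b, non-negative otherwise. It never
// returns 0, which is fine for sorting but would be wrong for equality
// checks such as BinarySearchFunc's final comparison.
func lessToCmp[E any](less func(a, b E) bool) func(a, b E) int {
    return func(a, b E) int {
        if less(a, b) {
            return -1
        }
        return 1
    }
}
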
func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    first := a
    lo := 0
    hi := b - a

    // Build heap with greatest element at top.
    for i := (hi - 1) / 2; i >= 0; i-- {
        siftDownLessFunc(data, i, hi, first, less)
        siftDownCmpFunc(data, i, hi, first, cmp)
    }

    // Pop elements, largest first, into end of data.
    for i := hi - 1; i >= 0; i-- {
        data[first], data[first+i] = data[first+i], data[first]
        siftDownLessFunc(data, lo, i, first, less)
        siftDownCmpFunc(data, lo, i, first, cmp)
    }
}

// pdqsortLessFunc sorts data[a:b].
// pdqsortCmpFunc sorts data[a:b].
// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
// C++ implementation: https://github.com/orlp/pdqsort
// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
    const maxInsertion = 12

    var (
@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {
        length := b - a

        if length <= maxInsertion {
            insertionSortLessFunc(data, a, b, less)
            insertionSortCmpFunc(data, a, b, cmp)
            return
        }

        // Fall back to heapsort if too many bad choices were made.
        if limit == 0 {
            heapSortLessFunc(data, a, b, less)
            heapSortCmpFunc(data, a, b, cmp)
            return
        }

        // If the last partitioning was imbalanced, we need to breaking patterns.
        if !wasBalanced {
            breakPatternsLessFunc(data, a, b, less)
            breakPatternsCmpFunc(data, a, b, cmp)
            limit--
        }

        pivot, hint := choosePivotLessFunc(data, a, b, less)
        pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
        if hint == decreasingHint {
            reverseRangeLessFunc(data, a, b, less)
            reverseRangeCmpFunc(data, a, b, cmp)
            // The chosen pivot was pivot-a elements after the start of the array.
            // After reversing it is pivot-a elements before the end of the array.
            // The idea came from Rust's implementation.
@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) {

        // The slice is likely already sorted.
        if wasBalanced && wasPartitioned && hint == increasingHint {
            if partialInsertionSortLessFunc(data, a, b, less) {
            if partialInsertionSortCmpFunc(data, a, b, cmp) {
                return
            }
        }

        // Probably the slice contains many duplicate elements, partition the slice into
        // elements equal to and elements greater than the pivot.
        if a > 0 && !less(data[a-1], data[pivot]) {
            mid := partitionEqualLessFunc(data, a, b, pivot, less)
        if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
            mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
            a = mid
            continue
        }

        mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less)
        mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
        wasPartitioned = alreadyPartitioned

        leftLen, rightLen := mid-a, b-mid
        balanceThreshold := length / 8
        if leftLen < rightLen {
            wasBalanced = leftLen >= balanceThreshold
            pdqsortLessFunc(data, a, mid, limit, less)
            pdqsortCmpFunc(data, a, mid, limit, cmp)
            a = mid + 1
        } else {
            wasBalanced = rightLen >= balanceThreshold
            pdqsortLessFunc(data, mid+1, b, limit, less)
            pdqsortCmpFunc(data, mid+1, b, limit, cmp)
            b = mid
        }
    }
}

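Editor's note, a sketch of how this routine is typically driven (mirroring the exported sort in this package; the wrapper name is illustrative): the limit budget is seeded with the bit length of the slice, so roughly log2(n) unbalanced pivots are tolerated before the heapsort fallback kicks in.

// sortCmpFunc is a hypothetical driver for pdqsortCmpFunc.
func sortCmpFunc[E any](data []E, cmp func(a, b E) int) {
    n := len(data)
    if n <= 1 {
        return
    }
    limit := bits.Len(uint(n)) // ~log2(n) bad pivots allowed
    pdqsortCmpFunc(data, 0, n, limit, cmp)
}
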
// partitionLessFunc does one quicksort partition.
// partitionCmpFunc does one quicksort partition.
// Let p = data[pivot]
// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
// On return, data[newpivot] = p
func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for i <= j && less(data[i], data[a]) {
    for i <= j && (cmp(data[i], data[a]) < 0) {
        i++
    }
    for i <= j && !less(data[j], data[a]) {
    for i <= j && !(cmp(data[j], data[a]) < 0) {
        j--
    }
    if i > j {
@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
    j--

    for {
        for i <= j && less(data[i], data[a]) {
        for i <= j && (cmp(data[i], data[a]) < 0) {
            i++
        }
        for i <= j && !less(data[j], data[a]) {
        for i <= j && !(cmp(data[j], data[a]) < 0) {
            j--
        }
        if i > j {
@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool)
    return j, false
}

// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for {
        for i <= j && !less(data[a], data[i]) {
        for i <= j && !(cmp(data[a], data[i]) < 0) {
            i++
        }
        for i <= j && less(data[a], data[j]) {
        for i <= j && (cmp(data[a], data[j]) < 0) {
            j--
        }
        if i > j {
@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E)
    return i
}

// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
    const (
        maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
        shortestShifting = 50 // don't shift any elements on short arrays
    )
    i := a + 1
    for j := 0; j < maxSteps; j++ {
        for i < b && !less(data[i], data[i-1]) {
        for i < b && !(cmp(data[i], data[i-1]) < 0) {
            i++
        }

@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
        // Shift the smaller one to the left.
        if i-a >= 2 {
            for j := i - 1; j >= 1; j-- {
                if !less(data[j], data[j-1]) {
                if !(cmp(data[j], data[j-1]) < 0) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
        // Shift the greater one to the right.
        if b-i >= 2 {
            for j := i + 1; j < b; j++ {
                if !less(data[j], data[j-1]) {
                if !(cmp(data[j], data[j-1]) < 0) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b
    return false
}

// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
// that might cause imbalanced partitions in quicksort.
func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    length := b - a
    if length >= 8 {
        random := xorshift(length)
@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
    }
}

// choosePivotLessFunc chooses a pivot in data[a:b].
// choosePivotCmpFunc chooses a pivot in data[a:b].
//
// [0,8): chooses a static pivot.
// [8,shortestNinther): uses the simple median-of-three method.
// [shortestNinther,∞): uses the Tukey ninther method.
func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) {
func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
    const (
        shortestNinther = 50
        maxSwaps        = 4 * 3
@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
    if l >= 8 {
        if l >= shortestNinther {
            // Tukey ninther method, the idea came from Rust's implementation.
            i = medianAdjacentLessFunc(data, i, &swaps, less)
            j = medianAdjacentLessFunc(data, j, &swaps, less)
            k = medianAdjacentLessFunc(data, k, &swaps, less)
            i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
            j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
            k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
        }
        // Find the median among i, j, k and stores it into j.
        j = medianLessFunc(data, i, j, k, &swaps, less)
        j = medianCmpFunc(data, i, j, k, &swaps, cmp)
    }

    switch swaps {
@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv
    }
}

// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) {
    if less(data[b], data[a]) {
// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
    if cmp(data[b], data[a]) < 0 {
        *swaps++
        return b, a
    }
    return a, b
}

// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int {
    a, b = order2LessFunc(data, a, b, swaps, less)
    b, c = order2LessFunc(data, b, c, swaps, less)
    a, b = order2LessFunc(data, a, b, swaps, less)
// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
    a, b = order2CmpFunc(data, a, b, swaps, cmp)
    b, c = order2CmpFunc(data, b, c, swaps, cmp)
    a, b = order2CmpFunc(data, a, b, swaps, cmp)
    return b
}

// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int {
    return medianLessFunc(data, a-1, a, a+1, swaps, less)
// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
    return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
}

func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
    i := a
    j := b - 1
    for i < j {
@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
    }
}

func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) {
func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
    for i := 0; i < n; i++ {
        data[a+i], data[b+i] = data[b+i], data[a+i]
    }
}

func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
    blockSize := 20 // must be > 0
    a, b := 0, blockSize
    for b <= n {
        insertionSortLessFunc(data, a, b, less)
        insertionSortCmpFunc(data, a, b, cmp)
        a = b
        b += blockSize
    }
    insertionSortLessFunc(data, a, n, less)
    insertionSortCmpFunc(data, a, n, cmp)

    for blockSize < n {
        a, b = 0, 2*blockSize
        for b <= n {
            symMergeLessFunc(data, a, a+blockSize, b, less)
            symMergeCmpFunc(data, a, a+blockSize, b, cmp)
            a = b
            b += 2 * blockSize
        }
        if m := a + blockSize; m < n {
            symMergeLessFunc(data, a, m, n, less)
            symMergeCmpFunc(data, a, m, n, cmp)
        }
        blockSize *= 2
    }
}

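Editor's note, a worked example of the merge schedule (assumed n, not from the diff): for n = 1000, the first loop insertion-sorts 50 blocks of 20 elements, and the outer loop then merges with blockSize = 20, 40, 80, 160, 320, 640 — six doubling passes, i.e. roughly log2(1000/20) rounded up.

// mergePasses is a hypothetical helper counting stableCmpFunc's outer-loop passes.
func mergePasses(n int) int {
    passes := 0
    for blockSize := 20; blockSize < n; blockSize *= 2 {
        passes++ // each pass merges adjacent sorted runs of blockSize elements
    }
    return passes // mergePasses(1000) == 6
}
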
// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) {
// symMerge assumes non-degenerate arguments: a < m && m < b.
// Having the caller check this condition eliminates many leaf recursion calls,
// which improves performance.
func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
    // Avoid unnecessary recursions of symMerge
    // by direct insertion of data[a] into data[m:b]
    // if data[a:m] only contains one element.
@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
        j := b
        for i < j {
            h := int(uint(i+j) >> 1)
            if less(data[h], data[a]) {
            if cmp(data[h], data[a]) < 0 {
                i = h + 1
            } else {
                j = h
@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
        j := m
        for i < j {
            h := int(uint(i+j) >> 1)
            if !less(data[m], data[h]) {
            if !(cmp(data[m], data[h]) < 0) {
                i = h + 1
            } else {
                j = h
@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {

    for start < r {
        c := int(uint(start+r) >> 1)
        if !less(data[p-c], data[c]) {
        if !(cmp(data[p-c], data[c]) < 0) {
            start = c + 1
        } else {
            r = c
@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {

    end := n - start
    if start < m && m < end {
        rotateLessFunc(data, start, m, end, less)
        rotateCmpFunc(data, start, m, end, cmp)
    }
    if a < start && start < mid {
        symMergeLessFunc(data, a, start, mid, less)
        symMergeCmpFunc(data, a, start, mid, cmp)
    }
    if mid < end && end < b {
        symMergeLessFunc(data, mid, end, b, less)
        symMergeCmpFunc(data, mid, end, b, cmp)
    }
}

// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
// Data of the form 'x u v y' is changed to 'x v u y'.
// rotate performs at most b-a many calls to data.Swap,
// and it assumes non-degenerate arguments: a < m && m < b.
func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) {
func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
    i := m - a
    j := b - m

    for i != j {
        if i > j {
            swapRangeLessFunc(data, m-i, m, j, less)
            swapRangeCmpFunc(data, m-i, m, j, cmp)
            i -= j
        } else {
            swapRangeLessFunc(data, m-i, m+j-i, i, less)
            swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
            j -= i
        }
    }
    // i == j
    swapRangeLessFunc(data, m-i, m, i, less)
    swapRangeCmpFunc(data, m-i, m, i, cmp)
}

34
vendor/golang.org/x/exp/slices/zsortordered.go
generated
vendored
@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints"
// insertionSortOrdered sorts data[a:b] using insertion sort.
func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
    for i := a + 1; i < b; i++ {
        for j := i; j > a && (data[j] < data[j-1]); j-- {
        for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
            data[j], data[j-1] = data[j-1], data[j]
        }
    }
@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
        if child >= hi {
            break
        }
        if child+1 < hi && (data[first+child] < data[first+child+1]) {
        if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
            child++
        }
        if !(data[first+root] < data[first+child]) {
        if !cmpLess(data[first+root], data[first+child]) {
            return
        }
        data[first+root], data[first+child] = data[first+child], data[first+root]
@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {

        // Probably the slice contains many duplicate elements, partition the slice into
        // elements equal to and elements greater than the pivot.
        if a > 0 && !(data[a-1] < data[pivot]) {
        if a > 0 && !cmpLess(data[a-1], data[pivot]) {
            mid := partitionEqualOrdered(data, a, b, pivot)
            a = mid
            continue
@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
    data[a], data[pivot] = data[pivot], data[a]
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for i <= j && (data[i] < data[a]) {
    for i <= j && cmpLess(data[i], data[a]) {
        i++
    }
    for i <= j && !(data[j] < data[a]) {
    for i <= j && !cmpLess(data[j], data[a]) {
        j--
    }
    if i > j {
@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo
    j--

    for {
        for i <= j && (data[i] < data[a]) {
        for i <= j && cmpLess(data[i], data[a]) {
            i++
        }
        for i <= j && !(data[j] < data[a]) {
        for i <= j && !cmpLess(data[j], data[a]) {
            j--
        }
        if i > j {
@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne
    i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned

    for {
        for i <= j && !(data[a] < data[i]) {
        for i <= j && !cmpLess(data[a], data[i]) {
            i++
        }
        for i <= j && (data[a] < data[j]) {
        for i <= j && cmpLess(data[a], data[j]) {
            j--
        }
        if i > j {
@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
    )
    i := a + 1
    for j := 0; j < maxSteps; j++ {
        for i < b && !(data[i] < data[i-1]) {
        for i < b && !cmpLess(data[i], data[i-1]) {
            i++
        }

@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
        // Shift the smaller one to the left.
        if i-a >= 2 {
            for j := i - 1; j >= 1; j-- {
                if !(data[j] < data[j-1]) {
                if !cmpLess(data[j], data[j-1]) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool
        // Shift the greater one to the right.
        if b-i >= 2 {
            for j := i + 1; j < b; j++ {
                if !(data[j] < data[j-1]) {
                if !cmpLess(data[j], data[j-1]) {
                    break
                }
                data[j], data[j-1] = data[j-1], data[j]
@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h

// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
    if data[b] < data[a] {
    if cmpLess(data[b], data[a]) {
        *swaps++
        return b, a
    }
@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
    j := b
    for i < j {
        h := int(uint(i+j) >> 1)
        if data[h] < data[a] {
        if cmpLess(data[h], data[a]) {
            i = h + 1
        } else {
            j = h
@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
    j := m
    for i < j {
        h := int(uint(i+j) >> 1)
        if !(data[m] < data[h]) {
        if !cmpLess(data[m], data[h]) {
            i = h + 1
        } else {
            j = h
@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {

    for start < r {
        c := int(uint(start+r) >> 1)
        if !(data[p-c] < data[c]) {
        if !cmpLess(data[p-c], data[c]) {
            start = c + 1
        } else {
            r = c

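Editor's note: cmpLess itself is not shown in these hunks. In the upstream package it is, to the best of my recollection, a NaN-aware replacement for the bare < operator — which is the point of this file's change, since plain < silently misorders NaN values:

// Approximate upstream definition (paraphrased from x/exp/slices; check the
// vendored source for the authoritative version): NaNs sort before non-NaNs.
func cmpLess[T constraints.Ordered](a, b T) bool {
    return (isNaN(a) && !isNaN(b)) || a < b
}
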
2
vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go
generated
vendored
@ -177,7 +177,7 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val {
        return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error)
    }

    v, details, err := a.result.Program.Eval(a.activation)
    v, details, err := a.result.Program.ContextEval(a.context, a.activation)
    if details == nil {
        return types.NewErr("unable to get evaluation details of variable %q", a.name)
    }

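Editor's note: the switch from Eval to ContextEval makes composited-variable evaluation cancellable — cel-go checks the context during evaluation and aborts once it is done, instead of running to completion. A minimal standalone sketch (assumed helper name and plumbing, not apiserver code):

import (
    "context"
    "time"

    "github.com/google/cel-go/cel"
)

func evalWithTimeout(prg cel.Program, vars map[string]any) (any, error) {
    ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    defer cancel()
    val, _, err := prg.ContextEval(ctx, vars) // aborts if ctx expires mid-evaluation
    if err != nil {
        return nil, err
    }
    return val.Value(), nil
}
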
2
vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go
generated
vendored
@ -229,7 +229,6 @@ func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName st
}

func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) {
    klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version)

    if rdm.apiGroups == nil {
        rdm.apiGroups = make(map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery)
@ -273,6 +272,7 @@ func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupN
        }
        rdm.apiGroups[key] = group
    }
    klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version)

    gv := metav1.GroupVersion{Group: groupName, Version: value.Version}
    gvKey := groupVersionKey{

17
vendor/k8s.io/apiserver/pkg/features/kube_features.go
generated
vendored
@ -166,6 +166,13 @@ const (
    // Deprecates and removes SelfLink from ObjectMeta and ListMeta.
    RemoveSelfLink featuregate.Feature = "RemoveSelfLink"

    // owner: @serathius
    // beta: v1.30
    //
    // Allow watch cache to create a watch on a dedicated RPC.
    // This prevents watch cache from being starved by other watches.
    SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC"

    // owner: @apelisse, @lavalamp
    // alpha: v1.14
    // beta: v1.16
@ -222,6 +229,12 @@ const (
    // Enables support for watch bookmark events.
    WatchBookmark featuregate.Feature = "WatchBookmark"

    // owner: @serathius
    // beta: 1.30
    // Enables watches without resourceVersion to be served from storage.
    // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue.
    WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion"

    // owner: @vinaykul
    // kep: http://kep.k8s.io/1287
    // alpha: v1.27
@ -286,6 +299,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

    RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},

    SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},

    ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29

    ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
@ -298,6 +313,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

    WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},

    WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta},

    InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha},

    WatchList: {Default: false, PreRelease: featuregate.Alpha},

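Editor's note: both new gates are consumed through the usual feature-gate lookup; the call-site pattern (matching the cacher change later in this commit) looks like this illustrative wrapper:

import (
    "k8s.io/apiserver/pkg/features"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func separateWatchRPCEnabled() bool {
    return utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC)
}
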
2
vendor/k8s.io/apiserver/pkg/server/config.go
generated
vendored
@ -915,7 +915,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
        requestWorkEstimator := flowcontrolrequest.NewWorkEstimator(
            c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats)
        handler = filterlatency.TrackCompleted(handler)
        handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator)
        handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator, c.RequestTimeout/4)
        handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness")
    } else {
        handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc)

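Editor's note: the new final argument seeds APF's default queue-wait budget from the server's request timeout. Assuming the apiserver's stock --request-timeout of 1m0s, that works out to:

waitLimit := c.RequestTimeout / 4 // 60s / 4 = 15s with the stock default

The per-request refinement, including the one-minute cap, is getRequestWaitContext in the filter file below.
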
70
vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go
generated
vendored
@ -35,6 +35,7 @@ import (
    fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
    flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request"
    "k8s.io/klog/v2"
    utilsclock "k8s.io/utils/clock"
)

// PriorityAndFairnessClassification identifies the results of
@ -78,6 +79,10 @@ type priorityAndFairnessHandler struct {
    // the purpose of computing RetryAfter header to avoid system
    // overload.
    droppedRequests utilflowcontrol.DroppedRequestsTracker

    // newReqWaitCtxFn creates a derived context with a deadline
    // of how long a given request can wait in its queue.
    newReqWaitCtxFn func(context.Context) (context.Context, context.CancelFunc)
}

func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) {
@ -240,8 +245,9 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque
            resultCh <- err
        }()

        // We create handleCtx with explicit cancelation function.
        // The reason for it is that Handle() underneath may start additional goroutine
        // We create handleCtx with an adjusted deadline, for two reasons.
        // One is to limit the time the request waits before its execution starts.
        // The other reason for it is that Handle() underneath may start additional goroutine
        // that is blocked on context cancellation. However, from APF point of view,
        // we don't want to wait until the whole watch request is processed (which is
        // when it context is actually cancelled) - we want to unblock the goroutine as
@ -249,7 +255,7 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque
        //
        // Note that we explicitly do NOT call the actuall handler using that context
        // to avoid cancelling request too early.
        handleCtx, handleCtxCancel := context.WithCancel(ctx)
        handleCtx, handleCtxCancel := h.newReqWaitCtxFn(ctx)
        defer handleCtxCancel()

        // Note that Handle will return irrespective of whether the request
@ -286,7 +292,11 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque
            h.handler.ServeHTTP(w, r)
        }

        h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute)
        func() {
            handleCtx, cancelFn := h.newReqWaitCtxFn(ctx)
            defer cancelFn()
            h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute)
        }()
    }

    if !served {
@ -309,6 +319,7 @@ func WithPriorityAndFairness(
    longRunningRequestCheck apirequest.LongRunningRequestCheck,
    fcIfc utilflowcontrol.Interface,
    workEstimator flowcontrolrequest.WorkEstimatorFunc,
    defaultRequestWaitLimit time.Duration,
) http.Handler {
    if fcIfc == nil {
        klog.Warningf("priority and fairness support not found, skipping")
@ -322,12 +333,18 @@ func WithPriorityAndFairness(
        waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency()
    })

    clock := &utilsclock.RealClock{}
    newReqWaitCtxFn := func(ctx context.Context) (context.Context, context.CancelFunc) {
        return getRequestWaitContext(ctx, defaultRequestWaitLimit, clock)
    }

    priorityAndFairnessHandler := &priorityAndFairnessHandler{
        handler:                 handler,
        longRunningRequestCheck: longRunningRequestCheck,
        fcIfc:                   fcIfc,
        workEstimator:           workEstimator,
        droppedRequests:         utilflowcontrol.NewDroppedRequestsTracker(),
        newReqWaitCtxFn:         newReqWaitCtxFn,
    }
    return http.HandlerFunc(priorityAndFairnessHandler.Handle)
}
@ -356,3 +373,48 @@ func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string
    w.Header().Set("Retry-After", retryAfter)
    http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}

// getRequestWaitContext returns a new context with a deadline of how
// long the request is allowed to wait before it is removed from its
// queue and rejected.
// The context.CancelFunc returned must never be nil and the caller is
// responsible for calling the CancelFunc function for cleanup.
//   - ctx: the context associated with the request (it may or may
//     not have a deadline).
//   - defaultRequestWaitLimit: the default wait duration that is used
//     if the request context does not have any deadline.
//     (a) initialization of a watch or
//     (b) a request whose context has no deadline
//
// clock comes in handy for testing the function
func getRequestWaitContext(ctx context.Context, defaultRequestWaitLimit time.Duration, clock utilsclock.PassiveClock) (context.Context, context.CancelFunc) {
    if ctx.Err() != nil {
        return ctx, func() {}
    }

    reqArrivedAt := clock.Now()
    if reqReceivedTimestamp, ok := apirequest.ReceivedTimestampFrom(ctx); ok {
        reqArrivedAt = reqReceivedTimestamp
    }

    // a) we will allow the request to wait in the queue for one
    // fourth of the time of its allotted deadline.
    // b) if the request context does not have any deadline
    // then we default to 'defaultRequestWaitLimit'
    // in any case, the wait limit for any request must not
    // exceed the hard limit of 1m
    //
    // request has deadline:
    //   wait-limit = min(remaining deadline / 4, 1m)
    // request has no deadline:
    //   wait-limit = min(defaultRequestWaitLimit, 1m)
    thisReqWaitLimit := defaultRequestWaitLimit
    if deadline, ok := ctx.Deadline(); ok {
        thisReqWaitLimit = deadline.Sub(reqArrivedAt) / 4
    }
    if thisReqWaitLimit > time.Minute {
        thisReqWaitLimit = time.Minute
    }

    return context.WithDeadline(ctx, reqArrivedAt.Add(thisReqWaitLimit))
}

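Editor's note, a worked pass through the rule above (illustrative numbers): a request with 60s of deadline remaining gets 60s / 4 = 15s of queueing budget; one with 10s remaining gets 2.5s; a request with no deadline gets defaultRequestWaitLimit; and anything above one minute is clamped to 1m. In code form, an assumed usage sketch:

ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
waitCtx, waitCancel := getRequestWaitContext(ctx, 15*time.Second, utilsclock.RealClock{})
defer waitCancel()
deadline, _ := waitCtx.Deadline() // ≈ now + 15s: a quarter of 60s remaining, under the 1m cap
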
1
vendor/k8s.io/apiserver/pkg/server/options/recommended.go
generated
vendored
@ -154,7 +154,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error {
            config.SharedInformerFactory,
            kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(),
            config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight,
            config.RequestTimeout/4,
        )
    } else {
        klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled")

15
vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go
generated
vendored
@ -26,6 +26,7 @@ import (
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
@ -401,10 +402,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) {
|
||||
// so that future reuse does not get a spurious timeout.
|
||||
<-cacher.timer.C
|
||||
}
|
||||
progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock)
|
||||
var contextMetadata metadata.MD
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) {
|
||||
// Add grpc context metadata to watch and progress notify requests done by cacher to:
|
||||
// * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher.
|
||||
// * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work.
|
||||
contextMetadata = metadata.New(map[string]string{"source": "cache"})
|
||||
}
|
||||
|
||||
progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata)
|
||||
watchCache := newWatchCache(
|
||||
config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester)
|
||||
listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc)
|
||||
listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata)
|
||||
reflectorName := "storage/cacher.go:" + config.ResourcePrefix
|
||||
|
||||
reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0)
|
||||
@ -517,7 +526,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && opts.SendInitialEvents != nil {
|
||||
opts.SendInitialEvents = nil
|
||||
}
|
||||
if opts.SendInitialEvents == nil && opts.ResourceVersion == "" {
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.WatchFromStorageWithoutResourceVersion) && opts.SendInitialEvents == nil && opts.ResourceVersion == "" {
|
||||
return c.storage.Watch(ctx, key, opts)
|
||||
}
|
||||
requestedWatchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion)
|
||||
|
||||
30
vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go
generated
vendored
@ -19,6 +19,8 @@ package cacher
import (
    "context"

    "google.golang.org/grpc/metadata"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
@ -30,17 +32,19 @@ import (

// listerWatcher opaques storage.Interface to expose cache.ListerWatcher.
type listerWatcher struct {
    storage        storage.Interface
    resourcePrefix string
    newListFunc    func() runtime.Object
    storage         storage.Interface
    resourcePrefix  string
    newListFunc     func() runtime.Object
    contextMetadata metadata.MD
}

// NewListerWatcher returns a storage.Interface backed ListerWatcher.
func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher {
func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher {
    return &listerWatcher{
        storage:        storage,
        resourcePrefix: resourcePrefix,
        newListFunc:    newListFunc,
        storage:         storage,
        resourcePrefix:  resourcePrefix,
        newListFunc:     newListFunc,
        contextMetadata: contextMetadata,
    }
}

@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error
        Predicate: pred,
        Recursive: true,
    }
    if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil {
    ctx := context.Background()
    if lw.contextMetadata != nil {
        ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata)
    }
    if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil {
        return nil, err
    }
    return list, nil
@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err
        Recursive:      true,
        ProgressNotify: true,
    }
    return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts)
    ctx := context.Background()
    if lw.contextMetadata != nil {
        ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata)
    }
    return lw.storage.Watch(ctx, lw.resourcePrefix, opts)
}

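Editor's note on the effect of the tag: the etcd client groups watches by their outgoing gRPC metadata, so requests carrying source=cache land on a Watch RPC stream of their own, and on the receiving side the tag is visible as incoming metadata. A minimal sketch of the two halves (assumed helper names):

// Client side: tag the context, as the methods above do.
func taggedContext() context.Context {
    return metadata.NewOutgoingContext(context.Background(),
        metadata.New(map[string]string{"source": "cache"}))
}

// Server/interceptor side: gRPC delivers the tag as incoming metadata.
func sourceOf(ctx context.Context) []string {
    if md, ok := metadata.FromIncomingContext(ctx); ok {
        return md.Get("source") // ["cache"] for cacher-originated calls
    }
    return nil
}
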
9
vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go
generated
vendored
@ -21,6 +21,8 @@ import (
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
@ -34,10 +36,11 @@ const (
|
||||
progressRequestPeriod = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester {
|
||||
func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester {
|
||||
pr := &conditionalProgressRequester{
|
||||
clock: clock,
|
||||
requestWatchProgress: requestWatchProgress,
|
||||
contextMetadata: contextMetadata,
|
||||
}
|
||||
pr.cond = sync.NewCond(pr.mux.RLocker())
|
||||
return pr
|
||||
@ -54,6 +57,7 @@ type TickerFactory interface {
|
||||
type conditionalProgressRequester struct {
|
||||
clock TickerFactory
|
||||
requestWatchProgress WatchProgressRequester
|
||||
contextMetadata metadata.MD
|
||||
|
||||
mux sync.RWMutex
|
||||
cond *sync.Cond
|
||||
@ -63,6 +67,9 @@ type conditionalProgressRequester struct {
|
||||
|
||||
func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) {
|
||||
ctx := wait.ContextForChannel(stopCh)
|
||||
if pr.contextMetadata != nil {
|
||||
ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata)
|
||||
}
|
||||
go func() {
|
||||
defer utilruntime.HandleCrash()
|
||||
<-stopCh
|
||||
|
||||
34
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go
generated
vendored
@ -150,9 +150,6 @@ type configController struct {
    // from server configuration.
    serverConcurrencyLimit int

    // requestWaitLimit comes from server configuration.
    requestWaitLimit time.Duration

    // watchTracker implements the necessary WatchTracker interface.
    WatchTracker

@ -263,9 +260,15 @@ type seatDemandStats struct {
}

func (stats *seatDemandStats) update(obs fq.IntegratorResults) {
    stats.highWatermark = obs.Max
    if obs.Duration <= 0 {
        return
    }
    if math.IsNaN(obs.Deviation) {
        obs.Deviation = 0
    }
    stats.avg = obs.Average
    stats.stdDev = obs.Deviation
    stats.highWatermark = obs.Max
    envelope := obs.Average + obs.Deviation
    stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope)
}

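Editor's note: the smoothing line is an exponentially weighted envelope that rises instantly but decays slowly. Worked numbers (assumed, including the coefficient value): with a coefficient of 0.977, previous smoothed = 100 and a new envelope = avg + stdDev = 40, the decayed candidate is 0.977·100 + 0.023·40 = 98.62, and max(40, 98.62) keeps 98.62; a spike to envelope = 120 is taken immediately, since max(120, …) wins.

// Assumed values, for illustration only.
const c = 0.977
smoothed, envelope := 100.0, 40.0
smoothed = math.Max(envelope, c*smoothed+(1-c)*envelope) // 98.62: slow decay
envelope = 120.0
smoothed = math.Max(envelope, c*smoothed+(1-c)*envelope) // 120: instant rise
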
@ -281,13 +284,12 @@ func newTestableController(config TestableConfig) *configController {
        asFieldManager:         config.AsFieldManager,
        foundToDangling:        config.FoundToDangling,
        serverConcurrencyLimit: config.ServerConcurrencyLimit,
        requestWaitLimit:       config.RequestWaitLimit,
        flowcontrolClient:      config.FlowcontrolClient,
        priorityLevelStates:    make(map[string]*priorityLevelState),
        WatchTracker:           NewWatchTracker(),
        MaxSeatsTracker:        NewMaxSeatsTracker(),
    }
    klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
    klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager)
    // Start with longish delay because conflicts will be between
    // different processes, so take some time to go away.
    cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue")
@ -427,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta
        plState := plStates[plName]
        if setCompleters {
            qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues,
                plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
                plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
                metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
            if err != nil {
                klog.ErrorS(err, "Inconceivable! Configuration error in existing priority level", "pl", plState.pl)
@ -651,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro

    // Supply missing mandatory PriorityLevelConfiguration objects
    if !meal.haveExemptPL {
        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit)
        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt)
    }
    if !meal.haveCatchAllPL {
        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit)
        meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll)
    }

    meal.finishQueueSetReconfigsLocked()
@ -686,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi
            }
        }
        qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues,
            pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs,
            pl, state.reqsGaugePair, state.execSeatsObs,
            metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge))
        if err != nil {
            klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err)
@ -792,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() {
        }
        var err error
        plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues,
            plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs,
            plState.pl, plState.reqsGaugePair, plState.execSeatsObs,
            metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge))
        if err != nil {
            // This can not happen because queueSetCompleterForPL already approved this config
@ -874,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() {
// queueSetCompleterForPL returns an appropriate QueueSetCompleter for the
// given priority level configuration. Returns nil and an error if the given
// object is malformed in a way that is a problem for this package.
func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) {
    if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) {
        return nil, errors.New("broken union structure at the top, for Limited")
    }
@ -896,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow
                DesiredNumQueues: int(qcAPI.Queues),
                QueueLengthLimit: int(qcAPI.QueueLengthLimit),
                HandSize:         int(qcAPI.HandSize),
                RequestWaitLimit: requestWaitLimit,
            }
        }
    } else {
@ -950,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl

// imaginePL adds a priority level based on one of the mandatory ones
// that does not actually exist (right now) as a real API object.
func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) {
func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) {
    klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name)
    labelValues := []string{proto.Name}
    reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues)
    execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues)
    seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name)
    seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name})
    qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto,
        requestWaitLimit, reqsGaugePair, execSeatsObs,
        metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
    qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair,
        execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge))
    if err != nil {
        // This can not happen because proto is one of the mandatory
        // objects and these are not erroneous

5
vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go
generated
vendored
@ -90,7 +90,6 @@ func New(
    informerFactory kubeinformers.SharedInformerFactory,
    flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface,
    serverConcurrencyLimit int,
    requestWaitLimit time.Duration,
) Interface {
    clk := eventclock.Real{}
    return NewTestable(TestableConfig{
@ -101,7 +100,6 @@ func New(
        InformerFactory:        informerFactory,
        FlowcontrolClient:      flowcontrolClient,
        ServerConcurrencyLimit: serverConcurrencyLimit,
        RequestWaitLimit:       requestWaitLimit,
        ReqsGaugeVec:           metrics.PriorityLevelConcurrencyGaugeVec,
        ExecSeatsGaugeVec:      metrics.PriorityLevelExecutionSeatsGaugeVec,
        QueueSetFactory:        fqs.NewQueueSetFactory(clk),
@ -139,9 +137,6 @@ type TestableConfig struct {
    // ServerConcurrencyLimit for the controller to enforce
    ServerConcurrencyLimit int

    // RequestWaitLimit configured on the server
    RequestWaitLimit time.Duration

    // GaugeVec for metrics about requests, broken down by phase and priority_level
    ReqsGaugeVec metrics.RatioedGaugeVec

7
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go
generated
vendored
@ -18,7 +18,6 @@ package fairqueuing

import (
    "context"
    "time"

    "k8s.io/apiserver/pkg/util/flowcontrol/debug"
    "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
@ -117,7 +116,7 @@ type QueuingConfig struct {

    // DesiredNumQueues is the number of queues that the API says
    // should exist now. This may be non-positive, in which case
    // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored.
    // QueueLengthLimit, and HandSize are ignored.
    // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig.
    // A negative value means to always dispatch immediately upon arrival
    // (i.e., the requests are "exempt" from limitation).
@ -129,10 +128,6 @@ type QueuingConfig struct {
    // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly
    // dealing a "hand" of this many queues and then picking one of minimum length.
    HandSize int

    // RequestWaitLimit is the maximum amount of time that a request may wait in a queue.
    // If, by the end of that time, the request has not been dispatched then it is rejected.
    RequestWaitLimit time.Duration
}

// DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet.

77
vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go
generated
vendored
@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig,
} else {
|
||||
qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit
|
||||
qCfg.HandSize = qs.qCfg.HandSize
|
||||
qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit
|
||||
}
|
||||
|
||||
qs.qCfg = qCfg
|
||||
@ -300,9 +299,6 @@ const (
|
||||
// Serve this one
|
||||
decisionExecute requestDecision = iota
|
||||
|
||||
// Reject this one due to APF queuing considerations
|
||||
decisionReject
|
||||
|
||||
// This one's context timed out / was canceled
|
||||
decisionCancel
|
||||
)
|
||||
@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo
|
||||
// ========================================================================
|
||||
// Step 1:
|
||||
// 1) Start with shuffle sharding, to pick a queue.
|
||||
// 2) Reject old requests that have been waiting too long
|
||||
// 3) Reject current request if there is not enough concurrency shares and
|
||||
// 2) Reject current request if there is not enough concurrency shares and
|
||||
// we are at max queue length
|
||||
// 4) If not rejected, create a request and enqueue
|
||||
req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
|
||||
// 3) If not rejected, create a request and enqueue
|
||||
req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn)
|
||||
// req == nil means that the request was rejected - no remaining
|
||||
// concurrency shares and at max queue length already
|
||||
if req == nil {
|
||||
@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) {
|
||||
}
|
||||
req.waitStarted = true
|
||||
switch decisionAny {
|
||||
case decisionReject:
|
||||
klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2)
|
||||
qs.totRequestsRejected++
|
||||
qs.totRequestsTimedout++
|
||||
metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
|
||||
return false, qs.isIdleLocked()
|
||||
case decisionCancel:
|
||||
case decisionCancel: // handle in code following this switch
|
||||
case decisionExecute:
|
||||
klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
|
||||
return true, false
|
||||
@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) {
|
||||
}
|
||||
// TODO(aaron-prindle) add metrics for this case
|
||||
klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2)
|
||||
// remove the request from the queue as it has timed out
|
||||
// remove the request from the queue as its queue wait time has exceeded
|
||||
queue := req.queue
|
||||
if req.removeFromQueueLocked() != nil {
|
||||
defer qs.boundNextDispatchLocked(queue)
|
||||
@ -446,8 +435,9 @@ func (req *request) wait() (bool, bool) {
 		qs.totSeatsWaiting -= req.MaxSeats()
 		qs.totRequestsRejected++
 		qs.totRequestsCancelled++
-		metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled")
+		metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out")
 		metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
+		metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats())
 		req.NoteQueued(false)
 		qs.reqsGaugePair.RequestsWaiting.Add(-1)
 		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 {
 	return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues)
 }
 
-// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required
+// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required
 // to validate and enqueue a request for the queueSet/QueueSet:
 // 1) Start with shuffle sharding, to pick a queue.
-// 2) Reject old requests that have been waiting too long
-// 3) Reject current request if there is not enough concurrency shares and
+// 2) Reject current request if there is not enough concurrency shares and
 // we are at max queue length
-// 4) If not rejected, create a request and enqueue
+// 3) If not rejected, create a request and enqueue
 // returns the enqueud request on a successful enqueue
 // returns nil in the case that there is no available concurrency or
 // the queuelengthlimit has been reached
-func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
+func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request {
 	// Start with the shuffle sharding, to pick a queue.
 	queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2)
 	queue := qs.queues[queueIdx]
-	// The next step is the logic to reject requests that have been waiting too long
-	qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName)
-	// NOTE: currently timeout is only checked for each new request. This means that there can be
-	// requests that are in the queue longer than the timeout if there are no new requests
-	// We prefer the simplicity over the promptness, at least for now.
 
 	defer qs.boundNextDispatchLocked(queue)
 
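Note on the hunk above: the renamed shuffleShardAndRejectOrEnqueueLocked keeps only the dealing step. For orientation, here is a minimal standalone sketch of how shuffle sharding deals a "hand" of candidate queues from a flow's hash. This is illustrative only, not the vendored implementation; the real code uses k8s.io/apiserver/pkg/util/shufflesharding.Dealer, which differs in detail.

package main

import (
	"fmt"
	"sort"
)

// dealHand derives handSize distinct queue indices from hashValue by
// repeated modulo/divide, skipping indices that were already dealt.
func dealHand(hashValue uint64, numQueues, handSize int) []int {
	hand := make([]int, 0, handSize)
	remaining := uint64(numQueues)
	for i := 0; i < handSize; i++ {
		idx := int(hashValue % remaining)
		hashValue /= remaining
		// Adjust idx upward past previously dealt indices (in ascending
		// order) so every queue appears at most once in the hand.
		prev := append([]int(nil), hand...)
		sort.Ints(prev)
		for _, p := range prev {
			if idx >= p {
				idx++
			}
		}
		hand = append(hand, idx)
		remaining--
	}
	return hand
}

func main() {
	// A flow hashed to 0xdeadbeef picks 3 candidates out of 8 queues; the
	// queueSet then enqueues onto the least-loaded candidate, which is what
	// shuffleShardLocked does upstream.
	fmt.Println(dealHand(0xdeadbeef, 8, 3))
}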
@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 	return bestQueueIdx
 }
 
-// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued
-// past the requestWaitLimit
-func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) {
-	timeoutCount := 0
-	disqueueSeats := 0
-	now := qs.clock.Now()
-	reqs := queue.requestsWaiting
-	// reqs are sorted oldest -> newest
-	// can short circuit loop (break) if oldest requests are not timing out
-	// as newer requests also will not have timed out
-
-	// now - requestWaitLimit = arrivalLimit
-	arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit)
-	reqs.Walk(func(req *request) bool {
-		if arrivalLimit.After(req.arrivalTime) {
-			if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil {
-				timeoutCount++
-				disqueueSeats += req.MaxSeats()
-				req.NoteQueued(false)
-				metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1)
-			}
-			// we need to check if the next request has timed out.
-			return true
-		}
-		// since reqs are sorted oldest -> newest, we are done here.
-		return false
-	})
-
-	// remove timed out requests from queue
-	if timeoutCount > 0 {
-		qs.totRequestsWaiting -= timeoutCount
-		qs.totSeatsWaiting -= disqueueSeats
-		qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount))
-		qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
-	}
-}
-
 // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived
 // request, which has been assigned to a queue. If up against the
 // queue length limit and the concurrency limit then returns false.
@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) {
 	qs.totRequestsWaiting++
 	qs.totSeatsWaiting += request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1)
+	metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats())
 	request.NoteQueued(true)
 	qs.reqsGaugePair.RequestsWaiting.Add(1)
 	qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting))
@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool {
 	qs.totRequestsWaiting--
 	qs.totSeatsWaiting -= request.MaxSeats()
 	metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1)
+	metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats())
 	request.NoteQueued(false)
 	qs.reqsGaugePair.RequestsWaiting.Add(-1)
 	defer qs.boundNextDispatchLocked(queue)
16 vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go generated vendored
@ -210,6 +210,16 @@ var (
 		},
 		[]string{priorityLevel, flowSchema},
 	)
+	apiserverCurrentInqueueSeats = compbasemetrics.NewGaugeVec(
+		&compbasemetrics.GaugeOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name: "current_inqueue_seats",
+			Help: "Number of seats currently pending in queues of the API Priority and Fairness subsystem",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{priorityLevel, flowSchema},
+	)
 	apiserverRequestQueueLength = compbasemetrics.NewHistogramVec(
 		&compbasemetrics.HistogramOpts{
 			Namespace: namespace,
@ -455,6 +465,7 @@ var (
 		apiserverNextSBounds,
 		apiserverNextDiscountedSBounds,
 		apiserverCurrentInqueueRequests,
+		apiserverCurrentInqueueSeats,
 		apiserverRequestQueueLength,
 		apiserverRequestConcurrencyLimit,
 		apiserverRequestConcurrencyInUse,
@ -518,6 +529,11 @@ func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string,
 	apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
 }
 
+// AddSeatsInQueues adds the given delta to the gauge of the # of seats in the queues of the specified flowSchema and priorityLevel
+func AddSeatsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) {
+	apiserverCurrentInqueueSeats.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
+}
+
 // AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel
 func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) {
 	apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta))
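The new apiserver_flowcontrol_current_inqueue_seats gauge mirrors current_inqueue_requests but counts seats rather than requests, moved by the +/- deltas seen in the queueset.go hunks above. As a rough standalone analogy using the plain Prometheus client (the vendored code goes through k8s.io/component-base/metrics, which wraps this; the label values below are purely illustrative):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// A GaugeVec keyed by priority level and flow schema, analogous to the
// vendored apiserverCurrentInqueueSeats definition.
var inqueueSeats = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "apiserver",
		Subsystem: "flowcontrol",
		Name:      "current_inqueue_seats",
		Help:      "Number of seats currently pending in queues of the API Priority and Fairness subsystem",
	},
	[]string{"priority_level", "flow_schema"},
)

func main() {
	prometheus.MustRegister(inqueueSeats)
	// Enqueue a request occupying 3 seats, then dispatch it: the gauge
	// returns to zero, just as AddSeatsInQueues is called with +MaxSeats()
	// on enqueue and -MaxSeats() on dispatch or ejection.
	inqueueSeats.WithLabelValues("workload-low", "service-accounts").Add(3)
	inqueueSeats.WithLabelValues("workload-low", "service-accounts").Add(-3)
	fmt.Println("gauge registered and updated")
}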
1 vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go generated vendored
@ -57,6 +57,7 @@ var (
 func Register(registry k8smetrics.KubeRegistry) {
 	registry.Register(healthcheck)
 	registry.Register(healthchecksTotal)
+	_ = k8smetrics.RegisterProcessStartTime(registry.Register)
 }
 
 func ResetHealthMetrics() {
14 vendor/k8s.io/component-helpers/storage/volume/helpers.go generated vendored
@ -24,6 +24,20 @@ import (
 	"k8s.io/component-helpers/scheduling/corev1"
 )
 
+// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field.
+func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool {
+	// Use beta annotation first
+	if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found {
+		return true
+	}
+
+	if claim.Spec.StorageClassName != nil {
+		return true
+	}
+
+	return false
+}
+
 // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was
 // requested, it returns "".
 func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string {
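A small usage sketch for the helper added above, showing both ways a claim can carry a class (the beta annotation is consulted before the spec field). The storage class name "managed-csi" is just an example value:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	volumehelpers "k8s.io/component-helpers/storage/volume"
)

func main() {
	sc := "managed-csi"

	// Class set via the spec field.
	byField := &v1.PersistentVolumeClaim{}
	byField.Spec.StorageClassName = &sc

	// Class set via the legacy beta annotation, which the helper checks first.
	byAnnotation := &v1.PersistentVolumeClaim{}
	byAnnotation.Annotations = map[string]string{v1.BetaStorageClassAnnotation: sc}

	fmt.Println(volumehelpers.PersistentVolumeClaimHasClass(byField))                     // true
	fmt.Println(volumehelpers.PersistentVolumeClaimHasClass(byAnnotation))                // true
	fmt.Println(volumehelpers.PersistentVolumeClaimHasClass(&v1.PersistentVolumeClaim{})) // false
}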
42 vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go generated vendored
@ -4940,6 +4940,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS
 	return allErrs
 }
 
+// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted
+func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// If we should always restart, containers are allowed to leave the terminated state
+	if podSpec.RestartPolicy == core.RestartPolicyAlways {
+		return allErrs
+	}
+	for i, oldStatus := range oldStatuses {
+		// Skip any container that is not terminated
+		if oldStatus.State.Terminated == nil {
+			continue
+		}
+		// Skip any container that failed but is allowed to restart
+		if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure {
+			continue
+		}
+
+		// Skip any restartable init container that is allowed to restart
+		isRestartableInitContainer := false
+		for _, c := range podSpec.InitContainers {
+			if oldStatus.Name == c.Name {
+				if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways {
+					isRestartableInitContainer = true
+				}
+				break
+			}
+		}
+		if isRestartableInitContainer {
+			continue
+		}
+
+		for _, newStatus := range newStatuses {
+			if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil {
+				allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state"))
+			}
+		}
+	}
+	return allErrs
+}
+
 // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation.
 func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
 	fldPath := field.NewPath("metadata")
@ -4961,7 +5001,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions
 	// If pod should not restart, make sure the status update does not transition
 	// any terminated containers to a non-terminated state.
 	allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...)
-	allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...)
+	allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...)
 	// The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever.
 	allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...)
 	allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...)
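A condensed sketch of the rule the new validator enforces, using local stand-in types rather than the vendored API: a terminated init container may leave the terminated state only if something is allowed to restart it.

package main

import "fmt"

type transition struct {
	podRestartPolicy string // "Always", "OnFailure", or "Never"
	exitCode         int32  // exit code of the terminated init container
	sidecar          bool   // restartable init container (containerRestartPolicy: Always)
}

// allowedToLeaveTerminated mirrors the skip conditions in the vendored
// ValidateInitContainerStateTransition: pod restartPolicy Always, a failed
// container under restartPolicy OnFailure, or a restartable (sidecar) init
// container may transition back to a non-terminated state.
func allowedToLeaveTerminated(t transition) bool {
	if t.podRestartPolicy == "Always" {
		return true
	}
	if t.exitCode != 0 && t.podRestartPolicy == "OnFailure" {
		return true
	}
	return t.sidecar
}

func main() {
	fmt.Println(allowedToLeaveTerminated(transition{"Never", 0, true}))  // true: sidecar restarts
	fmt.Println(allowedToLeaveTerminated(transition{"Never", 0, false})) // false: forbidden transition
}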
4 vendor/k8s.io/kubernetes/pkg/features/kube_features.go generated vendored
@ -1205,12 +1205,16 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 
 	genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
+	genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta},
+
 	genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
 	genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
 
 	genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta},
 
+	genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta},
+
 	// inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed
 	// unintentionally on either side:
 
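The relisted gates live in k8s.io/apiserver/pkg/features; a minimal check against the process-wide gate instance (assuming the vendored v0.28.9 packages) looks like this:

package main

import (
	"fmt"

	genericfeatures "k8s.io/apiserver/pkg/features"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
)

func main() {
	// Prints the compiled-in defaults from the hunk above: true for
	// SeparateCacheWatchRPC, false for WatchFromStorageWithoutResourceVersion.
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(genericfeatures.SeparateCacheWatchRPC))
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(genericfeatures.WatchFromStorageWithoutResourceVersion))
}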
17 vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go generated vendored
@ -102,6 +102,23 @@ func IsFailedPreconditionError(err error) bool {
|
||||
return errors.As(err, &failedPreconditionError)
|
||||
}
|
||||
|
||||
type OperationNotSupported struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (err *OperationNotSupported) Error() string {
|
||||
return err.msg
|
||||
}
|
||||
|
||||
func NewOperationNotSupportedError(msg string) *OperationNotSupported {
|
||||
return &OperationNotSupported{msg: msg}
|
||||
}
|
||||
|
||||
func IsOperationNotSupportedError(err error) bool {
|
||||
var operationNotSupportedError *OperationNotSupported
|
||||
return errors.As(err, &operationNotSupportedError)
|
||||
}
|
||||
|
||||
// TransientOperationFailure indicates operation failed with a transient error
|
||||
// and may fix itself when retried.
|
||||
type TransientOperationFailure struct {
|
||||
|
||||
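Usage sketch for the error type added above: because detection goes through errors.As, the error can be wrapped anywhere in a chain and still be recognized. The wrapping message below is just an example:

package main

import (
	"fmt"

	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
)

func main() {
	// Wrap the new error; errors.As inside the Is* helper still finds it.
	err := fmt.Errorf("expand volume: %w",
		volumetypes.NewOperationNotSupportedError("block volume resize is not supported"))
	fmt.Println(volumetypes.IsOperationNotSupportedError(err)) // true
}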
6 vendor/k8s.io/kubernetes/test/utils/image/manifest.go generated vendored
@ -232,7 +232,7 @@ const (
 
 func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) {
 	configs := map[ImageID]Config{}
-	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"}
+	configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"}
 	configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"}
 	configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"}
 	configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
 	configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
 	configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
 	configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
-	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.5"}
-	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.10-0"}
+	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.4.7"}
+	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.12-0"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
 	configs[InvalidRegistryImage] = Config{list.InvalidRegistry, "alpine", "3.1"}
70 vendor/modules.txt vendored
@ -40,7 +40,7 @@ github.com/docker/distribution/reference
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
-# github.com/evanphx/json-patch v5.7.0+incompatible
+# github.com/evanphx/json-patch v5.9.0+incompatible
 ## explicit
 github.com/evanphx/json-patch
 # github.com/felixge/httpsnoop v1.0.4
@ -201,7 +201,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.32.0
+# github.com/onsi/gomega v1.33.0
 ## explicit; go 1.20
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@ -374,8 +374,8 @@ golang.org/x/crypto/nacl/secretbox
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20220827204233-334a2380cb91
-## explicit; go 1.18
+# golang.org/x/exp v0.0.0-20230905200255-921286631fa9
+## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
 # golang.org/x/net v0.24.0
@ -397,7 +397,7 @@ golang.org/x/net/websocket
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sync v0.6.0
+# golang.org/x/sync v0.7.0
 ## explicit; go 1.18
 golang.org/x/sync/singleflight
 # golang.org/x/sys v0.19.0
@ -580,7 +580,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.28.8
+# k8s.io/api v0.28.9
 ## explicit; go 1.20
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@ -636,12 +636,12 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.28.4
+# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.28.9
 ## explicit; go 1.20
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/features
-# k8s.io/apimachinery v0.28.8
+# k8s.io/apimachinery v0.28.9
 ## explicit; go 1.20
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@ -703,7 +703,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.28.4
+# k8s.io/apiserver v0.28.9
 ## explicit; go 1.20
 k8s.io/apiserver/pkg/admission
 k8s.io/apiserver/pkg/admission/cel
@ -848,7 +848,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate
 k8s.io/apiserver/plugin/pkg/audit/webhook
 k8s.io/apiserver/plugin/pkg/authenticator/token/webhook
 k8s.io/apiserver/plugin/pkg/authorizer/webhook
-# k8s.io/client-go v0.28.8
+# k8s.io/client-go v0.28.9
 ## explicit; go 1.20
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@ -1114,7 +1114,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.28.4 => k8s.io/cloud-provider v0.28.4
+# k8s.io/cloud-provider v0.28.9 => k8s.io/cloud-provider v0.28.9
 ## explicit; go 1.20
 k8s.io/cloud-provider
 k8s.io/cloud-provider/app/config
@ -1127,7 +1127,7 @@ k8s.io/cloud-provider/controllers/service/config
 k8s.io/cloud-provider/controllers/service/config/v1alpha1
 k8s.io/cloud-provider/names
 k8s.io/cloud-provider/options
-# k8s.io/component-base v0.28.4
+# k8s.io/component-base v0.28.9
 ## explicit; go 1.20
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@ -1149,12 +1149,12 @@ k8s.io/component-base/metrics/testutil
 k8s.io/component-base/tracing
 k8s.io/component-base/tracing/api/v1
 k8s.io/component-base/version
-# k8s.io/component-helpers v0.28.4
+# k8s.io/component-helpers v0.28.9
 ## explicit; go 1.20
 k8s.io/component-helpers/scheduling/corev1
 k8s.io/component-helpers/scheduling/corev1/nodeaffinity
 k8s.io/component-helpers/storage/volume
-# k8s.io/controller-manager v0.28.4
+# k8s.io/controller-manager v0.28.9
 ## explicit; go 1.20
 k8s.io/controller-manager/config
 k8s.io/controller-manager/config/v1
@ -1175,7 +1175,7 @@ k8s.io/klog/v2/internal/dbg
 k8s.io/klog/v2/internal/serialize
 k8s.io/klog/v2/internal/severity
 k8s.io/klog/v2/internal/sloghandler
-# k8s.io/kms v0.28.4
+# k8s.io/kms v0.28.9
 ## explicit; go 1.20
 k8s.io/kms/apis/v1beta1
 k8s.io/kms/apis/v2
@ -1203,14 +1203,14 @@ k8s.io/kube-openapi/pkg/validation/errors
 k8s.io/kube-openapi/pkg/validation/spec
 k8s.io/kube-openapi/pkg/validation/strfmt
 k8s.io/kube-openapi/pkg/validation/strfmt/bson
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.4
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.9
 ## explicit; go 1.20
 k8s.io/kubectl/pkg/scale
 k8s.io/kubectl/pkg/util/podutils
-# k8s.io/kubelet v0.28.4 => k8s.io/kubelet v0.28.4
+# k8s.io/kubelet v0.28.9 => k8s.io/kubelet v0.28.9
 ## explicit; go 1.20
 k8s.io/kubelet/pkg/apis
-# k8s.io/kubernetes v1.28.7
+# k8s.io/kubernetes v1.28.9
 ## explicit; go 1.20
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/service
@ -1273,7 +1273,7 @@ k8s.io/kubernetes/test/utils/kubeconfig
 # k8s.io/mount-utils v0.29.4
 ## explicit; go 1.21
 k8s.io/mount-utils
-# k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.28.4
+# k8s.io/pod-security-admission v0.0.0 => k8s.io/pod-security-admission v0.28.9
 ## explicit; go 1.20
 k8s.io/pod-security-admission/api
 k8s.io/pod-security-admission/policy
@ -1303,7 +1303,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/cloud-provider-azure v1.28.4
+# sigs.k8s.io/cloud-provider-azure v1.28.9
 ## explicit; go 1.20
 sigs.k8s.io/cloud-provider-azure/pkg/cache
 sigs.k8s.io/cloud-provider-azure/pkg/util/deepcopy
@ -1322,18 +1322,18 @@ sigs.k8s.io/structured-merge-diff/v4/value
 ## explicit; go 1.12
 sigs.k8s.io/yaml
 sigs.k8s.io/yaml/goyaml.v2
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.4
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.4
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.4
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.4
-# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.4
-# k8s.io/endpointslice => k8s.io/endpointslice v0.28.4
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.4
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.4
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.4
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.4
-# k8s.io/kubectl => k8s.io/kubectl v0.28.4
-# k8s.io/kubelet => k8s.io/kubelet v0.28.4
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.4
-# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.4
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.4
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.9
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.9
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.9
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.9
+# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.9
+# k8s.io/endpointslice => k8s.io/endpointslice v0.28.9
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.9
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.9
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.9
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.9
+# k8s.io/kubectl => k8s.io/kubectl v0.28.9
+# k8s.io/kubelet => k8s.io/kubelet v0.28.9
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.9
+# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.9
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.9