chore(deps): bump google.golang.org/grpc from 1.40.0 to 1.56.2

Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.40.0 to 1.56.2.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.40.0...v1.56.2)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

parent 346af08047
commit 6d41f01685
go.mod | 8

@@ -11,7 +11,7 @@ require (
 	github.com/pborman/uuid v1.2.0
 	github.com/stretchr/testify v1.8.4
 	golang.org/x/net v0.12.0
-	google.golang.org/grpc v1.40.0
+	google.golang.org/grpc v1.56.2
 	google.golang.org/protobuf v1.31.0
 	k8s.io/api v0.24.15
 	k8s.io/apimachinery v0.24.15
@@ -30,7 +30,7 @@ require (
 	github.com/aws/aws-sdk-go v1.38.49 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/emicklei/go-restful v2.16.0+incompatible // indirect
@@ -82,13 +82,13 @@ require (
 	go.opentelemetry.io/otel/trace v0.20.0 // indirect
 	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
 	golang.org/x/crypto v0.11.0 // indirect
-	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
+	golang.org/x/oauth2 v0.7.0 // indirect
 	golang.org/x/sys v0.10.0 // indirect
 	golang.org/x/term v0.10.0 // indirect
 	golang.org/x/text v0.11.0 // indirect
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
+	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
go.sum | 12

@@ -75,8 +75,9 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -522,8 +523,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -753,8 +755,9 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA=
 google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -776,8 +779,9 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
vendor/github.com/cespare/xxhash/v2/README.md | 31 (generated, vendored)

@@ -3,8 +3,7 @@
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
| input size | purego | asm |
| ---------- | --------- | --------- |
| 4 B | 1.3 GB/s | 1.2 GB/s |
| 16 B | 2.9 GB/s | 3.5 GB/s |
| 100 B | 6.9 GB/s | 8.1 GB/s |
| 4 KB | 11.7 GB/s | 16.7 GB/s |
| 10 MB | 12.0 GB/s | 17.3 GB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package
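The README hunks above describe the exported API and the new `purego` build tag. For orientation only (this sketch is not part of the vendored docs), a minimal example of one-shot hashing with the bumped xxhash/v2 package:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string; both return the
	// 64-bit XXH64 value the README describes.
	fmt.Printf("%#x\n", xxhash.Sum64([]byte("hello, world")))
	fmt.Printf("%#x\n", xxhash.Sum64String("hello, world"))
}
```

Building with `go build -tags purego` would exercise the pure-Go path instead of the amd64/arm64 assembly shown further down in this diff.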
vendor/github.com/cespare/xxhash/v2/testall.sh | 10 (generated, vendored, new file)

@@ -0,0 +1,10 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 (generated, vendored)

@@ -16,19 +16,11 @@ const (
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
// contiguous array of the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}

// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	d.v1 = prime1v + prime2
	d.v1 = primes[0] + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.v4 = -primes[0]
	d.total = 0
	d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	memleft := d.mem[d.n&(len(d.mem)-1):]

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		copy(memleft, b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		c := copy(memleft, b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		b = b[c:]
		d.n = 0
	}

@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {

	h += d.total

	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
	b := d.mem[:d.n&(len(d.mem)-1)]
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
		b = b[4:]
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
		i++
	}

	h ^= h >> 33
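The Write and Sum64 hunks above show how a Digest buffers partial 32-byte blocks in d.mem before processing them. As a hedged, illustrative sketch (the input string is arbitrary, not from this change), the streaming API produces the same value as one-shot hashing:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// Digest implements hash.Hash64, so it can consume an arbitrary
	// stream; internally it buffers partial 32-byte blocks as shown above.
	d := xxhash.New()
	if _, err := io.Copy(d, strings.NewReader("some streamed payload")); err != nil {
		panic(err)
	}
	fmt.Printf("streaming: %#x\n", d.Sum64())
	fmt.Printf("one-shot:  %#x\n", xxhash.Sum64([]byte("some streamed payload")))
}
```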
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s | 308 (generated, vendored)

@@ -1,215 +1,209 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX	h
// SI	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// DI	prime4v
// Registers:
#define h      AX
#define d      AX
#define p      SI // pointer to advance through b
#define n      DX
#define end    BX // loop end
#define v1     R8
#define v2     R9
#define v3     R10
#define v4     R11
#define x      R12
#define prime1 R13
#define prime2 R14
#define prime4 DI

// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (SI), R12 \
	ADDQ  $8, SI    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r
#define round(acc, x) \
	IMULQ prime2, x   \
	ADDQ  x, acc      \
	ROLQ  $31, acc    \
	IMULQ prime1, acc

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  DI, acc
// round0 performs the operation x = round(0, x).
#define round0(x) \
	IMULQ prime2, x \
	ROLQ  $31, x    \
	IMULQ prime1, x

// mergeRound applies a merge round on the two registers acc and x.
// It assumes that prime1, prime2, and prime4 have been loaded.
#define mergeRound(acc, x) \
	round0(x)         \
	XORQ  x, acc      \
	IMULQ prime1, acc \
	ADDQ  prime4, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that there is at least one block
// to process.
#define blockLoop() \
loop:  \
	MOVQ +0(p), x  \
	round(v1, x)   \
	MOVQ +8(p), x  \
	round(v2, x)   \
	MOVQ +16(p), x \
	round(v3, x)   \
	MOVQ +24(p), x \
	round(v4, x)   \
	ADDQ $32, p    \
	CMPQ p, end    \
	JLE  loop

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), DI
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2
	MOVQ ·primes+24(SB), prime4

	// Load slice.
	MOVQ b_base+0(FP), SI
	MOVQ b_len+8(FP), DX
	LEAQ (SI)(DX*1), BX
	MOVQ b_base+0(FP), p
	MOVQ b_len+8(FP), n
	LEAQ (p)(n*1), end

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX
	SUBQ $32, end

	// Check whether we have at least one block.
	CMPQ DX, $32
	CMPQ n, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11
	MOVQ prime1, v1
	ADDQ prime2, v1
	MOVQ prime2, v2
	XORQ v3, v3
	XORQ v4, v4
	SUBQ prime1, v4

	// Loop until SI > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)
	blockLoop()

	CMPQ SI, BX
	JLE  blockLoop
	MOVQ v1, h
	ROLQ $1, h
	MOVQ v2, x
	ROLQ $7, x
	ADDQ x, h
	MOVQ v3, x
	ROLQ $12, x
	ADDQ x, h
	MOVQ v4, x
	ROLQ $18, x
	ADDQ x, h

	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)
	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

	JMP afterBlocks

noBlocks:
	MOVQ ·prime5v(SB), AX
	MOVQ ·primes+32(SB), h

afterBlocks:
	ADDQ DX, AX
	ADDQ n, h

	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
	ADDQ $24, BX
	ADDQ $24, end
	CMPQ p, end
	JG   try4

	CMPQ SI, BX
	JG   fourByte
loop8:
	MOVQ  (p), x
	ADDQ  $8, p
	round0(x)
	XORQ  x, h
	ROLQ  $27, h
	IMULQ prime1, h
	ADDQ  prime4, h

wordLoop:
	// Calculate k1.
	MOVQ  (SI), R8
	ADDQ  $8, SI
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8
	CMPQ  p, end
	JLE   loop8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  DI, AX
try4:
	ADDQ $4, end
	CMPQ p, end
	JG   try1

	CMPQ SI, BX
	JLE  wordLoop
	MOVL  (p), x
	ADDQ  $4, p
	IMULQ prime1, x
	XORQ  x, h

fourByte:
	ADDQ $4, BX
	CMPQ SI, BX
	JG   singles
	ROLQ  $23, h
	IMULQ prime2, h
	ADDQ  ·primes+16(SB), h

	MOVL  (SI), R8
	ADDQ  $4, SI
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	ADDQ $4, BX
	CMPQ SI, BX
try1:
	ADDQ $4, end
	CMPQ p, end
	JGE  finalize

singlesLoop:
	MOVBQZX (SI), R12
	ADDQ    $1, SI
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX
loop1:
	MOVBQZX (p), x
	ADDQ    $1, p
	IMULQ   ·primes+32(SB), x
	XORQ    x, h
	ROLQ    $11, h
	IMULQ   prime1, h

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ SI, BX
	JL   singlesLoop
	CMPQ p, end
	JL   loop1

finalize:
	MOVQ  AX, R12
	SHRQ  $33, R12
	XORQ  R12, AX
	IMULQ R14, AX
	MOVQ  AX, R12
	SHRQ  $29, R12
	XORQ  R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ  AX, R12
	SHRQ  $32, R12
	XORQ  R12, AX
	MOVQ  h, x
	SHRQ  $33, x
	XORQ  x, h
	IMULQ prime2, h
	MOVQ  h, x
	SHRQ  $29, x
	XORQ  x, h
	IMULQ ·primes+16(SB), h
	MOVQ  h, x
	SHRQ  $32, x
	XORQ  x, h

	MOVQ AX, ret+24(FP)
	MOVQ h, ret+24(FP)
	RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·primes+0(SB), prime1
	MOVQ ·primes+8(SB), prime2

	// Load slice.
	MOVQ b_base+8(FP), SI
	MOVQ b_len+16(FP), DX
	LEAQ (SI)(DX*1), BX
	SUBQ $32, BX
	MOVQ b_base+8(FP), p
	MOVQ b_len+16(FP), n
	LEAQ (p)(n*1), end
	SUBQ $32, end

	// Load vN from d.
	MOVQ d+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4
	MOVQ s+0(FP), d
	MOVQ 0(d), v1
	MOVQ 8(d), v2
	MOVQ 16(d), v3
	MOVQ 24(d), v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	JLE  blockLoop
	blockLoop()

	// Copy vN back to d.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)
	MOVQ v1, 0(d)
	MOVQ v2, 8(d)
	MOVQ v3, 16(d)
	MOVQ v4, 24(d)

	// The number of bytes written is SI minus the old base pointer.
	SUBQ b_base+8(FP), SI
	MOVQ SI, ret+32(FP)
	// The number of bytes written is p minus the old base pointer.
	SUBQ b_base+8(FP), p
	MOVQ p, ret+32(FP)

	RET
vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s | 183 (generated, vendored, new file)

@@ -0,0 +1,183 @@
//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Registers:
#define digest  R1
#define h       R2 // return value
#define p       R3 // input pointer
#define n       R4 // input length
#define nblocks R5 // n / 32
#define prime1  R7
#define prime2  R8
#define prime3  R9
#define prime4  R10
#define prime5  R11
#define v1      R12
#define v2      R13
#define v3      R14
#define v4      R15
#define x1      R20
#define x2      R21
#define x3      R22
#define x4      R23

#define round(acc, x) \
	MADD prime2, acc, x, acc \
	ROR  $64-31, acc         \
	MUL  prime1, acc

// round0 performs the operation x = round(0, x).
#define round0(x) \
	MUL prime2, x \
	ROR $64-31, x \
	MUL prime1, x

#define mergeRound(acc, x) \
	round0(x)                     \
	EOR  x, acc                   \
	MADD acc, prime4, prime1, acc

// blockLoop processes as many 32-byte blocks as possible,
// updating v1, v2, v3, and v4. It assumes that n >= 32.
#define blockLoop() \
	LSR     $5, n, nblocks  \
	PCALIGN $16             \
loop:  \
	LDP.P   16(p), (x1, x2) \
	LDP.P   16(p), (x3, x4) \
	round(v1, x1)           \
	round(v2, x2)           \
	round(v3, x3)           \
	round(v4, x4)           \
	SUB     $1, nblocks     \
	CBNZ    nblocks, loop

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
	LDP b_base+0(FP), (p, n)

	LDP  ·primes+0(SB), (prime1, prime2)
	LDP  ·primes+16(SB), (prime3, prime4)
	MOVD ·primes+32(SB), prime5

	CMP  $32, n
	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
	BLT  afterLoop

	ADD  prime1, prime2, v1
	MOVD prime2, v2
	MOVD $0, v3
	NEG  prime1, v4

	blockLoop()

	ROR $64-1, v1, x1
	ROR $64-7, v2, x2
	ADD x1, x2
	ROR $64-12, v3, x3
	ROR $64-18, v4, x4
	ADD x3, x4
	ADD x2, x4, h

	mergeRound(h, v1)
	mergeRound(h, v2)
	mergeRound(h, v3)
	mergeRound(h, v4)

afterLoop:
	ADD n, h

	TBZ   $4, n, try8
	LDP.P 16(p), (x1, x2)

	round0(x1)

	// NOTE: here and below, sequencing the EOR after the ROR (using a
	// rotated register) is worth a small but measurable speedup for small
	// inputs.
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

	round0(x2)
	ROR  $64-27, h
	EOR  x2 @> 64-27, h, h
	MADD h, prime4, prime1, h

try8:
	TBZ    $3, n, try4
	MOVD.P 8(p), x1

	round0(x1)
	ROR  $64-27, h
	EOR  x1 @> 64-27, h, h
	MADD h, prime4, prime1, h

try4:
	TBZ     $2, n, try2
	MOVWU.P 4(p), x2

	MUL  prime1, x2
	ROR  $64-23, h
	EOR  x2 @> 64-23, h, h
	MADD h, prime3, prime2, h

try2:
	TBZ     $1, n, try1
	MOVHU.P 2(p), x3
	AND     $255, x3, x1
	LSR     $8, x3, x2

	MUL prime5, x1
	ROR $64-11, h
	EOR x1 @> 64-11, h, h
	MUL prime1, h

	MUL prime5, x2
	ROR $64-11, h
	EOR x2 @> 64-11, h, h
	MUL prime1, h

try1:
	TBZ   $0, n, finalize
	MOVBU (p), x4

	MUL prime5, x4
	ROR $64-11, h
	EOR x4 @> 64-11, h, h
	MUL prime1, h

finalize:
	EOR h >> 33, h
	MUL prime2, h
	EOR h >> 29, h
	MUL prime3, h
	EOR h >> 32, h

	MOVD h, ret+24(FP)
	RET

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
	LDP ·primes+0(SB), (prime1, prime2)

	// Load state. Assume v[1-4] are stored contiguously.
	MOVD d+0(FP), digest
	LDP  0(digest), (v1, v2)
	LDP  16(digest), (v3, v4)

	LDP b_base+8(FP), (p, n)

	blockLoop()

	// Store updated state.
	STP (v1, v2), 0(digest)
	STP (v3, v4), 16(digest)

	BIC  $31, n
	MOVD n, ret+32(FP)
	RET
@@ -1,3 +1,5 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
vendor/github.com/cespare/xxhash/v2/xxhash_other.go | 22 (generated, vendored)

@@ -1,4 +1,5 @@
// +build !amd64 appengine !gc purego
//go:build (!amd64 && !arm64) || appengine || !gc || purego
// +build !amd64,!arm64 appengine !gc purego

package xxhash

@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
	var h uint64

	if n >= 32 {
		v1 := prime1v + prime2
		v1 := primes[0] + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		v4 := -primes[0]
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {

	h += uint64(n)

	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
	for ; len(b) >= 8; b = b[8:] {
		k1 := round(0, u64(b[:8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
	if len(b) >= 4 {
		h ^= uint64(u32(b[:4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
		b = b[4:]
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
	for ; len(b) > 0; b = b[1:] {
		h ^= uint64(b[0]) * prime5
		h = rol11(h) * prime1
	}
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go | 1 (generated, vendored)

@@ -1,3 +1,4 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go | 3 (generated, vendored)

@@ -1,3 +1,4 @@
//go:build !appengine
// +build !appengine

// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go | 71 (generated, vendored, deleted)

@@ -1,71 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp // import "golang.org/x/net/context/ctxhttp"

import (
	"context"
	"io"
	"net/http"
	"net/url"
	"strings"
)

// Do sends an HTTP request with the provided http.Client and returns
// an HTTP response.
//
// If the client is nil, http.DefaultClient is used.
//
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(req.WithContext(ctx))
	// If we got an error, and the context has been canceled,
	// the context's error is probably more useful.
	if err != nil {
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}
	}
	return resp, err
}

// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return Do(ctx, client, req)
}

// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}
vendor/golang.org/x/oauth2/AUTHORS | 3 (generated, vendored, deleted)

@@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 (generated, vendored, deleted)

@@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
vendor/golang.org/x/oauth2/README.md | 12 (generated, vendored)

@@ -19,7 +19,7 @@ See pkg.go.dev for further documentation and examples.
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google)

## Policy for new packages
## Policy for new endpoints

We no longer accept new provider-specific packages in this repo if all
they do is add a single endpoint variable. If you just want to add a
@@ -29,8 +29,12 @@ package.

## Report Issues / Send Patches

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html.

The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.

This repository uses Gerrit for code changes. To learn how to submit changes to
this repository, see https://golang.org/doc/contribute.html. In particular:

* Excluding trivial changes, all contributions should be connected to an existing issue.
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
* The code owners are listed at [dev.golang.org/owners](https://dev.golang.org/owners#:~:text=x/oauth2).
vendor/golang.org/x/oauth2/internal/token.go | 4 (generated, vendored)

@@ -19,8 +19,6 @@ import (
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

// Token represents the credentials used to authorize
@@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string,
}

func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) {
	r, err := ctxhttp.Do(ctx, ContextClient(ctx), req)
	r, err := ContextClient(ctx).Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
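The hunk above (together with the deleted ctxhttp.go earlier in the diff) drops oauth2's dependency on golang.org/x/net/context/ctxhttp in favor of attaching the context directly to the request. A hedged sketch of the same pattern with plain net/http; the helper name, client choice, and URL are illustrative, not part of this change:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// doWithContext mirrors the pattern the diff switches to: bind the context
// to the request and let the standard library handle cancellation, instead
// of routing the call through x/net's ctxhttp helpers.
func doWithContext(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	return client.Do(req.WithContext(ctx))
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	resp, err := doWithContext(ctx, nil, req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```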
vendor/golang.org/x/oauth2/oauth2.go | 33 (generated, vendored)

@@ -16,6 +16,7 @@ import (
	"net/url"
	"strings"
	"sync"
	"time"

	"golang.org/x/oauth2/internal"
)
@@ -140,7 +141,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-empty string and validate that it matches the
// the state query parameter on your redirect callback.
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
@@ -290,6 +291,8 @@ type reuseTokenSource struct {

	mu sync.Mutex // guards t
	t  *Token

	expiryDelta time.Duration
}

// Token returns the current token if it's still valid, else will
@@ -305,6 +308,7 @@ func (s *reuseTokenSource) Token() (*Token, error) {
	if err != nil {
		return nil, err
	}
	t.expiryDelta = s.expiryDelta
	s.t = t
	return t, nil
}
@@ -379,3 +383,30 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
		new: src,
	}
}

// ReuseTokenSource returns a TokenSource that acts in the same manner as the
// TokenSource returned by ReuseTokenSource, except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
func ReuseTokenSourceWithExpiry(t *Token, src TokenSource, earlyExpiry time.Duration) TokenSource {
	// Don't wrap a reuseTokenSource in itself. That would work,
	// but cause an unnecessary number of mutex operations.
	// Just build the equivalent one.
	if rt, ok := src.(*reuseTokenSource); ok {
		if t == nil {
			// Just use it directly, but set the expiryDelta to earlyExpiry,
			// so the behavior matches what the user expects.
			rt.expiryDelta = earlyExpiry
			return rt
		}
		src = rt.new
	}
	if t != nil {
		t.expiryDelta = earlyExpiry
	}
	return &reuseTokenSource{
		t:           t,
		new:         src,
		expiryDelta: earlyExpiry,
	}
}
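The hunk above introduces ReuseTokenSourceWithExpiry, which behaves like ReuseTokenSource but lets callers refresh tokens earlier than the default 10-second buffer (the token is treated as expired at Token.Expiry minus the supplied duration). A hedged usage sketch; the client-credentials config, endpoint, and secrets below are placeholders, not values from this change:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/clientcredentials"
)

func main() {
	// Hypothetical OAuth2 client-credentials setup.
	conf := &clientcredentials.Config{
		ClientID:     "client-id",
		ClientSecret: "client-secret",
		TokenURL:     "https://auth.example.com/token",
	}
	base := conf.TokenSource(context.Background())

	// Refresh tokens 30 seconds before they expire instead of the
	// default 10-second buffer.
	ts := oauth2.ReuseTokenSourceWithExpiry(nil, base, 30*time.Second)

	tok, err := ts.Token()
	if err != nil {
		fmt.Println("token fetch failed:", err)
		return
	}
	fmt.Println("expires:", tok.Expiry)
}
```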
vendor/golang.org/x/oauth2/token.go | 14 (generated, vendored)

@@ -16,10 +16,10 @@ import (
	"golang.org/x/oauth2/internal"
)

// expiryDelta determines how earlier a token should be considered
// defaultExpiryDelta determines how earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second
const defaultExpiryDelta = 10 * time.Second

// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
@@ -52,6 +52,11 @@ type Token struct {
	// raw optionally contains extra metadata from the server
	// when updating a token.
	raw interface{}

	// expiryDelta is used to calculate when a token is considered
	// expired, by subtracting from Expiry. If zero, defaultExpiryDelta
	// is used.
	expiryDelta time.Duration
}

// Type returns t.TokenType if non-empty, else "Bearer".
@@ -127,6 +132,11 @@ func (t *Token) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}

	expiryDelta := defaultExpiryDelta
	if t.expiryDelta != 0 {
		expiryDelta = t.expiryDelta
	}
	return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
}
vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go | 41 (generated, vendored)

@@ -1,4 +1,4 @@
// Copyright 2015 Google LLC
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.26.0
// 	protoc        v3.12.2
// 	protoc        v3.21.9
// source: google/api/httpbody.proto

package httpbody
@@ -40,7 +40,6 @@ const (
// payload formats that can't be represented as JSON, such as raw binary or
// an HTML page.
//
//
// This message can be used both in streaming and non-streaming API methods in
// the request as well as the response.
//
@@ -50,32 +49,32 @@ const (
//
// Example:
//
// message GetResourceRequest {
// // A unique request id.
// string request_id = 1;
// message GetResourceRequest {
// // A unique request id.
// string request_id = 1;
//
// // The raw HTTP body is bound to this field.
// google.api.HttpBody http_body = 2;
// // The raw HTTP body is bound to this field.
// google.api.HttpBody http_body = 2;
//
// }
// }
//
// service ResourceService {
// rpc GetResource(GetResourceRequest)
// returns (google.api.HttpBody);
// rpc UpdateResource(google.api.HttpBody)
// returns (google.protobuf.Empty);
// service ResourceService {
// rpc GetResource(GetResourceRequest)
// returns (google.api.HttpBody);
// rpc UpdateResource(google.api.HttpBody)
// returns (google.protobuf.Empty);
//
// }
// }
//
// Example with streaming methods:
//
// service CaldavService {
// rpc GetCalendar(stream google.api.HttpBody)
// returns (stream google.api.HttpBody);
// rpc UpdateCalendar(stream google.api.HttpBody)
// returns (stream google.api.HttpBody);
// service CaldavService {
// rpc GetCalendar(stream google.api.HttpBody)
// returns (stream google.api.HttpBody);
// rpc UpdateCalendar(stream google.api.HttpBody)
// returns (stream google.api.HttpBody);
//
// }
// }
//
// Use of this type only changes how the request and response bodies are
// handled, all other features will continue to work unchanged.
vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go | 10 (generated, vendored)

@@ -1,4 +1,4 @@
// Copyright 2020 Google LLC
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.26.0
// 	protoc        v3.12.2
// 	protoc        v3.21.9
// source: google/rpc/status.proto

package status
@@ -48,11 +48,13 @@ type Status struct {
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
	// The status code, which should be an enum value of
	// [google.rpc.Code][google.rpc.Code].
	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
	// A developer-facing error message, which should be in English. Any
	// user-facing error message should be localized and sent in the
	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
	// by the client.
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
	// A list of messages that carry the error details. There is a common set of
	// message types for APIs to use.
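The field comments above describe the google.rpc.Status shape (a numeric Code, an English Message, and optional Details). As a purely illustrative sketch, not part of this change, a Status value might be built like this (5 is the NOT_FOUND value of google.rpc.Code; the message text is made up):

```go
package main

import (
	"fmt"

	spb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	// Code should be a google.rpc.Code enum value; Details would carry
	// Any-packed messages and is left empty here.
	st := &spb.Status{
		Code:    5,
		Message: "resource not found",
	}
	fmt.Println(st.GetCode(), st.GetMessage())
}
```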
vendor/google.golang.org/grpc/CONTRIBUTING.md | 32 (generated, vendored)

@@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
  both author's & review's time is wasted. Create more PRs to address different
  concerns and everyone will be happy.

- If you are searching for features to work on, issues labeled [Status: Help
  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
  is a great place to start. These issues are well-documented and usually can be
  resolved with a single pull request.

- If you are adding a new file, make sure it has the copyright message template
  at the top as a comment. You can copy over the message from an existing file
  and update the year.

- The grpc package should only depend on standard Go packages and a small number
  of exceptions. If your contribution introduces new dependencies which are NOT
  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
@@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
- Provide a good **PR description** as a record of **what** change is being made
  and **why** it was made. Link to a github issue if it exists.

- Don't fix code style and formatting unless you are already changing that line
  to address an issue. PRs with irrelevant changes won't be merged. If you do
  want to fix formatting or style, do that in a separate PR.
- If you want to fix formatting or style, consider whether your changes are an
  obvious improvement or might be considered a personal preference. If a style
  change is based on preference, it likely will not be accepted. If it corrects
  widely agreed-upon anti-patterns, then please do create a PR and explain the
  benefits of the change.

- Unless your PR is trivial, you should expect there will be reviewer comments
  that you'll need to address before merging. We expect you to be reasonably
  responsive to those comments, otherwise the PR will be closed after 2-3 weeks
  of inactivity.
  that you'll need to address before merging. We'll mark it as `Status: Requires
  Reporter Clarification` if we expect you to respond to these comments in a
  timely manner. If the PR remains inactive for 6 days, it will be marked as
  `stale` and automatically close 7 days after that if we don't hear back from
  you.

- Maintain **clean commit history** and use **meaningful commit messages**. PRs
  with messy commit history are difficult to review and won't be merged. Use
@@ -53,9 +66,8 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
  recommend you **run tests locally** before creating your PR to catch breakages
  early on.
  - `make all` to test everything, OR
  - `make vet` to catch vet errors
  - `make test` to run the tests
  - `make testrace` to run tests in race mode
  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode

- Exceptions to the rules can be made if there's a compelling reason for doing so.
vendor/google.golang.org/grpc/MAINTAINERS.md | 5 (generated, vendored)

@@ -8,17 +8,18 @@ See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIB
for general contribution guidelines.

## Maintainers (in alphabetical order)
- [canguler](https://github.com/canguler), Google LLC

- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
- [jadekler](https://github.com/jadekler), Google LLC
- [menghanl](https://github.com/menghanl), Google LLC
- [srini100](https://github.com/srini100), Google LLC

## Emeritus Maintainers (in alphabetical order)
- [adelez](https://github.com/adelez), Google LLC
- [canguler](https://github.com/canguler), Google LLC
- [iamqizhao](https://github.com/iamqizhao), Google LLC
- [jadekler](https://github.com/jadekler), Google LLC
- [jtattermusch](https://github.com/jtattermusch), Google LLC
- [lyuxuan](https://github.com/lyuxuan), Google LLC
- [makmukhi](https://github.com/makmukhi), Google LLC
vendor/google.golang.org/grpc/Makefile | 2 (generated, vendored)

@@ -41,8 +41,6 @@ vetdeps:
	clean \
	proto \
	test \
	testappengine \
	testappenginedeps \
	testrace \
	vet \
	vetdeps
vendor/google.golang.org/grpc/NOTICE.txt | 13 (generated, vendored, new file)

@@ -0,0 +1,13 @@
Copyright 2014 gRPC authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/google.golang.org/grpc/attributes/attributes.go | 109 (generated, vendored)

@@ -19,61 +19,112 @@
// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package attributes

import "fmt"
import (
	"fmt"
	"strings"
)

// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys.
// types for keys. Values should not be modified after they are added to an
// Attributes or if they were received from one. If values implement 'Equal(o
// interface{}) bool', it will be called by (*Attributes).Equal to determine
// whether two values with the same key should be considered equal.
type Attributes struct {
	m map[interface{}]interface{}
}

// New returns a new Attributes containing all key/value pairs in kvs. If the
// same key appears multiple times, the last value overwrites all previous
// values for that key. Panics if len(kvs) is not even.
func New(kvs ...interface{}) *Attributes {
	if len(kvs)%2 != 0 {
		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
	}
	a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
	for i := 0; i < len(kvs)/2; i++ {
		a.m[kvs[i*2]] = kvs[i*2+1]
	}
	return a
// New returns a new Attributes containing the key/value pair.
func New(key, value interface{}) *Attributes {
	return &Attributes{m: map[interface{}]interface{}{key: value}}
}

// WithValues returns a new Attributes containing all key/value pairs in a and
// kvs. Panics if len(kvs) is not even. If the same key appears multiple
// times, the last value overwrites all previous values for that key. To
// remove an existing key, use a nil value.
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
// WithValue returns a new Attributes containing the previous keys and values
// and the new key/value pair. If the same key appears multiple times, the
// last value overwrites all previous values for that key. To remove an
// existing key, use a nil value. value should not be modified later.
func (a *Attributes) WithValue(key, value interface{}) *Attributes {
	if a == nil {
		return New(kvs...)
		return New(key, value)
	}
	if len(kvs)%2 != 0 {
		panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
	}
	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
	n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)}
	for k, v := range a.m {
		n.m[k] = v
	}
	for i := 0; i < len(kvs)/2; i++ {
		n.m[kvs[i*2]] = kvs[i*2+1]
	}
	n.m[key] = value
	return n
}

// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key.
// no value is associated with key. The returned value should not be modified.
func (a *Attributes) Value(key interface{}) interface{} {
	if a == nil {
		return nil
	}
	return a.m[key]
}

// Equal returns whether a and o are equivalent. If 'Equal(o interface{})
// bool' is implemented for a value in the attributes, it is called to
// determine if the value matches the one stored in the other attributes. If
// Equal is not implemented, standard equality is used to determine if the two
// values are equal. Note that some types (e.g. maps) aren't comparable by
// default, so they must be wrapped in a struct, or in an alias type, with Equal
// defined.
func (a *Attributes) Equal(o *Attributes) bool {
	if a == nil && o == nil {
		return true
	}
	if a == nil || o == nil {
		return false
	}
	if len(a.m) != len(o.m) {
		return false
	}
	for k, v := range a.m {
		ov, ok := o.m[k]
		if !ok {
			// o missing element of a
			return false
		}
		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
			if !eq.Equal(ov) {
				return false
			}
		} else if v != ov {
			// Fallback to a standard equality check if Value is unimplemented.
			return false
		}
	}
	return true
}

// String prints the attribute map. If any key or values throughout the map
// implement fmt.Stringer, it calls that method and appends.
func (a *Attributes) String() string {
	var sb strings.Builder
	sb.WriteString("{")
	first := true
	for k, v := range a.m {
		var key, val string
		if str, ok := k.(interface{ String() string }); ok {
			key = str.String()
		}
		if str, ok := v.(interface{ String() string }); ok {
			val = str.String()
		}
		if !first {
			sb.WriteString(", ")
		}
		sb.WriteString(fmt.Sprintf("%q: %q, ", key, val))
		first = false
	}
	sb.WriteString("}")
	return sb.String()
}
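The hunks above change attributes.New and WithValue from variadic key/value pairs to a single pair per call, and add Equal and String. A hedged usage sketch of the new shape; the key type and values are illustrative, not from this diff:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Keys should be package-defined types so they cannot collide across
// packages; this one exists only for the example.
type addrInfoKey struct{}

func main() {
	// New now takes exactly one key/value pair; further pairs are layered
	// on with WithValue, each call returning a new immutable *Attributes.
	a := attributes.New(addrInfoKey{}, "primary")
	b := a.WithValue(addrInfoKey{}, "backup") // overwrites the previous value

	fmt.Println(a.Value(addrInfoKey{})) // primary
	fmt.Println(b.Value(addrInfoKey{})) // backup
	fmt.Println(a.Equal(b))             // false: values differ
}
```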
vendor/google.golang.org/grpc/backoff.go | 2 (generated, vendored)

@@ -48,7 +48,7 @@ type BackoffConfig struct {
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
142
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
142
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
@ -27,6 +27,7 @@ import (
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/channelz"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
@ -75,24 +76,26 @@ func Get(name string) Builder {
	return nil
}

// SubConn represents a gRPC sub connection.
// Each sub connection contains a list of addresses. gRPC will
// try to connect to them (in sequence), and stop trying the
// remainder once one connection is successful.
// A SubConn represents a single connection to a gRPC backend service.
//
// The reconnect backoff will be applied on the list, not a single address.
// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
// Each SubConn contains a list of addresses.
//
// All SubConns start in IDLE, and will not try to connect. To trigger
// the connecting, Balancers must call Connect.
// When the connection encounters an error, it will reconnect immediately.
// When the connection becomes IDLE, it will not reconnect unless Connect is
// called.
// All SubConns start in IDLE, and will not try to connect. To trigger the
// connecting, Balancers must call Connect. If a connection re-enters IDLE,
// Balancers must call Connect again to trigger a new connection attempt.
//
// This interface is to be implemented by gRPC. Users should not need a
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
// gRPC will try to connect to the addresses in sequence, and stop trying the
// remainder once the first connection is successful. If an attempt to connect
// to all addresses encounters an error, the SubConn will enter
// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
//
// Once established, if a connection is lost, the SubConn will transition
// directly to IDLE.
//
// This interface is to be implemented by gRPC. Users should not need their own
// implementation of this interface. For situations like testing, any
// implementations should embed this interface. This allows gRPC to add new
// methods to this interface.
type SubConn interface {
	// UpdateAddresses updates the addresses used in this SubConn.
	// gRPC checks if currently-connected address is still in the new list.
@ -107,6 +110,11 @@ type SubConn interface {
	UpdateAddresses([]resolver.Address)
	// Connect starts the connecting for this SubConn.
	Connect()
	// GetOrBuildProducer returns a reference to the existing Producer for this
	// ProducerBuilder in this SubConn, or, if one does not currently exist,
	// creates a new one and returns it. Returns a close function which must
	// be called when the Producer is no longer needed.
	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
}

// NewSubConnOptions contains options to create new SubConn.
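For orientation, a hedged sketch, not part of the vendored code, of what a ProducerBuilder registered through the new GetOrBuildProducer method can look like. The health-check producer named here is hypothetical; only the ProducerBuilder, Producer, and SubConn signatures come from the diff above.

package producerexample

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// healthProducerBuilder is a hypothetical balancer.ProducerBuilder. Build is
// handed the SubConn as a grpc.ClientConnInterface so the producer can open
// streams on that specific connection; the returned close func runs once all
// references handed out by GetOrBuildProducer have been released.
type healthProducerBuilder struct{}

type healthProducer struct {
	cc   grpc.ClientConnInterface
	stop chan struct{}
}

func (healthProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) {
	p := &healthProducer{cc: cci.(grpc.ClientConnInterface), stop: make(chan struct{})}
	// A real producer would start background work here (for example a watch
	// stream on p.cc) and stop it when the close func is invoked.
	return p, func() { close(p.stop) }
}

// buildOnSubConn shows the call site: the balancer asks the SubConn for the
// shared producer and must call the returned cleanup when done with it.
func buildOnSubConn(sc balancer.SubConn) (*healthProducer, func()) {
	producer, closeFn := sc.GetOrBuildProducer(healthProducerBuilder{})
	return producer.(*healthProducer), closeFn
}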
@ -172,25 +180,32 @@ type ClientConn interface {
|
||||
|
||||
// BuildOptions contains additional information for Build.
|
||||
type BuildOptions struct {
|
||||
// DialCreds is the transport credential the Balancer implementation can
|
||||
// use to dial to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it does not need to talk to another party securely.
|
||||
// DialCreds is the transport credentials to use when communicating with a
|
||||
// remote load balancer server. Balancer implementations which do not
|
||||
// communicate with a remote load balancer server can ignore this field.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// CredsBundle is the credentials bundle that the Balancer can use.
|
||||
// CredsBundle is the credentials bundle to use when communicating with a
|
||||
// remote load balancer server. Balancer implementations which do not
|
||||
// communicate with a remote load balancer server can ignore this field.
|
||||
CredsBundle credentials.Bundle
|
||||
// Dialer is the custom dialer the Balancer implementation can use to dial
|
||||
// to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it doesn't need to talk to remote balancer.
|
||||
// Dialer is the custom dialer to use when communicating with a remote load
|
||||
// balancer server. Balancer implementations which do not communicate with a
|
||||
// remote load balancer server can ignore this field.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// ChannelzParentID is the entity parent's channelz unique identification number.
|
||||
ChannelzParentID int64
|
||||
// Authority is the server name to use as part of the authentication
|
||||
// handshake when communicating with a remote load balancer server. Balancer
|
||||
// implementations which do not communicate with a remote load balancer
|
||||
// server can ignore this field.
|
||||
Authority string
|
||||
// ChannelzParentID is the parent ClientConn's channelz ID.
|
||||
ChannelzParentID *channelz.Identifier
|
||||
// CustomUserAgent is the custom user agent set on the parent ClientConn.
|
||||
// The balancer should set the same custom user agent if it creates a
|
||||
// ClientConn.
|
||||
CustomUserAgent string
|
||||
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
|
||||
// passed to the resolver.
|
||||
// See the documentation for the resolver.Target type for details about what it contains.
|
||||
// Target contains the parsed address info of the dial target. It is the
|
||||
// same resolver.Target as passed to the resolver. See the documentation for
|
||||
// the resolver.Target type for details about what it contains.
|
||||
Target resolver.Target
|
||||
}
|
||||
|
||||
@ -234,7 +249,7 @@ type DoneInfo struct {
|
||||
// ServerLoad is the load received from server. It's usually sent as part of
|
||||
// trailing metadata.
|
||||
//
|
||||
// The only supported type now is *orca_v1.LoadReport.
|
||||
// The only supported type now is *orca_v3.LoadReport.
|
||||
ServerLoad interface{}
|
||||
}
|
||||
|
||||
@ -264,6 +279,14 @@ type PickResult struct {
|
||||
// type, Done may not be called. May be nil if the balancer does not wish
|
||||
// to be notified when the RPC completes.
|
||||
Done func(DoneInfo)
|
||||
|
||||
// Metadata provides a way for LB policies to inject arbitrary per-call
|
||||
// metadata. Any metadata returned here will be merged with existing
|
||||
// metadata added by the client application.
|
||||
//
|
||||
// LB policies with child policies are responsible for propagating metadata
|
||||
// injected by their children to the ClientConn, as part of Pick().
|
||||
Metadata metadata.MD
|
||||
}
|
||||
|
||||
// TransientFailureError returns e. It exists for backward compatibility and
|
||||
@ -326,6 +349,20 @@ type Balancer interface {
	Close()
}

// ExitIdler is an optional interface for balancers to implement. If
// implemented, ExitIdle will be called when ClientConn.Connect is called, if
// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause
// all SubConns to connect.
//
// Notice: it will be required for all balancers to implement this in a future
// release.
type ExitIdler interface {
	// ExitIdle instructs the LB policy to reconnect to backends / exit the
	// IDLE state, if appropriate and possible. Note that SubConns that enter
	// the IDLE state will not reconnect until SubConn.Connect is called.
	ExitIdle()
}

// SubConnState describes the state of a SubConn.
type SubConnState struct {
	// ConnectivityState is the connectivity state of the SubConn.
@ -348,41 +385,20 @@ type ClientConnState struct {
|
||||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
// A ProducerBuilder is a simple constructor for a Producer. It is used by the
|
||||
// SubConn to create producers when needed.
|
||||
type ProducerBuilder interface {
|
||||
// Build creates a Producer. The first parameter is always a
|
||||
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
|
||||
// associated SubConn), but is declared as interface{} to avoid a
|
||||
// dependency cycle. Should also return a close function that will be
|
||||
// called when all references to the Producer have been given up.
|
||||
Build(grpcClientConnInterface interface{}) (p Producer, close func())
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
// it evaluates what aggregated state should be.
|
||||
//
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||||
// - Else the aggregated state is TransientFailure.
|
||||
//
|
||||
// Idle and Shutdown are not considered.
|
||||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
// A Producer is a type shared among potentially many consumers. It is
|
||||
// associated with a SubConn, and an implementation will typically contain
|
||||
// other methods to provide additional functionality, e.g. configuration or
|
||||
// subscription registration.
|
||||
type Producer interface {
|
||||
}
|
||||
|
||||
vendor/google.golang.org/grpc/balancer/base/balancer.go (generated, vendored): 88 changed lines
@ -22,7 +22,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
@ -42,10 +41,11 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]subConnInfo),
|
||||
subConns: resolver.NewAddressMap(),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
config: bb.config,
|
||||
state: connectivity.Connecting,
|
||||
}
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
@ -58,11 +58,6 @@ func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
type subConnInfo struct {
|
||||
subConn balancer.SubConn
|
||||
attrs *attributes.Attributes
|
||||
}
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
@ -70,7 +65,7 @@ type baseBalancer struct {
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns map[resolver.Address]subConnInfo // `attributes` is stripped from the keys of this map (the addresses)
|
||||
subConns *resolver.AddressMap
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
config Config
|
||||
@ -81,7 +76,7 @@ type baseBalancer struct {
|
||||
|
||||
func (b *baseBalancer) ResolverError(err error) {
|
||||
b.resolverErr = err
|
||||
if len(b.subConns) == 0 {
|
||||
if b.subConns.Len() == 0 {
|
||||
b.state = connectivity.TransientFailure
|
||||
}
|
||||
|
||||
@ -105,52 +100,29 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
addrsSet := resolver.NewAddressMap()
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
// Strip attributes from addresses before using them as map keys. So
|
||||
// that when two addresses only differ in attributes pointers (but with
|
||||
// the same attribute content), they are considered the same address.
|
||||
//
|
||||
// Note that this doesn't handle the case where the attribute content is
|
||||
// different. So if users want to set different attributes to create
|
||||
// duplicate connections to the same backend, it doesn't work. This is
|
||||
// fine for now, because duplicate is done by setting Metadata today.
|
||||
//
|
||||
// TODO: read attributes to handle duplicate connections.
|
||||
aNoAttrs := a
|
||||
aNoAttrs.Attributes = nil
|
||||
addrsSet[aNoAttrs] = struct{}{}
|
||||
if scInfo, ok := b.subConns[aNoAttrs]; !ok {
|
||||
addrsSet.Set(a, nil)
|
||||
if _, ok := b.subConns.Get(a); !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
//
|
||||
// When creating SubConn, the original address with attributes is
|
||||
// passed through. So that connection configurations in attributes
|
||||
// (like creds) will be used.
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
|
||||
if err != nil {
|
||||
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
b.subConns[aNoAttrs] = subConnInfo{subConn: sc, attrs: a.Attributes}
|
||||
b.subConns.Set(a, sc)
|
||||
b.scStates[sc] = connectivity.Idle
|
||||
b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
|
||||
sc.Connect()
|
||||
} else {
|
||||
// Always update the subconn's address in case the attributes
|
||||
// changed.
|
||||
//
|
||||
// The SubConn does a reflect.DeepEqual of the new and old
|
||||
// addresses. So this is a noop if the current address is the same
|
||||
// as the old one (including attributes).
|
||||
scInfo.attrs = a.Attributes
|
||||
b.subConns[aNoAttrs] = scInfo
|
||||
b.cc.UpdateAddresses(scInfo.subConn, []resolver.Address{a})
|
||||
}
|
||||
}
|
||||
for a, scInfo := range b.subConns {
|
||||
for _, a := range b.subConns.Keys() {
|
||||
sci, _ := b.subConns.Get(a)
|
||||
sc := sci.(balancer.SubConn)
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
b.cc.RemoveSubConn(scInfo.subConn)
|
||||
delete(b.subConns, a)
|
||||
if _, ok := addrsSet.Get(a); !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
b.subConns.Delete(a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in UpdateSubConnState.
|
||||
}
|
||||
@ -163,6 +135,9 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
|
||||
b.regeneratePicker()
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -182,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error {
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(b.mergeErrors())
|
||||
@ -192,10 +167,11 @@ func (b *baseBalancer) regeneratePicker() {
|
||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, scInfo := range b.subConns {
|
||||
if st, ok := b.scStates[scInfo.subConn]; ok && st == connectivity.Ready {
|
||||
addr.Attributes = scInfo.attrs
|
||||
readySCs[scInfo.subConn] = SubConnInfo{Address: addr}
|
||||
for _, addr := range b.subConns.Keys() {
|
||||
sci, _ := b.subConns.Get(addr)
|
||||
sc := sci.(balancer.SubConn)
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
@ -213,10 +189,14 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
}
|
||||
return
|
||||
}
|
||||
if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
|
||||
// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
|
||||
if oldS == connectivity.TransientFailure &&
|
||||
(s == connectivity.Connecting || s == connectivity.Idle) {
|
||||
// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
|
||||
// CONNECTING transitions to prevent the aggregated state from being
|
||||
// always CONNECTING when many backends exist but are all down.
|
||||
if s == connectivity.Idle {
|
||||
sc.Connect()
|
||||
}
|
||||
return
|
||||
}
|
||||
b.scStates[sc] = s
|
||||
@ -242,7 +222,6 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
b.state == connectivity.TransientFailure {
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||
}
|
||||
|
||||
@ -251,6 +230,11 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||
func (b *baseBalancer) Close() {
|
||||
}
|
||||
|
||||
// ExitIdle is a nop because the base balancer attempts to stay connected to
|
||||
// all SubConns at all times.
|
||||
func (b *baseBalancer) ExitIdle() {
|
||||
}
|
||||
|
||||
// NewErrPicker returns a Picker that always returns err on Pick().
|
||||
func NewErrPicker(err error) balancer.Picker {
|
||||
return &errPicker{err: err}
|
||||
|
||||
vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go (generated, vendored, new file): 74 lines
@ -0,0 +1,74 @@
/*
 *
 * Copyright 2022 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package balancer

import "google.golang.org/grpc/connectivity"

// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
// and returns one aggregated connectivity state.
//
// It's not thread safe.
type ConnectivityStateEvaluator struct {
	numReady            uint64 // Number of addrConns in ready state.
	numConnecting       uint64 // Number of addrConns in connecting state.
	numTransientFailure uint64 // Number of addrConns in transient failure state.
	numIdle             uint64 // Number of addrConns in idle state.
}

// RecordTransition records state change happening in subConn and based on that
// it evaluates what aggregated state should be.
//
//  - If at least one SubConn in Ready, the aggregated state is Ready;
//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
//  - Else if at least one SubConn is Idle, the aggregated state is Idle;
//  - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure.
//
// Shutdown is not considered.
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
	// Update counters.
	for idx, state := range []connectivity.State{oldState, newState} {
		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
		switch state {
		case connectivity.Ready:
			cse.numReady += updateVal
		case connectivity.Connecting:
			cse.numConnecting += updateVal
		case connectivity.TransientFailure:
			cse.numTransientFailure += updateVal
		case connectivity.Idle:
			cse.numIdle += updateVal
		}
	}
	return cse.CurrentState()
}

// CurrentState returns the current aggregate conn state by evaluating the counters
func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
	// Evaluate.
	if cse.numReady > 0 {
		return connectivity.Ready
	}
	if cse.numConnecting > 0 {
		return connectivity.Connecting
	}
	if cse.numIdle > 0 {
		return connectivity.Idle
	}
	return connectivity.TransientFailure
}
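For reference, a minimal standalone sketch, not part of the vendored code, of driving this evaluator the way a custom balancer would; the sequence of SubConn transitions below is hypothetical.

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func main() {
	cse := &balancer.ConnectivityStateEvaluator{}

	// A new SubConn is registered as Shutdown -> Idle, then driven to Ready.
	fmt.Println(cse.RecordTransition(connectivity.Shutdown, connectivity.Idle))    // IDLE
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))  // CONNECTING
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready)) // READY

	// Losing that connection drops the aggregate back below Ready.
	fmt.Println(cse.RecordTransition(connectivity.Ready, connectivity.Idle)) // IDLE
}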
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go (generated, vendored): 2 changed lines
@ -39,7 +39,7 @@ type State struct {
// Set returns a copy of the provided state with attributes containing s. s's
// data should not be mutated after calling Set.
func Set(state resolver.State, s *State) resolver.State {
	state.Attributes = state.Attributes.WithValues(key, s)
	state.Attributes = state.Attributes.WithValue(key, s)
	return state
}
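A hedged sketch, not from the vendored diff, of the call site this helper serves: a resolver attaching grpclb balancer addresses to its state. The addresses are placeholders; Set internally uses the WithValue call shown in the hunk above.

package main

import (
	"fmt"

	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

func main() {
	// Start from a resolver state carrying the backend addresses.
	st := resolver.State{Addresses: []resolver.Address{{Addr: "backend-1:50051"}}}

	// Set copies the state and stores *State in its attributes under the
	// package's private key.
	st = grpclbstate.Set(st, &grpclbstate.State{
		BalancerAddresses: []resolver.Address{{Addr: "lb.example.com:443"}},
	})

	fmt.Println(len(st.Addresses), st.Attributes != nil) // 1 true
}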
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go (generated, vendored): 20 changed lines
@ -22,7 +22,7 @@
package roundrobin

import (
	"sync"
	"sync/atomic"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
@ -47,11 +47,11 @@ func init() {
type rrPickerBuilder struct{}

func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	logger.Infof("roundrobinPicker: newPicker called with info: %v", info)
	logger.Infof("roundrobinPicker: Build called with info: %v", info)
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var scs []balancer.SubConn
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
		// Start at a random index, as the same RR balancer rebuilds a new
		// picker when SubConn states change, and we don't want to apply excess
		// load to the first server in the list.
		next: grpcrand.Intn(len(scs)),
		next: uint32(grpcrand.Intn(len(scs))),
	}
}

@ -69,15 +69,13 @@ type rrPicker struct {
	// created. The slice is immutable. Each Get() will do a round robin
	// selection from it and return the selected SubConn.
	subConns []balancer.SubConn

	mu   sync.Mutex
	next int
	next uint32
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	p.mu.Lock()
	sc := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	p.mu.Unlock()
	subConnsLen := uint32(len(p.subConns))
	nextIndex := atomic.AddUint32(&p.next, 1)

	sc := p.subConns[nextIndex%subConnsLen]
	return balancer.PickResult{SubConn: sc}, nil
}
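A standalone sketch, not part of the vendored code, of the same lock-free round-robin pattern the updated picker switches to: a shared uint32 counter advanced with atomic.AddUint32 and reduced modulo the length of an immutable slice, replacing the old mutex-guarded index. The backend list is hypothetical.

package main

import (
	"fmt"
	"sync/atomic"
)

type rr struct {
	backends []string // immutable after construction, like rrPicker.subConns
	next     uint32
}

func (r *rr) pick() string {
	// Safe for concurrent callers: the only shared mutation is the atomic add.
	n := atomic.AddUint32(&r.next, 1)
	return r.backends[n%uint32(len(r.backends))]
}

func main() {
	r := &rr{backends: []string{"a:50051", "b:50051", "c:50051"}}
	for i := 0; i < 5; i++ {
		fmt.Println(r.pick())
	}
}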
vendor/google.golang.org/grpc/balancer_conn_wrappers.go (generated, vendored): 520 changed lines
@ -19,164 +19,331 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
"google.golang.org/grpc/internal/balancer/gracefulswitch"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// scStateUpdate contains the subConn and the new state it changed to.
|
||||
type scStateUpdate struct {
|
||||
sc balancer.SubConn
|
||||
state connectivity.State
|
||||
err error
|
||||
}
|
||||
type ccbMode int
|
||||
|
||||
// ccBalancerWrapper is a wrapper on top of cc for balancers.
|
||||
// It implements balancer.ClientConn interface.
|
||||
const (
|
||||
ccbModeActive = iota
|
||||
ccbModeIdle
|
||||
ccbModeClosed
|
||||
ccbModeExitingIdle
|
||||
)
|
||||
|
||||
// ccBalancerWrapper sits between the ClientConn and the Balancer.
|
||||
//
|
||||
// ccBalancerWrapper implements methods corresponding to the ones on the
|
||||
// balancer.Balancer interface. The ClientConn is free to call these methods
|
||||
// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
|
||||
// to the Balancer happen synchronously and in order.
|
||||
//
|
||||
// ccBalancerWrapper also implements the balancer.ClientConn interface and is
|
||||
// passed to the Balancer implementations. It invokes unexported methods on the
|
||||
// ClientConn to handle these calls from the Balancer.
|
||||
//
|
||||
// It uses the gracefulswitch.Balancer internally to ensure that balancer
|
||||
// switches happen in a graceful manner.
|
||||
type ccBalancerWrapper struct {
|
||||
cc *ClientConn
|
||||
balancerMu sync.Mutex // synchronizes calls to the balancer
|
||||
balancer balancer.Balancer
|
||||
updateCh *buffer.Unbounded
|
||||
closed *grpcsync.Event
|
||||
done *grpcsync.Event
|
||||
// The following fields are initialized when the wrapper is created and are
|
||||
// read-only afterwards, and therefore can be accessed without a mutex.
|
||||
cc *ClientConn
|
||||
opts balancer.BuildOptions
|
||||
|
||||
mu sync.Mutex
|
||||
subConns map[*acBalancerWrapper]struct{}
|
||||
// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
|
||||
// mutually exclusive manner as they are scheduled in the serializer. Fields
|
||||
// accessed *only* in these serializer callbacks, can therefore be accessed
|
||||
// without a mutex.
|
||||
balancer *gracefulswitch.Balancer
|
||||
curBalancerName string
|
||||
|
||||
// mu guards access to the below fields. Access to the serializer and its
|
||||
// cancel function needs to be mutex protected because they are overwritten
|
||||
// when the wrapper exits idle mode.
|
||||
mu sync.Mutex
|
||||
serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
|
||||
serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
|
||||
mode ccbMode // Tracks the current mode of the wrapper.
|
||||
}
|
||||
|
||||
func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
|
||||
// is not created until the switchTo() method is invoked.
|
||||
func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb := &ccBalancerWrapper{
|
||||
cc: cc,
|
||||
updateCh: buffer.NewUnbounded(),
|
||||
closed: grpcsync.NewEvent(),
|
||||
done: grpcsync.NewEvent(),
|
||||
subConns: make(map[*acBalancerWrapper]struct{}),
|
||||
cc: cc,
|
||||
opts: bopts,
|
||||
serializer: grpcsync.NewCallbackSerializer(ctx),
|
||||
serializerCancel: cancel,
|
||||
}
|
||||
go ccb.watcher()
|
||||
ccb.balancer = b.Build(ccb, bopts)
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
|
||||
return ccb
|
||||
}
|
||||
|
||||
// watcher balancer functions sequentially, so the balancer can be implemented
|
||||
// lock-free.
|
||||
func (ccb *ccBalancerWrapper) watcher() {
|
||||
for {
|
||||
select {
|
||||
case t := <-ccb.updateCh.Get():
|
||||
ccb.updateCh.Load()
|
||||
if ccb.closed.HasFired() {
|
||||
break
|
||||
}
|
||||
switch u := t.(type) {
|
||||
case *scStateUpdate:
|
||||
ccb.balancerMu.Lock()
|
||||
ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err})
|
||||
ccb.balancerMu.Unlock()
|
||||
case *acBalancerWrapper:
|
||||
ccb.mu.Lock()
|
||||
if ccb.subConns != nil {
|
||||
delete(ccb.subConns, u)
|
||||
ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain)
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
default:
|
||||
logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t)
|
||||
}
|
||||
case <-ccb.closed.Done():
|
||||
}
|
||||
|
||||
if ccb.closed.HasFired() {
|
||||
ccb.balancerMu.Lock()
|
||||
ccb.balancer.Close()
|
||||
ccb.balancerMu.Unlock()
|
||||
ccb.mu.Lock()
|
||||
scs := ccb.subConns
|
||||
ccb.subConns = nil
|
||||
ccb.mu.Unlock()
|
||||
ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
|
||||
ccb.done.Fire()
|
||||
// Fire done before removing the addr conns. We can safely unblock
|
||||
// ccb.close and allow the removeAddrConns to happen
|
||||
// asynchronously.
|
||||
for acbw := range scs {
|
||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
ccb.closed.Fire()
|
||||
<-ccb.done.Done()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
// When updating addresses for a SubConn, if the address in use is not in
|
||||
// the new addresses, the old ac will be tearDown() and a new ac will be
|
||||
// created. tearDown() generates a state change with Shutdown state, we
|
||||
// don't want the balancer to receive this state change. So before
|
||||
// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
|
||||
// this function will be called with (nil, Shutdown). We don't need to call
|
||||
// balancer method in this case.
|
||||
if sc == nil {
|
||||
return
|
||||
}
|
||||
ccb.updateCh.Put(&scStateUpdate{
|
||||
sc: sc,
|
||||
state: s,
|
||||
err: err,
|
||||
})
|
||||
}
|
||||
|
||||
// updateClientConnState is invoked by grpc to push a ClientConnState update to
|
||||
// the underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||
ccb.balancerMu.Lock()
|
||||
defer ccb.balancerMu.Unlock()
|
||||
return ccb.balancer.UpdateClientConnState(*ccs)
|
||||
ccb.mu.Lock()
|
||||
errCh := make(chan error, 1)
|
||||
// Here and everywhere else where Schedule() is called, it is done with the
|
||||
// lock held. But the lock guards only the scheduling part. The actual
|
||||
// callback is called asynchronously without the lock being held.
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// If the addresses specified in the update contain addresses of type
|
||||
// "grpclb" and the selected LB policy is not "grpclb", these addresses
|
||||
// will be filtered out and ccs will be modified with the updated
|
||||
// address list.
|
||||
if ccb.curBalancerName != grpclbName {
|
||||
var addrs []resolver.Address
|
||||
for _, addr := range ccs.ResolverState.Addresses {
|
||||
if addr.Type == resolver.GRPCLB {
|
||||
continue
|
||||
}
|
||||
addrs = append(addrs, addr)
|
||||
}
|
||||
ccs.ResolverState.Addresses = addrs
|
||||
}
|
||||
errCh <- ccb.balancer.UpdateClientConnState(*ccs)
|
||||
})
|
||||
if !ok {
|
||||
// If we are unable to schedule a function with the serializer, it
|
||||
// indicates that it has been closed. A serializer is only closed when
|
||||
// the wrapper is closed or is in idle.
|
||||
ccb.mu.Unlock()
|
||||
return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
// We get here only if the above call to Schedule succeeds, in which case it
|
||||
// is guaranteed that the scheduled function will run. Therefore it is safe
|
||||
// to block on this channel.
|
||||
err := <-errCh
|
||||
if logger.V(2) && err != nil {
|
||||
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// updateSubConnState is invoked by grpc to push a subConn state update to the
|
||||
// underlying balancer.
|
||||
func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||
ccb.balancerMu.Lock()
|
||||
ccb.balancer.ResolverError(err)
|
||||
ccb.balancerMu.Unlock()
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
ccb.balancer.ResolverError(err)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
|
||||
// LB policy identified by name.
|
||||
//
|
||||
// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
|
||||
// first good update from the name resolver, it determines the LB policy to use
|
||||
// and invokes the switchTo() method. Upon receipt of every subsequent update
|
||||
// from the name resolver, it invokes this method.
|
||||
//
|
||||
// the ccBalancerWrapper keeps track of the current LB policy name, and skips
|
||||
// the graceful balancer switching process if the name does not change.
|
||||
func (ccb *ccBalancerWrapper) switchTo(name string) {
|
||||
ccb.mu.Lock()
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// TODO: Other languages use case-sensitive balancer registries. We should
|
||||
// switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
|
||||
if strings.EqualFold(ccb.curBalancerName, name) {
|
||||
return
|
||||
}
|
||||
ccb.buildLoadBalancingPolicy(name)
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
}
|
||||
|
||||
// buildLoadBalancingPolicy performs the following:
|
||||
// - retrieve a balancer builder for the given name. Use the default LB
|
||||
// policy, pick_first, if no LB policy with name is found in the registry.
|
||||
// - instruct the gracefulswitch balancer to switch to the above builder. This
|
||||
// will actually build the new balancer.
|
||||
// - update the `curBalancerName` field
|
||||
//
|
||||
// Must be called from a serializer callback.
|
||||
func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
|
||||
builder = newPickfirstBuilder()
|
||||
} else {
|
||||
channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
|
||||
}
|
||||
|
||||
if err := ccb.balancer.SwitchTo(builder); err != nil {
|
||||
channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
|
||||
return
|
||||
}
|
||||
ccb.curBalancerName = builder.Name()
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) close() {
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
|
||||
ccb.closeBalancer(ccbModeClosed)
|
||||
}
|
||||
|
||||
// enterIdleMode is invoked by grpc when the channel enters idle mode upon
|
||||
// expiry of idle_timeout. This call blocks until the balancer is closed.
|
||||
func (ccb *ccBalancerWrapper) enterIdleMode() {
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
|
||||
ccb.closeBalancer(ccbModeIdle)
|
||||
}
|
||||
|
||||
// closeBalancer is invoked when the channel is being closed or when it enters
|
||||
// idle mode upon expiry of idle_timeout.
|
||||
func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
ccb.mode = m
|
||||
done := ccb.serializer.Done
|
||||
b := ccb.balancer
|
||||
ok := ccb.serializer.Schedule(func(_ context.Context) {
|
||||
// Close the serializer to ensure that no more calls from gRPC are sent
|
||||
// to the balancer.
|
||||
ccb.serializerCancel()
|
||||
// Empty the current balancer name because we don't have a balancer
|
||||
// anymore and also so that we act on the next call to switchTo by
|
||||
// creating a new balancer specified by the new resolver.
|
||||
ccb.curBalancerName = ""
|
||||
})
|
||||
if !ok {
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
ccb.mu.Unlock()
|
||||
|
||||
// Give enqueued callbacks a chance to finish.
|
||||
<-done
|
||||
// Spawn a goroutine to close the balancer (since it may block trying to
|
||||
// cleanup all allocated resources) and return early.
|
||||
go b.Close()
|
||||
}
|
||||
|
||||
// exitIdleMode is invoked by grpc when the channel exits idle mode either
|
||||
// because of an RPC or because of an invocation of the Connect() API. This
|
||||
// recreates the balancer that was closed previously when entering idle mode.
|
||||
//
|
||||
// If the channel is not in idle mode, we know for a fact that we are here as a
|
||||
// result of the user calling the Connect() method on the ClientConn. In this
|
||||
// case, we can simply forward the call to the underlying balancer, instructing
|
||||
// it to reconnect to the backends.
|
||||
func (ccb *ccBalancerWrapper) exitIdleMode() {
|
||||
ccb.mu.Lock()
|
||||
if ccb.mode == ccbModeClosed {
|
||||
// Request to exit idle is a no-op when wrapper is already closed.
|
||||
ccb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if ccb.mode == ccbModeIdle {
|
||||
// Recreate the serializer which was closed when we entered idle.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
|
||||
ccb.serializerCancel = cancel
|
||||
}
|
||||
|
||||
// The ClientConn guarantees that mutual exclusion between close() and
|
||||
// exitIdleMode(), and since we just created a new serializer, we can be
|
||||
// sure that the below function will be scheduled.
|
||||
done := make(chan struct{})
|
||||
ccb.serializer.Schedule(func(_ context.Context) {
|
||||
defer close(done)
|
||||
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
|
||||
if ccb.mode != ccbModeIdle {
|
||||
ccb.balancer.ExitIdle()
|
||||
return
|
||||
}
|
||||
|
||||
// Gracefulswitch balancer does not support a switchTo operation after
|
||||
// being closed. Hence we need to create a new one here.
|
||||
ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
|
||||
ccb.mode = ccbModeActive
|
||||
channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
|
||||
|
||||
})
|
||||
ccb.mu.Unlock()
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||
if len(addrs) <= 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
if ccb.isIdleOrClosed() {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
|
||||
}
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
|
||||
|
||||
if len(addrs) == 0 {
|
||||
return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
|
||||
}
|
||||
ac, err := ccb.cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
acbw := &acBalancerWrapper{ac: ac}
|
||||
acbw.ac.mu.Lock()
|
||||
acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)}
|
||||
ac.acbw = acbw
|
||||
acbw.ac.mu.Unlock()
|
||||
ccb.subConns[acbw] = struct{}{}
|
||||
return acbw, nil
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||
// The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock
|
||||
// during switchBalancer() if the old balancer calls RemoveSubConn() in its
|
||||
// Close().
|
||||
ccb.updateCh.Put(sc)
|
||||
if ccb.isIdleOrClosed() {
|
||||
// It it safe to ignore this call when the balancer is closed or in idle
|
||||
// because the ClientConn takes care of closing the connections.
|
||||
//
|
||||
// Not returning early from here when the balancer is closed or in idle
|
||||
// leads to a deadlock though, because of the following sequence of
|
||||
// calls when holding cc.mu:
|
||||
// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
|
||||
// ccb.RemoveAddrConn --> cc.removeAddrConn
|
||||
return
|
||||
}
|
||||
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
acbw, ok := sc.(*acBalancerWrapper)
|
||||
if !ok {
|
||||
return
|
||||
@ -185,11 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
ccb.mu.Lock()
|
||||
defer ccb.mu.Unlock()
|
||||
if ccb.subConns == nil {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
// Update picker before updating state. Even though the ordering here does
|
||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
||||
// case where we wait for ready and then perform an RPC. If the picker is
|
||||
@ -200,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||
}
|
||||
|
||||
func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
|
||||
if ccb.isIdleOrClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
ccb.cc.resolveNow(o)
|
||||
}
|
||||
|
||||
@ -210,58 +380,80 @@ func (ccb *ccBalancerWrapper) Target() string {
|
||||
// acBalancerWrapper is a wrapper on top of ac for balancers.
|
||||
// It implements balancer.SubConn interface.
|
||||
type acBalancerWrapper struct {
|
||||
mu sync.Mutex
|
||||
ac *addrConn
|
||||
ac *addrConn // read-only
|
||||
|
||||
mu sync.Mutex
|
||||
producers map[balancer.ProducerBuilder]*refCountedProducer
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) String() string {
|
||||
return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
if len(addrs) <= 0 {
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
return
|
||||
}
|
||||
if !acbw.ac.tryUpdateAddrs(addrs) {
|
||||
cc := acbw.ac.cc
|
||||
opts := acbw.ac.scopts
|
||||
acbw.ac.mu.Lock()
|
||||
// Set old ac.acbw to nil so the Shutdown state update will be ignored
|
||||
// by balancer.
|
||||
//
|
||||
// TODO(bar) the state transition could be wrong when tearDown() old ac
|
||||
// and creating new ac, fix the transition.
|
||||
acbw.ac.acbw = nil
|
||||
acbw.ac.mu.Unlock()
|
||||
acState := acbw.ac.getState()
|
||||
acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain)
|
||||
|
||||
if acState == connectivity.Shutdown {
|
||||
return
|
||||
}
|
||||
|
||||
ac, err := cc.newAddrConn(addrs, opts)
|
||||
if err != nil {
|
||||
channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
|
||||
return
|
||||
}
|
||||
acbw.ac = ac
|
||||
ac.mu.Lock()
|
||||
ac.acbw = acbw
|
||||
ac.mu.Unlock()
|
||||
if acState != connectivity.Idle {
|
||||
ac.connect()
|
||||
}
|
||||
}
|
||||
acbw.ac.updateAddrs(addrs)
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) Connect() {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
acbw.ac.connect()
|
||||
go acbw.ac.connect()
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
|
||||
// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
|
||||
// ready, blocks until it is or ctx expires. Returns an error when the context
|
||||
// expires or the addrConn is shut down.
|
||||
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
|
||||
transport, err := acbw.ac.getTransport(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
|
||||
}
|
||||
|
||||
// Invoke performs a unary RPC. If the addrConn is not ready, returns
|
||||
// errSubConnNotReady.
|
||||
func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error {
|
||||
cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cs.SendMsg(args); err != nil {
|
||||
return err
|
||||
}
|
||||
return cs.RecvMsg(reply)
|
||||
}
|
||||
|
||||
type refCountedProducer struct {
|
||||
producer balancer.Producer
|
||||
refs int // number of current refs to the producer
|
||||
close func() // underlying producer's close function
|
||||
}
|
||||
|
||||
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
|
||||
acbw.mu.Lock()
|
||||
defer acbw.mu.Unlock()
|
||||
return acbw.ac
|
||||
|
||||
// Look up existing producer from this builder.
|
||||
pData := acbw.producers[pb]
|
||||
if pData == nil {
|
||||
// Not found; create a new one and add it to the producers map.
|
||||
p, close := pb.Build(acbw)
|
||||
pData = &refCountedProducer{producer: p, close: close}
|
||||
acbw.producers[pb] = pData
|
||||
}
|
||||
// Account for this new reference.
|
||||
pData.refs++
|
||||
|
||||
// Return a cleanup function wrapped in a OnceFunc to remove this reference
|
||||
// and delete the refCountedProducer from the map if the total reference
|
||||
// count goes to zero.
|
||||
unref := func() {
|
||||
acbw.mu.Lock()
|
||||
pData.refs--
|
||||
if pData.refs == 0 {
|
||||
defer pData.close() // Run outside the acbw mutex
|
||||
delete(acbw.producers, pb)
|
||||
}
|
||||
acbw.mu.Unlock()
|
||||
}
|
||||
return pData.producer, grpcsync.OnceFunc(unref)
|
||||
}
|
||||
|
||||
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go (generated, vendored): 22 changed lines
@ -18,14 +18,13 @@
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.14.0
|
||||
// protoc-gen-go v1.30.0
|
||||
// protoc v4.22.0
|
||||
// source: grpc/binlog/v1/binarylog.proto
|
||||
|
||||
package grpc_binarylog_v1
|
||||
|
||||
import (
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
durationpb "google.golang.org/protobuf/types/known/durationpb"
|
||||
@ -41,10 +40,6 @@ const (
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// Enumerates the type of event
|
||||
// Note the terminology is different from the RPC semantics
|
||||
// definition, but the same meaning is expressed here.
|
||||
@ -261,6 +256,7 @@ type GrpcLogEntry struct {
|
||||
// according to the type of the log entry.
|
||||
//
|
||||
// Types that are assignable to Payload:
|
||||
//
|
||||
// *GrpcLogEntry_ClientHeader
|
||||
// *GrpcLogEntry_ServerHeader
|
||||
// *GrpcLogEntry_Message
|
||||
@ -694,12 +690,12 @@ func (x *Message) GetData() []byte {
|
||||
// Header keys added by gRPC are omitted. To be more specific,
|
||||
// implementations will not log the following entries, and this is
|
||||
// not to be treated as a truncation:
|
||||
// - entries handled by grpc that are not user visible, such as those
|
||||
// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||||
// or keys like 'lb-token'
|
||||
// - transport specific entries, including but not limited to:
|
||||
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||||
// - entries added for call credentials
|
||||
// - entries handled by grpc that are not user visible, such as those
|
||||
// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||||
// or keys like 'lb-token'
|
||||
// - transport specific entries, including but not limited to:
|
||||
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||||
// - entries added for call credentials
|
||||
//
|
||||
// Implementations must always log grpc-trace-bin if it is present.
|
||||
// Practically speaking it will only be visible on server side because
|
||||
|
||||
vendor/google.golang.org/grpc/call.go (generated, vendored): 5 changed lines
@ -27,6 +27,11 @@ import (
//
// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
	if err := cc.idlenessMgr.onCallBegin(); err != nil {
		return err
	}
	defer cc.idlenessMgr.onCallEnd()

	// allow interceptor to see all applicable call options, which means those
	// configured as defaults from dial option as well as per-call options
	opts = combine(cc.dopts.callOptions, opts)
vendor/google.golang.org/grpc/channelz/channelz.go (generated, vendored, new file): 36 lines
@ -0,0 +1,36 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package channelz exports internals of the channelz implementation as required
// by other gRPC packages.
//
// The implementation of the channelz spec as defined in
// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
// the `internal/channelz` package.
//
// # Experimental
//
// Notice: All APIs in this package are experimental and may be removed in a
// later release.
package channelz

import "google.golang.org/grpc/internal/channelz"

// Identifier is an opaque identifier which uniquely identifies an entity in the
// channelz database.
type Identifier = channelz.Identifier
vendor/google.golang.org/grpc/clientconn.go (generated, vendored): 1357 changed lines
File diff suppressed because it is too large
vendor/google.golang.org/grpc/codes/code_string.go (generated, vendored): 51 changed lines
@ -18,7 +18,15 @@

package codes

import "strconv"
import (
	"strconv"

	"google.golang.org/grpc/internal"
)

func init() {
	internal.CanonicalString = canonicalString
}

func (c Code) String() string {
	switch c {
@ -60,3 +68,44 @@ func (c Code) String() string {
		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
	}
}

func canonicalString(c Code) string {
	switch c {
	case OK:
		return "OK"
	case Canceled:
		return "CANCELLED"
	case Unknown:
		return "UNKNOWN"
	case InvalidArgument:
		return "INVALID_ARGUMENT"
	case DeadlineExceeded:
		return "DEADLINE_EXCEEDED"
	case NotFound:
		return "NOT_FOUND"
	case AlreadyExists:
		return "ALREADY_EXISTS"
	case PermissionDenied:
		return "PERMISSION_DENIED"
	case ResourceExhausted:
		return "RESOURCE_EXHAUSTED"
	case FailedPrecondition:
		return "FAILED_PRECONDITION"
	case Aborted:
		return "ABORTED"
	case OutOfRange:
		return "OUT_OF_RANGE"
	case Unimplemented:
		return "UNIMPLEMENTED"
	case Internal:
		return "INTERNAL"
	case Unavailable:
		return "UNAVAILABLE"
	case DataLoss:
		return "DATA_LOSS"
	case Unauthenticated:
		return "UNAUTHENTICATED"
	default:
		return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
	}
}
vendor/google.golang.org/grpc/connectivity/connectivity.go (generated, vendored): 35 changed lines
@ -18,7 +18,6 @@

// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
// All APIs in this package are experimental.
package connectivity

import (
@ -45,7 +44,7 @@ func (s State) String() string {
		return "SHUTDOWN"
	default:
		logger.Errorf("unknown connectivity state: %d", s)
		return "Invalid-State"
		return "INVALID_STATE"
	}
}
@ -61,3 +60,35 @@ const (
|
||||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
// ServingMode indicates the current mode of operation of the server.
|
||||
//
|
||||
// Only xDS enabled gRPC servers currently report their serving mode.
|
||||
type ServingMode int
|
||||
|
||||
const (
|
||||
// ServingModeStarting indicates that the server is starting up.
|
||||
ServingModeStarting ServingMode = iota
|
||||
// ServingModeServing indicates that the server contains all required
|
||||
// configuration and is serving RPCs.
|
||||
ServingModeServing
|
||||
// ServingModeNotServing indicates that the server is not accepting new
|
||||
// connections. Existing connections will be closed gracefully, allowing
|
||||
// in-progress RPCs to complete. A server enters this mode when it does not
|
||||
// contain the required configuration to serve RPCs.
|
||||
ServingModeNotServing
|
||||
)
|
||||
|
||||
func (s ServingMode) String() string {
|
||||
switch s {
|
||||
case ServingModeStarting:
|
||||
return "STARTING"
|
||||
case ServingModeServing:
|
||||
return "SERVING"
|
||||
case ServingModeNotServing:
|
||||
return "NOT_SERVING"
|
||||
default:
|
||||
logger.Errorf("unknown serving mode: %d", s)
|
||||
return "INVALID_MODE"
|
||||
}
|
||||
}
|
||||
|
||||
vendor/google.golang.org/grpc/credentials/credentials.go (generated, vendored): 45 changed lines
@ -36,16 +36,16 @@ import (
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
// attach security information to every RPC (e.g., oauth2).
|
||||
type PerRPCCredentials interface {
|
||||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation. Additionally, RequestInfo data will be
|
||||
// available via ctx to this call.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
// GetRequestMetadata gets the current request metadata, refreshing tokens
|
||||
// if required. This should be called by the transport layer on each
|
||||
// request, and the data should be populated in headers or other
|
||||
// context. If a status code is returned, it will be used as the status for
|
||||
// the RPC (restricted to an allowable set of codes as defined by gRFC
|
||||
// A54). uri is the URI of the entry point for the request. When supported
|
||||
// by the underlying implementation, ctx can be used for timeout and
|
||||
// cancellation. Additionally, RequestInfo data will be available via ctx
|
||||
// to this call. TODO(zhaoq): Define the set of the qualified keys instead
|
||||
// of leaving it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
// RequireTransportSecurity indicates whether the credentials requires
|
||||
// transport security.
|
||||
@ -140,6 +140,11 @@ type TransportCredentials interface {
|
||||
// Additionally, ClientHandshakeInfo data will be available via the context
|
||||
// passed to this call.
|
||||
//
|
||||
// The second argument to this method is the `:authority` header value used
|
||||
// while creating new streams on this connection after authentication
|
||||
// succeeds. Implementations must use this as the server name during the
|
||||
// authentication handshake.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
@ -153,9 +158,13 @@ type TransportCredentials interface {
|
||||
Info() ProtocolInfo
|
||||
// Clone makes a copy of this TransportCredentials.
|
||||
Clone() TransportCredentials
|
||||
// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
|
||||
// gRPC internals also use it to override the virtual hosting name if it is set.
|
||||
// It must be called before dialing. Currently, this is only used by grpclb.
|
||||
// OverrideServerName specifies the value used for the following:
|
||||
// - verifying the hostname on the returned certificates
|
||||
// - as SNI in the client's handshake to support virtual hosting
|
||||
// - as the value for `:authority` header at stream creation time
|
||||
//
|
||||
// Deprecated: use grpc.WithAuthority instead. Will be supported
|
||||
// throughout 1.x.
|
||||
OverrideServerName(string) error
|
||||
}
|
||||
|
||||
@ -169,8 +178,18 @@ type TransportCredentials interface {
|
||||
//
|
||||
// This API is experimental.
|
||||
type Bundle interface {
|
||||
// TransportCredentials returns the transport credentials from the Bundle.
|
||||
//
|
||||
// Implementations must return non-nil transport credentials. If transport
|
||||
// security is not needed by the Bundle, implementations may choose to
|
||||
// return insecure.NewCredentials().
|
||||
TransportCredentials() TransportCredentials
|
||||
|
||||
// PerRPCCredentials returns the per-RPC credentials from the Bundle.
|
||||
//
|
||||
// May be nil if per-RPC credentials are not needed.
|
||||
PerRPCCredentials() PerRPCCredentials
|
||||
|
||||
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
|
||||
// existing Bundle may cause races.
|
||||
//
|
||||
|
||||
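As an illustration of the PerRPCCredentials contract described above, here is a minimal sketch of a static bearer-token implementation. It is hypothetical (not part of grpc-go or this diff); the type and field names are made up.

package tokencreds

import "context"

// staticToken attaches a fixed bearer token to every RPC. Illustrative only.
type staticToken struct {
	token string
}

// GetRequestMetadata returns the metadata to attach to each request: here a
// single authorization header derived from the stored token.
func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity reports that this credential should only be sent
// over a secure transport.
func (s staticToken) RequireTransportSecurity() bool {
	return true
}

A value of this type could then be passed to grpc.WithPerRPCCredentials when dialing.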
30 vendor/google.golang.org/grpc/credentials/go12.go generated vendored
@ -1,30 +0,0 @@
// +build go1.12

/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package credentials

import "crypto/tls"

// This init function adds cipher suite constants only defined in Go 1.12.
func init() {
	cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
	cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
	cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
}
98 vendor/google.golang.org/grpc/credentials/insecure/insecure.go generated vendored Normal file
@ -0,0 +1,98 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package insecure provides an implementation of the
// credentials.TransportCredentials interface which disables transport security.
package insecure

import (
	"context"
	"net"

	"google.golang.org/grpc/credentials"
)

// NewCredentials returns a credentials which disables transport security.
//
// Note that using this credentials with per-RPC credentials which require
// transport security is incompatible and will cause grpc.Dial() to fail.
func NewCredentials() credentials.TransportCredentials {
	return insecureTC{}
}

// insecureTC implements the insecure transport credentials. The handshake
// methods simply return the passed in net.Conn and set the security level to
// NoSecurity.
type insecureTC struct{}

func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}

func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}

func (insecureTC) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
}

func (insecureTC) Clone() credentials.TransportCredentials {
	return insecureTC{}
}

func (insecureTC) OverrideServerName(string) error {
	return nil
}

// info contains the auth information for an insecure connection.
// It implements the AuthInfo interface.
type info struct {
	credentials.CommonAuthInfo
}

// AuthType returns the type of info as a string.
func (info) AuthType() string {
	return "insecure"
}

// insecureBundle implements an insecure bundle.
// An insecure bundle provides a thin wrapper around insecureTC to support
// the credentials.Bundle interface.
type insecureBundle struct{}

// NewBundle returns a bundle with disabled transport security and no per rpc credential.
func NewBundle() credentials.Bundle {
	return insecureBundle{}
}

// NewWithMode returns a new insecure Bundle. The mode is ignored.
func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
	return insecureBundle{}, nil
}

// PerRPCCredentials returns a nil implementation as insecure
// bundle does not support a per rpc credential.
func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
	return nil
}

// TransportCredentials returns the underlying insecure transport credential.
func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
	return NewCredentials()
}
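A minimal usage sketch for the new insecure credentials (the replacement for the deprecated WithInsecure dial option); the endpoint localhost:50051 is a placeholder.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a plaintext endpoint; insecure.NewCredentials() disables transport security.
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}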
9 vendor/google.golang.org/grpc/credentials/tls.go generated vendored
@ -23,9 +23,9 @@ import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"net"
	"net/url"
	"os"

	credinternal "google.golang.org/grpc/internal/credentials"
)
@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor
// it will override the virtual host name of authority (e.g. :authority header
// field) in requests.
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
	b, err := ioutil.ReadFile(certFile)
	b, err := os.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
// from GetSecurityValue(), containing security info like cipher and certificate used.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -230,4 +230,7 @@ var cipherSuiteLookup = map[uint16]string{
	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:  "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:   "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
	tls.TLS_AES_128_GCM_SHA256:                 "TLS_AES_128_GCM_SHA256",
	tls.TLS_AES_256_GCM_SHA384:                 "TLS_AES_256_GCM_SHA384",
	tls.TLS_CHACHA20_POLY1305_SHA256:           "TLS_CHACHA20_POLY1305_SHA256",
}
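For completeness, a hedged sketch of how NewClientTLSFromFile is typically used on the client side; the certificate path, server name override, and endpoint are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Load the CA certificate that signed the server's certificate.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("failed to load TLS credentials: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}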
212 vendor/google.golang.org/grpc/dialoptions.go generated vendored
@ -20,22 +20,34 @@ package grpc

import (
	"context"
	"fmt"
	"net"
	"time"

	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/channelz"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/internal"
	internalbackoff "google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/internal/envconfig"
	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/internal/transport"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/stats"
)

func init() {
	internal.AddGlobalDialOptions = func(opt ...DialOption) {
		globalDialOptions = append(globalDialOptions, opt...)
	}
	internal.ClearGlobalDialOptions = func() {
		globalDialOptions = nil
	}
	internal.WithBinaryLogger = withBinaryLogger
	internal.JoinDialOptions = newJoinDialOption
	internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
}

// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
@ -45,20 +57,18 @@ type dialOptions struct {
	chainUnaryInts  []UnaryClientInterceptor
	chainStreamInts []StreamClientInterceptor

	cp              Compressor
	dc              Decompressor
	bs              internalbackoff.Strategy
	block           bool
	returnLastError bool
	insecure        bool
	timeout         time.Duration
	scChan          <-chan ServiceConfig
	authority       string
	copts           transport.ConnectOptions
	callOptions     []CallOption
	// This is used by WithBalancerName dial option.
	balancerBuilder  balancer.Builder
	channelzParentID int64
	cp              Compressor
	dc              Decompressor
	bs              internalbackoff.Strategy
	block           bool
	returnLastError bool
	timeout         time.Duration
	scChan          <-chan ServiceConfig
	authority       string
	binaryLogger    binarylog.Logger
	copts           transport.ConnectOptions
	callOptions     []CallOption
	channelzParentID     *channelz.Identifier
	disableServiceConfig bool
	disableRetry         bool
	disableHealthCheck   bool
@ -67,6 +77,7 @@ type dialOptions struct {
	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
	defaultServiceConfigRawJSON *string
	resolvers                   []resolver.Builder
	idleTimeout                 time.Duration
}

// DialOption configures how we set up the connection.
@ -74,10 +85,12 @@ type DialOption interface {
	apply(*dialOptions)
}

var globalDialOptions []DialOption

// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -85,6 +98,16 @@ type EmptyDialOption struct{}

func (EmptyDialOption) apply(*dialOptions) {}

type disableGlobalDialOptions struct{}

func (disableGlobalDialOptions) apply(*dialOptions) {}

// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
// from applying the global DialOptions (set via AddGlobalDialOptions).
func newDisableGlobalDialOptions() DialOption {
	return &disableGlobalDialOptions{}
}

// funcDialOption wraps a function that modifies dialOptions into an
// implementation of the DialOption interface.
type funcDialOption struct {
@ -101,13 +124,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
	}
}

type joinDialOption struct {
	opts []DialOption
}

func (jdo *joinDialOption) apply(do *dialOptions) {
	for _, opt := range jdo.opts {
		opt.apply(do)
	}
}

func newJoinDialOption(opts ...DialOption) DialOption {
	return &joinDialOption{opts: opts}
}

// WithWriteBufferSize determines how much data can be batched before doing a
// write on the wire. The corresponding memory allocation for this buffer will
// be twice the size to keep syscalls low. The default value for this buffer is
// 32KB.
//
// Zero will disable the write buffer such that each write will be on underlying
// connection. Note: A Send call may not directly translate to a write.
// Zero or negative values will disable the write buffer such that each write
// will be on underlying connection. Note: A Send call may not directly
// translate to a write.
func WithWriteBufferSize(s int) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.WriteBufferSize = s
@ -117,8 +155,9 @@ func WithWriteBufferSize(s int) DialOption {
// WithReadBufferSize lets you set the size of read buffer, this determines how
// much data can be read at most for each read syscall.
//
// The default value for this buffer is 32KB. Zero will disable read buffer for
// a connection so data framer can access the underlying conn directly.
// The default value for this buffer is 32KB. Zero or negative values will
// disable read buffer for a connection so data framer can access the
// underlying conn directly.
func WithReadBufferSize(s int) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.ReadBufferSize = s
@ -196,25 +235,6 @@ func WithDecompressor(dc Decompressor) DialOption {
	})
}

// WithBalancerName sets the balancer that the ClientConn will be initialized
// with. Balancer registered with balancerName will be used. This function
// panics if no balancer was registered by balancerName.
//
// The balancer cannot be overridden by balancer option specified by service
// config.
//
// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
// instead. Will be removed in a future 1.x release.
func WithBalancerName(balancerName string) DialOption {
	builder := balancer.Get(balancerName)
	if builder == nil {
		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
	}
	return newFuncDialOption(func(o *dialOptions) {
		o.balancerBuilder = builder
	})
}

// WithServiceConfig returns a DialOption which has a channel to read the
// service configuration.
//
@ -228,18 +248,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
	})
}

// WithConnectParams configures the dialer to use the provided ConnectParams.
// WithConnectParams configures the ClientConn to use the provided ConnectParams
// for creating and maintaining connections to servers.
//
// The backoff configuration specified as part of the ConnectParams overrides
// all defaults specified in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithConnectParams(p ConnectParams) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.bs = internalbackoff.Exponential{Config: p.Backoff}
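Following the guidance in the comment above, a sketch (not from this diff) that starts from backoff.DefaultConfig and overrides a single field; the endpoint and the 30-second/10-second values are placeholders.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func dial() (*grpc.ClientConn, error) {
	// Start from the default backoff configuration and override one knob.
	cfg := backoff.DefaultConfig
	cfg.MaxDelay = 30 * time.Second

	return grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           cfg,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
}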
@ -277,9 +293,12 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
	})
}

// WithBlock returns a DialOption which makes caller of Dial blocks until the
// WithBlock returns a DialOption which makes callers of Dial block until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting the server happens in background.
//
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
func WithBlock() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.block = true
@ -291,7 +310,10 @@ func WithBlock() DialOption {
// the context.DeadlineExceeded error.
// Implies WithBlock()
//
// Experimental
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -303,18 +325,24 @@ func WithReturnConnectionError() DialOption {
}

// WithInsecure returns a DialOption which disables transport security for this
// ClientConn. Note that transport security is required unless WithInsecure is
// set.
// ClientConn. Under the hood, it uses insecure.NewCredentials().
//
// Note that using this DialOption with per-RPC credentials (through
// WithCredentialsBundle or WithPerRPCCredentials) which require transport
// security is incompatible and will cause grpc.Dial() to fail.
//
// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
// instead. Will be supported throughout 1.x.
func WithInsecure() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.insecure = true
		o.copts.TransportCredentials = insecure.NewCredentials()
	})
}

// WithNoProxy returns a DialOption which disables the use of proxies for this
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -345,7 +373,7 @@ func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -401,7 +429,21 @@ func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
// all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.copts.StatsHandler = h
		if h == nil {
			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
			// Do not allow a nil stats handler, which would otherwise cause
			// panics.
			return
		}
		o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
	})
}

// withBinaryLogger returns a DialOption that specifies the binary logger for
// this ClientConn.
func withBinaryLogger(bl binarylog.Logger) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.binaryLogger = bl
	})
}

@ -413,7 +455,10 @@ func WithStatsHandler(h stats.Handler) DialOption {
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
// Experimental
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -482,8 +527,7 @@ func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOpt
}

// WithAuthority returns a DialOption that specifies the value to be used as the
// :authority pseudo-header. This value only works with WithInsecure and has no
// effect if TransportCredentials are present.
// :authority pseudo-header and as the server name in authentication handshake.
func WithAuthority(a string) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.authority = a
@ -494,11 +538,11 @@ func WithAuthority(a string) DialOption {
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithChannelzParentID(id int64) DialOption {
func WithChannelzParentID(id *channelz.Identifier) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.channelzParentID = id
	})
@ -519,14 +563,16 @@ func WithDisableServiceConfig() DialOption {
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
//
// 1. WithDisableServiceConfig is also used.
// 2. Resolver does not return a service config or if the resolver returns an
// invalid service config.
// 1. WithDisableServiceConfig is also used, or
//
// Experimental
// 2. The name resolver does not provide a service config or provides an
// invalid service config.
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
// The parameter s is the JSON representation of the default service config.
// For more information about service configs, see:
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
// For a simple example of usage, see:
// examples/features/load_balancing/client/main.go
func WithDefaultServiceConfig(s string) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.defaultServiceConfigRawJSON = &s
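A sketch of passing a default service config as JSON, following the service_config.md format referenced above; the round_robin policy shown is illustrative, not prescribed by this diff.

package main

import "google.golang.org/grpc"

// defaultServiceConfig enables round_robin load balancing when the resolver
// does not supply a service config. Illustrative values only.
const defaultServiceConfig = `{"loadBalancingConfig": [{"round_robin":{}}]}`

func dialOption() grpc.DialOption {
	return grpc.WithDefaultServiceConfig(defaultServiceConfig)
}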
@ -537,15 +583,6 @@ func WithDefaultServiceConfig(s string) DialOption {
// service config enables them. This does not impact transparent retries, which
// will happen automatically if no data is written to the wire or if the RPC is
// unprocessed by the remote server.
//
// Retry support is currently disabled by default, but will be enabled by
// default in the future. Until then, it may be enabled by setting the
// environment variable "GRPC_GO_RETRY" to "on".
//
// Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithDisableRetry() DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.disableRetry = true
@ -563,7 +600,7 @@ func WithMaxHeaderListSize(s uint32) DialOption {
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -585,7 +622,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {

func defaultDialOptions() dialOptions {
	return dialOptions{
		disableRetry:    !envconfig.Retry,
		healthCheckFunc: internal.HealthCheckFunc,
		copts: transport.ConnectOptions{
			WriteBufferSize: defaultWriteBufSize,
@ -611,7 +647,7 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
// resolver.Register. They will be matched against the scheme used for the
// current Dial only, and will take precedence over the global registry.
//
// Experimental
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -620,3 +656,23 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
		o.resolvers = append(o.resolvers, rs...)
	})
}

// WithIdleTimeout returns a DialOption that configures an idle timeout for the
// channel. If the channel is idle for the configured timeout, i.e there are no
// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
// and as a result the name resolver and load balancer will be shut down. The
// channel will exit idle mode when the Connect() method is called or when an
// RPC is initiated.
//
// By default this feature is disabled, which can also be explicitly configured
// by passing zero to this function.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WithIdleTimeout(d time.Duration) DialOption {
	return newFuncDialOption(func(o *dialOptions) {
		o.idleTimeout = d
	})
}
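A usage sketch for the new idle timeout option described above; the plaintext endpoint and the 30-minute value are placeholders.

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func dial() (*grpc.ClientConn, error) {
	// The channel enters idle mode after 30 minutes without RPC activity.
	return grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(30*time.Minute),
	)
}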
9 vendor/google.golang.org/grpc/encoding/encoding.go generated vendored
@ -19,7 +19,7 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -28,6 +28,8 @@ package encoding
import (
	"io"
	"strings"

	"google.golang.org/grpc/internal/grpcutil"
)

// Identity specifies the optional encoding for uncompressed streams.
@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor)
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
	registeredCompressor[c.Name()] = c
	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
	}
}

// GetCompressor returns Compressor for the given compressor name.
@ -108,7 +113,7 @@ var registeredCodecs = make(map[string]Codec)
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Compressors are
// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
	if codec == nil {
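As an illustration of the init-time registration rule noted above, a sketch of a trivial pass-through codec for raw byte slices; the type and the name "raw-bytes" are made up for this example.

package rawcodec

import (
	"fmt"

	"google.golang.org/grpc/encoding"
)

// rawCodec passes []byte payloads through unchanged. Illustrative only.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) Name() string { return "raw-bytes" }

// Register at init time, since RegisterCodec is not thread-safe.
func init() {
	encoding.RegisterCodec(rawCodec{})
}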
7 vendor/google.golang.org/grpc/encoding/gzip/gzip.go generated vendored
@ -19,7 +19,7 @@
// Package gzip implements and registers the gzip compressor
// during the initialization.
//
// Experimental
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
@ -30,7 +30,6 @@ import (
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"sync"

	"google.golang.org/grpc/encoding"
@ -42,7 +41,7 @@ const Name = "gzip"
func init() {
	c := &compressor{}
	c.poolCompressor.New = func() interface{} {
		return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor}
		return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor}
	}
	encoding.RegisterCompressor(c)
}
@ -63,7 +62,7 @@ func SetLevel(level int) error {
	}
	c := encoding.GetCompressor(Name).(*compressor)
	c.poolCompressor.New = func() interface{} {
		w, err := gzip.NewWriterLevel(ioutil.Discard, level)
		w, err := gzip.NewWriterLevel(io.Discard, level)
		if err != nil {
			panic(err)
		}
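A sketch of enabling the gzip compressor for a single call via the grpc.UseCompressor call option; importing the package runs its init() registration, and gzip.Name supplies the registered name.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // init() registers the gzip compressor
)

// callWithGzip invokes a unary method with gzip compression enabled for this call.
func callWithGzip(ctx context.Context, conn *grpc.ClientConn, method string, req, reply interface{}) error {
	return conn.Invoke(ctx, method, req, reply, grpc.UseCompressor(gzip.Name))
}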
103 vendor/google.golang.org/grpc/grpclog/loggerv2.go generated vendored
@ -19,11 +19,13 @@
package grpclog

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"strconv"
	"strings"

	"google.golang.org/grpc/internal/grpclog"
)
@ -95,8 +97,9 @@ var severityName = []string{

// loggerT is the default logger used by grpclog.
type loggerT struct {
	m []*log.Logger
	v int
	m          []*log.Logger
	v          int
	jsonFormat bool
}

// NewLoggerV2 creates a loggerV2 with the provided writers.
@ -105,27 +108,40 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
}

// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
}

type loggerV2Config struct {
	verbose    int
	jsonFormat bool
}

func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
	var m []*log.Logger
	m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
	m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
	flag := log.LstdFlags
	if c.jsonFormat {
		flag = 0
	}
	m = append(m, log.New(infoW, "", flag))
	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
	m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
	m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
	return &loggerT{m: m, v: v}
	m = append(m, log.New(ew, "", flag))
	m = append(m, log.New(ew, "", flag))
	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
}

// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
	errorW := ioutil.Discard
	warningW := ioutil.Discard
	infoW := ioutil.Discard
	errorW := io.Discard
	warningW := io.Discard
	infoW := io.Discard

	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
	switch logLevel {
@ -142,58 +158,79 @@ func newLoggerV2() LoggerV2 {
	if vl, err := strconv.Atoi(vLevel); err == nil {
		v = vl
	}
	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)

	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")

	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
		verbose:    v,
		jsonFormat: jsonFormat,
	})
}

func (g *loggerT) output(severity int, s string) {
	sevStr := severityName[severity]
	if !g.jsonFormat {
		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
		return
	}
	// TODO: we can also include the logging component, but that needs more
	// (API) changes.
	b, _ := json.Marshal(map[string]string{
		"severity": sevStr,
		"message":  s,
	})
	g.m[severity].Output(2, string(b))
}

func (g *loggerT) Info(args ...interface{}) {
	g.m[infoLog].Print(args...)
	g.output(infoLog, fmt.Sprint(args...))
}

func (g *loggerT) Infoln(args ...interface{}) {
	g.m[infoLog].Println(args...)
	g.output(infoLog, fmt.Sprintln(args...))
}

func (g *loggerT) Infof(format string, args ...interface{}) {
	g.m[infoLog].Printf(format, args...)
	g.output(infoLog, fmt.Sprintf(format, args...))
}

func (g *loggerT) Warning(args ...interface{}) {
	g.m[warningLog].Print(args...)
	g.output(warningLog, fmt.Sprint(args...))
}

func (g *loggerT) Warningln(args ...interface{}) {
	g.m[warningLog].Println(args...)
	g.output(warningLog, fmt.Sprintln(args...))
}

func (g *loggerT) Warningf(format string, args ...interface{}) {
	g.m[warningLog].Printf(format, args...)
	g.output(warningLog, fmt.Sprintf(format, args...))
}

func (g *loggerT) Error(args ...interface{}) {
	g.m[errorLog].Print(args...)
	g.output(errorLog, fmt.Sprint(args...))
}

func (g *loggerT) Errorln(args ...interface{}) {
	g.m[errorLog].Println(args...)
	g.output(errorLog, fmt.Sprintln(args...))
}

func (g *loggerT) Errorf(format string, args ...interface{}) {
	g.m[errorLog].Printf(format, args...)
	g.output(errorLog, fmt.Sprintf(format, args...))
}

func (g *loggerT) Fatal(args ...interface{}) {
	g.m[fatalLog].Fatal(args...)
	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
	g.output(fatalLog, fmt.Sprint(args...))
	os.Exit(1)
}

func (g *loggerT) Fatalln(args ...interface{}) {
	g.m[fatalLog].Fatalln(args...)
	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
	g.output(fatalLog, fmt.Sprintln(args...))
	os.Exit(1)
}

func (g *loggerT) Fatalf(format string, args ...interface{}) {
	g.m[fatalLog].Fatalf(format, args...)
	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
	g.output(fatalLog, fmt.Sprintf(format, args...))
	os.Exit(1)
}

func (g *loggerT) V(l int) bool {
@ -204,18 +241,18 @@ func (g *loggerT) V(l int) bool {
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
	LoggerV2
	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
	InfoDepth(depth int, args ...interface{})
	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
	WarningDepth(depth int, args ...interface{})
	// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
	ErrorDepth(depth int, args ...interface{})
	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
	FatalDepth(depth int, args ...interface{})
}
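A sketch of installing a verbose logger programmatically using the exported constructor shown above (the JSON output path is only reachable via the GRPC_GO_LOG_FORMATTER environment variable in this version); writer choices and verbosity level are arbitrary.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route info/warning to stdout and errors to stderr, with verbosity level 2.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2))
}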
287 vendor/google.golang.org/grpc/idle.go generated vendored Normal file
@ -0,0 +1,287 @@
/*
 *
 * Copyright 2023 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"time"
)

// For overriding in unit tests.
var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
	return time.AfterFunc(d, f)
}

// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter
// and exit from idle mode.
type idlenessEnforcer interface {
	exitIdleMode() error
	enterIdleMode() error
}

// idlenessManager defines the functionality required to track RPC activity on a
// channel.
type idlenessManager interface {
	onCallBegin() error
	onCallEnd()
	close()
}

type noopIdlenessManager struct{}

func (noopIdlenessManager) onCallBegin() error { return nil }
func (noopIdlenessManager) onCallEnd()         {}
func (noopIdlenessManager) close()             {}

// idlenessManagerImpl implements the idlenessManager interface. It uses atomic
// operations to synchronize access to shared state and a mutex to guarantee
// mutual exclusion in a critical section.
type idlenessManagerImpl struct {
	// State accessed atomically.
	lastCallEndTime           int64 // Unix timestamp in nanos; time when the most recent RPC completed.
	activeCallsCount          int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
	activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
	closed                    int32 // Boolean; True when the manager is closed.

	// Can be accessed without atomics or mutex since these are set at creation
	// time and read-only after that.
	enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn.
	timeout  int64            // Idle timeout duration nanos stored as an int64.

	// idleMu is used to guarantee mutual exclusion in two scenarios:
	// - Opposing intentions:
	//   - a: Idle timeout has fired and handleIdleTimeout() is trying to put
	//     the channel in idle mode because the channel has been inactive.
	//   - b: At the same time an RPC is made on the channel, and onCallBegin()
	//     is trying to prevent the channel from going idle.
	// - Competing intentions:
	//   - The channel is in idle mode and there are multiple RPCs starting at
	//     the same time, all trying to move the channel out of idle. Only one
	//     of them should succeed in doing so, while the other RPCs should
	//     piggyback on the first one and be successfully handled.
	idleMu       sync.RWMutex
	actuallyIdle bool
	timer        *time.Timer
}

// newIdlenessManager creates a new idleness manager implementation for the
// given idle timeout.
func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager {
	if idleTimeout == 0 {
		return noopIdlenessManager{}
	}

	i := &idlenessManagerImpl{
		enforcer: enforcer,
		timeout:  int64(idleTimeout),
	}
	i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout)
	return i
}

// resetIdleTimer resets the idle timer to the given duration. This method
// should only be called from the timer callback.
func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {
	i.idleMu.Lock()
	defer i.idleMu.Unlock()

	if i.timer == nil {
		// Only close sets timer to nil. We are done.
		return
	}

	// It is safe to ignore the return value from Reset() because this method is
	// only ever called from the timer callback, which means the timer has
	// already fired.
	i.timer.Reset(d)
}

// handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired.
func (i *idlenessManagerImpl) handleIdleTimeout() {
	if i.isClosed() {
		return
	}

	if atomic.LoadInt32(&i.activeCallsCount) > 0 {
		i.resetIdleTimer(time.Duration(i.timeout))
		return
	}

	// There has been activity on the channel since we last got here. Reset the
	// timer and return.
	if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
		// Set the timer to fire after a duration of idle timeout, calculated
		// from the time the most recent RPC completed.
		atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0)
		i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano()))
		return
	}

	// This CAS operation is extremely likely to succeed given that there has
	// been no activity since the last time we were here. Setting the
	// activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the
	// channel is either in idle mode or is trying to get there.
	if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) {
		// This CAS operation can fail if an RPC started after we checked for
		// activity at the top of this method, or one was ongoing from before
		// the last time we were here. In both case, reset the timer and return.
		i.resetIdleTimer(time.Duration(i.timeout))
		return
	}

	// Now that we've set the active calls count to -math.MaxInt32, it's time to
	// actually move to idle mode.
	if i.tryEnterIdleMode() {
		// Successfully entered idle mode. No timer needed until we exit idle.
		return
	}

	// Failed to enter idle mode due to a concurrent RPC that kept the channel
	// active, or because of an error from the channel. Undo the attempt to
	// enter idle, and reset the timer to try again later.
	atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
	i.resetIdleTimer(time.Duration(i.timeout))
}

// tryEnterIdleMode instructs the channel to enter idle mode. But before
// that, it performs a last minute check to ensure that no new RPC has come in,
// making the channel active.
//
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
	i.idleMu.Lock()
	defer i.idleMu.Unlock()

	if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
		// We raced and lost to a new RPC. Very rare, but stop entering idle.
		return false
	}
	if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
		// An very short RPC could have come in (and also finished) after we
		// checked for calls count and activity in handleIdleTimeout(), but
		// before the CAS operation. So, we need to check for activity again.
		return false
	}

	// No new RPCs have come in since we last set the active calls count value
	// -math.MaxInt32 in the timer callback. And since we have the lock, it is
	// safe to enter idle mode now.
	if err := i.enforcer.enterIdleMode(); err != nil {
		logger.Errorf("Failed to enter idle mode: %v", err)
		return false
	}

	// Successfully entered idle mode.
	i.actuallyIdle = true
	return true
}

// onCallBegin is invoked at the start of every RPC.
func (i *idlenessManagerImpl) onCallBegin() error {
	if i.isClosed() {
		return nil
	}

	if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
		// Channel is not idle now. Set the activity bit and allow the call.
		atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
		return nil
	}

	// Channel is either in idle mode or is in the process of moving to idle
	// mode. Attempt to exit idle mode to allow this RPC.
	if err := i.exitIdleMode(); err != nil {
		// Undo the increment to calls count, and return an error causing the
		// RPC to fail.
		atomic.AddInt32(&i.activeCallsCount, -1)
		return err
	}

	atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
	return nil
}

// exitIdleMode instructs the channel to exit idle mode.
//
// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
func (i *idlenessManagerImpl) exitIdleMode() error {
	i.idleMu.Lock()
	defer i.idleMu.Unlock()

	if !i.actuallyIdle {
		// This can happen in two scenarios:
		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
		//   came in and onCallBegin() noticed that the calls count is negative.
		// - Channel is in idle mode, and multiple new RPCs come in at the same
		//   time, all of them notice a negative calls count in onCallBegin and get
		//   here. The first one to get the lock would got the channel to exit idle.
		//
		// Either way, nothing to do here.
		return nil
	}

	if err := i.enforcer.exitIdleMode(); err != nil {
		return fmt.Errorf("channel failed to exit idle mode: %v", err)
	}

	// Undo the idle entry process. This also respects any new RPC attempts.
	atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
	i.actuallyIdle = false

	// Start a new timer to fire after the configured idle timeout.
	i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
	return nil
}

// onCallEnd is invoked at the end of every RPC.
func (i *idlenessManagerImpl) onCallEnd() {
	if i.isClosed() {
		return
	}

	// Record the time at which the most recent call finished.
	atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())

	// Decrement the active calls count. This count can temporarily go negative
	// when the timer callback is in the process of moving the channel to idle
	// mode, but one or more RPCs come in and complete before the timer callback
	// can get done with the process of moving to idle mode.
	atomic.AddInt32(&i.activeCallsCount, -1)
}

func (i *idlenessManagerImpl) isClosed() bool {
	return atomic.LoadInt32(&i.closed) == 1
}

func (i *idlenessManagerImpl) close() {
	atomic.StoreInt32(&i.closed, 1)

	i.idleMu.Lock()
	i.timer.Stop()
	i.timer = nil
	i.idleMu.Unlock()
}
6 vendor/google.golang.org/grpc/install_gae.sh generated vendored
@ -1,6 +0,0 @@
#!/bin/bash

TMP=$(mktemp -d /tmp/sdk.XXX) \
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
&& unzip -q $TMP.zip -d $TMP \
&& export PATH="$PATH:$TMP/go_appengine"
9 vendor/google.golang.org/grpc/interceptor.go generated vendored
@ -72,9 +72,12 @@ type UnaryServerInfo struct {
}

// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
// the status message of the RPC.
// execution of a unary RPC.
//
// If a UnaryHandler returns an error, it should either be produced by the
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)

// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
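Tying the UnaryHandler contract above to an interceptor, a minimal sketch that converts a failed auth check into a status error so gRPC does not fall back to codes.Unknown; the isAuthenticated helper is hypothetical.

package main

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// authInterceptor rejects requests that are not authenticated.
func authInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	if !isAuthenticated(ctx) {
		// Return a status error so the client sees UNAUTHENTICATED rather than UNKNOWN.
		return nil, status.Error(codes.Unauthenticated, "missing or invalid credentials")
	}
	return handler(ctx, req)
}

// isAuthenticated is a placeholder; a real implementation would inspect
// request metadata from ctx.
func isAuthenticated(ctx context.Context) bool {
	return true
}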
384
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
generated
vendored
Normal file
384
vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
generated
vendored
Normal file
@ -0,0 +1,384 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package gracefulswitch implements a graceful switch load balancer.
|
||||
package gracefulswitch
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/balancer/base"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
|
||||
var _ balancer.Balancer = (*Balancer)(nil)
|
||||
|
||||
// NewBalancer returns a graceful switch Balancer.
|
||||
func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
|
||||
return &Balancer{
|
||||
cc: cc,
|
||||
bOpts: opts,
|
||||
}
|
||||
}
|
||||
|
||||
// Balancer is a utility to gracefully switch from one balancer to
|
||||
// a new balancer. It implements the balancer.Balancer interface.
|
||||
type Balancer struct {
|
||||
bOpts balancer.BuildOptions
|
||||
cc balancer.ClientConn
|
||||
|
||||
// mu protects the following fields and all fields within balancerCurrent
|
||||
// and balancerPending. mu does not need to be held when calling into the
|
||||
// child balancers, as all calls into these children happen only as a direct
|
||||
// result of a call into the gracefulSwitchBalancer, which are also
|
||||
// guaranteed to be synchronous. There is one exception: an UpdateState call
|
||||
// from a child balancer when current and pending are populated can lead to
|
||||
// calling Close() on the current. To prevent that racing with an
|
||||
// UpdateSubConnState from the channel, we hold currentMu during Close and
|
||||
// UpdateSubConnState calls.
|
||||
mu sync.Mutex
|
||||
balancerCurrent *balancerWrapper
|
||||
balancerPending *balancerWrapper
|
||||
closed bool // set to true when this balancer is closed
|
||||
|
||||
// currentMu must be locked before mu. This mutex guards against this
|
||||
// sequence of events: UpdateSubConnState() called, finds the
|
||||
// balancerCurrent, gives up lock, updateState comes in, causes Close() on
|
||||
// balancerCurrent before the UpdateSubConnState is called on the
|
||||
// balancerCurrent.
|
||||
currentMu sync.Mutex
|
||||
}
|
||||
|
||||
// swap swaps out the current lb with the pending lb and updates the ClientConn.
|
||||
// The caller must hold gsb.mu.
|
||||
func (gsb *Balancer) swap() {
|
||||
gsb.cc.UpdateState(gsb.balancerPending.lastState)
|
||||
cur := gsb.balancerCurrent
|
||||
gsb.balancerCurrent = gsb.balancerPending
|
||||
gsb.balancerPending = nil
|
||||
go func() {
|
||||
gsb.currentMu.Lock()
|
||||
defer gsb.currentMu.Unlock()
|
||||
cur.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
// Helper function that checks if the balancer passed in is current or pending.
|
||||
// The caller must hold gsb.mu.
|
||||
func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
|
||||
return bw == gsb.balancerCurrent || bw == gsb.balancerPending
|
||||
}
|
||||
|
||||
// SwitchTo initializes the graceful switch process, which completes based on
// connectivity state changes on the current/pending balancer. Thus, the switch
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
	gsb.mu.Lock()
	if gsb.closed {
		gsb.mu.Unlock()
		return errBalancerClosed
	}
	bw := &balancerWrapper{
		gsb: gsb,
		lastState: balancer.State{
			ConnectivityState: connectivity.Connecting,
			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
		},
		subconns: make(map[balancer.SubConn]bool),
	}
	balToClose := gsb.balancerPending // nil if there is no pending balancer
	if gsb.balancerCurrent == nil {
		gsb.balancerCurrent = bw
	} else {
		gsb.balancerPending = bw
	}
	gsb.mu.Unlock()
	balToClose.Close()
	// This function takes a builder instead of a balancer because builder.Build
	// can call back inline, and this utility needs to handle the callbacks.
	newBalancer := builder.Build(bw, gsb.bOpts)
	if newBalancer == nil {
		// This is illegal and should never happen; we clear the balancerWrapper
		// we were constructing if it happens to avoid a potential panic.
		gsb.mu.Lock()
		if gsb.balancerPending != nil {
			gsb.balancerPending = nil
		} else {
			gsb.balancerCurrent = nil
		}
		gsb.mu.Unlock()
		return balancer.ErrBadResolverState
	}

	// This write doesn't need to take gsb.mu because this field never gets read
	// or written to on any calls from the current or pending. Calls from grpc
	// to this balancer are guaranteed to be called synchronously, so this
	// bw.Balancer field will never be forwarded to until this SwitchTo()
	// function returns.
	bw.Balancer = newBalancer
	return nil
}
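For orientation, here is a minimal sketch of how a wrapping LB policy might drive this Balancer. It is illustrative only: it assumes access to this internal package (its real callers live inside grpc itself), and the "round_robin"/"pick_first" builder names are just examples.

package parentlb

import (
	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the "round_robin" builder
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

// parent owns a graceful switch balancer and swaps the child policy when the
// service config names a different one.
type parent struct {
	gsb    *gracefulswitch.Balancer
	policy string
}

func newParent(cc balancer.ClientConn, opts balancer.BuildOptions) *parent {
	return &parent{gsb: gracefulswitch.NewBalancer(cc, opts)}
}

// update is called synchronously from the parent's UpdateClientConnState,
// mirroring the "must be called synchronously" requirement on SwitchTo.
func (p *parent) update(policy string, state balancer.ClientConnState) error {
	if policy != p.policy {
		b := balancer.Get(policy) // e.g. "pick_first" or "round_robin"
		if b == nil {
			return balancer.ErrBadResolverState
		}
		if err := p.gsb.SwitchTo(b); err != nil {
			return err
		}
		p.policy = policy
	}
	// Resolver state always goes to the most recent child (pending if present).
	return p.gsb.UpdateClientConnState(state)
}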
// Returns nil if the graceful switch balancer is closed.
func (gsb *Balancer) latestBalancer() *balancerWrapper {
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	if gsb.balancerPending != nil {
		return gsb.balancerPending
	}
	return gsb.balancerCurrent
}

// UpdateClientConnState forwards the update to the latest balancer created.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return errBalancerClosed
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	return balToUpdate.UpdateClientConnState(state)
}
// ResolverError forwards the error to the latest balancer created.
func (gsb *Balancer) ResolverError(err error) {
	// The resolver data is only relevant to the most recent LB Policy.
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// Perform this call without gsb.mu to prevent deadlocks if the child calls
	// back into the channel. The latest balancer can never be closed during a
	// call from the channel, even without gsb.mu held.
	balToUpdate.ResolverError(err)
}

// ExitIdle forwards the call to the latest balancer created.
//
// If the latest balancer does not support ExitIdle, the subConns are
// re-connected to manually.
func (gsb *Balancer) ExitIdle() {
	balToUpdate := gsb.latestBalancer()
	if balToUpdate == nil {
		return
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
		ei.ExitIdle()
		return
	}
	gsb.mu.Lock()
	defer gsb.mu.Unlock()
	for sc := range balToUpdate.subconns {
		sc.Connect()
	}
}
// UpdateSubConnState forwards the update to the appropriate child.
func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	gsb.currentMu.Lock()
	defer gsb.currentMu.Unlock()
	gsb.mu.Lock()
	// Forward update to the appropriate child. Even if there is a pending
	// balancer, the current balancer should continue to get SubConn updates to
	// maintain the proper state while the pending is still connecting.
	var balToUpdate *balancerWrapper
	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
		balToUpdate = gsb.balancerCurrent
	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
		balToUpdate = gsb.balancerPending
	}
	gsb.mu.Unlock()
	if balToUpdate == nil {
		// SubConn belonged to a stale lb policy that has not yet fully closed,
		// or the balancer was already closed.
		return
	}
	balToUpdate.UpdateSubConnState(sc, state)
}
// Close closes any active child balancers.
func (gsb *Balancer) Close() {
	gsb.mu.Lock()
	gsb.closed = true
	currentBalancerToClose := gsb.balancerCurrent
	gsb.balancerCurrent = nil
	pendingBalancerToClose := gsb.balancerPending
	gsb.balancerPending = nil
	gsb.mu.Unlock()

	currentBalancerToClose.Close()
	pendingBalancerToClose.Close()
}
// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
// methods to help cleanup SubConns created by the wrapped balancer.
//
// It implements the balancer.ClientConn interface and is passed down in that
// capacity to the wrapped balancer. It maintains a set of subConns created by
// the wrapped balancer and calls from the latter to create/update/remove
// SubConns update this set before being forwarded to the parent ClientConn.
// State updates from the wrapped balancer can result in invocation of the
// graceful switch logic.
type balancerWrapper struct {
	balancer.Balancer
	gsb *Balancer

	lastState balancer.State
	subconns  map[balancer.SubConn]bool // subconns created by this balancer
}
func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
	if state.ConnectivityState == connectivity.Shutdown {
		bw.gsb.mu.Lock()
		delete(bw.subconns, sc)
		bw.gsb.mu.Unlock()
	}
	// There is no need to protect this read with a mutex, as the write to the
	// Balancer field happens in SwitchTo, which completes before this can be
	// called.
	bw.Balancer.UpdateSubConnState(sc, state)
}
// Close closes the underlying LB policy and removes the subconns it created. bw
// must not be referenced via balancerCurrent or balancerPending in gsb when
// called. gsb.mu must not be held. Does not panic with a nil receiver.
func (bw *balancerWrapper) Close() {
	// before Close is called.
	if bw == nil {
		return
	}
	// There is no need to protect this read with a mutex, as Close() is
	// impossible to be called concurrently with the write in SwitchTo(). The
	// callsites of Close() for this balancer in Graceful Switch Balancer will
	// never be called until SwitchTo() returns.
	bw.Balancer.Close()
	bw.gsb.mu.Lock()
	for sc := range bw.subconns {
		bw.gsb.cc.RemoveSubConn(sc)
	}
	bw.gsb.mu.Unlock()
}
func (bw *balancerWrapper) UpdateState(state balancer.State) {
	// Hold the mutex for this entire call to ensure it cannot occur
	// concurrently with other updateState() calls. This causes updates to
	// lastState and calls to cc.UpdateState to happen atomically.
	bw.gsb.mu.Lock()
	defer bw.gsb.mu.Unlock()
	bw.lastState = state

	if !bw.gsb.balancerCurrentOrPending(bw) {
		return
	}

	if bw == bw.gsb.balancerCurrent {
		// In the case that the current balancer exits READY, and there is a pending
		// balancer, you can forward the pending balancer's cached State up to
		// ClientConn and swap the pending into the current. This is because there
		// is no reason to gracefully switch from and keep using the old policy as
		// the ClientConn is not connected to any backends.
		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
			bw.gsb.swap()
			return
		}
		// Even if there is a pending balancer waiting to be gracefully switched to,
		// continue to forward current balancer updates to the Client Conn. Ignoring
		// state + picker from the current would cause undefined behavior/cause the
		// system to behave incorrectly from the current LB policies perspective.
		// Also, the current LB is still being used by grpc to choose SubConns per
		// RPC, and thus should use the most updated form of the current balancer.
		bw.gsb.cc.UpdateState(state)
		return
	}
	// This method is now dealing with a state update from the pending balancer.
	// If the current balancer is currently in a state other than READY, the new
	// policy can be swapped into place immediately. This is because there is no
	// reason to gracefully switch from and keep using the old policy as the
	// ClientConn is not connected to any backends.
	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
		bw.gsb.swap()
	}
}
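To restate the two swap conditions above outside of the locking details, here is a hypothetical helper, not part of the package, written purely as an illustration: the pending child is promoted as soon as it leaves CONNECTING, or as soon as the current child is no longer READY.

package gracefulswitchdoc

import "google.golang.org/grpc/connectivity"

// shouldSwapOnPendingUpdate mirrors the final condition in UpdateState: when a
// state update arrives from the pending child, promote it unless it is still
// CONNECTING while the current child remains READY.
func shouldSwapOnPendingUpdate(pending, current connectivity.State) bool {
	return pending != connectivity.Connecting || current != connectivity.Ready
}

// shouldSwapOnCurrentUpdate mirrors the earlier condition: when the current
// child leaves READY and a pending child exists, swap immediately.
func shouldSwapOnCurrentUpdate(current connectivity.State, havePending bool) bool {
	return current != connectivity.Ready && havePending
}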
func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.gsb.mu.Unlock()

	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
		bw.gsb.cc.RemoveSubConn(sc)
		bw.gsb.mu.Unlock()
		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
	}
	bw.subconns[sc] = true
	bw.gsb.mu.Unlock()
	return sc, nil
}
func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
	// Ignore ResolveNow requests from anything other than the most recent
	// balancer, because older balancers were already removed from the config.
	if bw != bw.gsb.latestBalancer() {
		return
	}
	bw.gsb.cc.ResolveNow(opts)
}

func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.RemoveSubConn(sc)
}

func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
	bw.gsb.mu.Lock()
	if !bw.gsb.balancerCurrentOrPending(bw) {
		bw.gsb.mu.Unlock()
		return
	}
	bw.gsb.mu.Unlock()
	bw.gsb.cc.UpdateAddresses(sc, addrs)
}

func (bw *balancerWrapper) Target() string {
	return bw.gsb.cc.Target()
}
118  vendor/google.golang.org/grpc/internal/binarylog/binarylog.go  generated  vendored
@@ -28,38 +28,48 @@ import (
	"google.golang.org/grpc/internal/grpcutil"
)

// Logger is the global binary logger. It can be used to get binary logger for
// each method.
var grpclogLogger = grpclog.Component("binarylog")

// Logger specifies MethodLoggers for method names with a Log call that
// takes a context.
//
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
type Logger interface {
	getMethodLogger(methodName string) *MethodLogger
	GetMethodLogger(methodName string) MethodLogger
}

// binLogger is the global binary logger for the binary. One of this should be
// built at init time from the configuration (environment variable or flags).
//
// It is used to get a methodLogger for each individual method.
// It is used to get a MethodLogger for each individual method.
var binLogger Logger

var grpclogLogger = grpclog.Component("binarylog")

// SetLogger sets the binarg logger.
// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
	binLogger = l
}

// GetMethodLogger returns the methodLogger for the given methodName.
// GetLogger gets the binary logger.
//
// Only call this at init time.
func GetLogger() Logger {
	return binLogger
}

// GetMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func GetMethodLogger(methodName string) *MethodLogger {
func GetMethodLogger(methodName string) MethodLogger {
	if binLogger == nil {
		return nil
	}
	return binLogger.getMethodLogger(methodName)
	return binLogger.GetMethodLogger(methodName)
}
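The interface change above (an exported GetMethodLogger returning a MethodLogger interface, replacing the unexported hook) is what lets an external integration supply its own logger. A hypothetical implementation could look like the sketch below; it assumes access to this internal package, and the type and its behavior are invented purely for illustration.

package observabilitydoc

import (
	"google.golang.org/grpc/internal/binarylog"
)

// disabledLogger is a hypothetical Logger that turns binary logging off for
// every method by returning a nil MethodLogger.
type disabledLogger struct{}

func (disabledLogger) GetMethodLogger(methodName string) binarylog.MethodLogger {
	return nil
}

func init() {
	// SetLogger must only be called at init time, per its documentation.
	binarylog.SetLogger(disabledLogger{})
}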
func init() {
@@ -68,17 +78,29 @@ func init() {
	binLogger = NewLoggerFromConfigString(configStr)
}

type methodLoggerConfig struct {
// MethodLoggerConfig contains the setting for logging behavior of a method
// logger. Currently, it contains the max length of header and message.
type MethodLoggerConfig struct {
	// Max length of header and message.
	hdr, msg uint64
	Header, Message uint64
}

// LoggerConfig contains the config for loggers to create method loggers.
type LoggerConfig struct {
	All      *MethodLoggerConfig
	Services map[string]*MethodLoggerConfig
	Methods  map[string]*MethodLoggerConfig

	Blacklist map[string]struct{}
}

type logger struct {
	all      *methodLoggerConfig
	services map[string]*methodLoggerConfig
	methods  map[string]*methodLoggerConfig
	config LoggerConfig
}

	blacklist map[string]struct{}
// NewLoggerFromConfig builds a logger with the given LoggerConfig.
func NewLoggerFromConfig(config LoggerConfig) Logger {
	return &logger{config: config}
}

// newEmptyLogger creates an empty logger. The map fields need to be filled in
@@ -88,83 +110,83 @@ func newEmptyLogger() *logger {
}

// Set method logger for "*".
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
	if l.all != nil {
func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
	if l.config.All != nil {
		return fmt.Errorf("conflicting global rules found")
	}
	l.all = ml
	l.config.All = ml
	return nil
}

// Set method logger for "service/*".
//
// New methodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
	if _, ok := l.services[service]; ok {
// New MethodLogger with same service overrides the old one.
func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
	if _, ok := l.config.Services[service]; ok {
		return fmt.Errorf("conflicting service rules for service %v found", service)
	}
	if l.services == nil {
		l.services = make(map[string]*methodLoggerConfig)
	if l.config.Services == nil {
		l.config.Services = make(map[string]*MethodLoggerConfig)
	}
	l.services[service] = ml
	l.config.Services[service] = ml
	return nil
}

// Set method logger for "service/method".
//
// New methodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
	if _, ok := l.blacklist[method]; ok {
// New MethodLogger with same method overrides the old one.
func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
	if _, ok := l.config.Blacklist[method]; ok {
		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
	}
	if _, ok := l.methods[method]; ok {
	if _, ok := l.config.Methods[method]; ok {
		return fmt.Errorf("conflicting method rules for method %v found", method)
	}
	if l.methods == nil {
		l.methods = make(map[string]*methodLoggerConfig)
	if l.config.Methods == nil {
		l.config.Methods = make(map[string]*MethodLoggerConfig)
	}
	l.methods[method] = ml
	l.config.Methods[method] = ml
	return nil
}

// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
	if _, ok := l.blacklist[method]; ok {
	if _, ok := l.config.Blacklist[method]; ok {
		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
	}
	if _, ok := l.methods[method]; ok {
	if _, ok := l.config.Methods[method]; ok {
		return fmt.Errorf("conflicting method rules for method %v found", method)
	}
	if l.blacklist == nil {
		l.blacklist = make(map[string]struct{})
	if l.config.Blacklist == nil {
		l.config.Blacklist = make(map[string]struct{})
	}
	l.blacklist[method] = struct{}{}
	l.config.Blacklist[method] = struct{}{}
	return nil
}

// getMethodLogger returns the methodLogger for the given methodName.
// getMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
// Each methodLogger returned by this method is a new instance. This is to
// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
func (l *logger) GetMethodLogger(methodName string) MethodLogger {
	s, m, err := grpcutil.ParseMethod(methodName)
	if err != nil {
		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
		return nil
	}
	if ml, ok := l.methods[s+"/"+m]; ok {
		return newMethodLogger(ml.hdr, ml.msg)
	if ml, ok := l.config.Methods[s+"/"+m]; ok {
		return NewTruncatingMethodLogger(ml.Header, ml.Message)
	}
	if _, ok := l.blacklist[s+"/"+m]; ok {
	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
		return nil
	}
	if ml, ok := l.services[s]; ok {
		return newMethodLogger(ml.hdr, ml.msg)
	if ml, ok := l.config.Services[s]; ok {
		return NewTruncatingMethodLogger(ml.Header, ml.Message)
	}
	if l.all == nil {
	if l.config.All == nil {
		return nil
	}
	return newMethodLogger(l.all.hdr, l.all.msg)
	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
}
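On the consuming side, grpc's streams fetch a per-call logger and feed it LogEntryConfig values. The sketch below shows the shape of that interaction with the v1.56 API; the function and variable names are illustrative, and the package is internal to grpc.

package binarylogdoc

import (
	"context"

	"google.golang.org/grpc/internal/binarylog"
)

func logClientHeader(ctx context.Context, authority string) {
	ml := binarylog.GetMethodLogger("/Foo/Bar")
	if ml == nil {
		return // method not selected by the filter config
	}
	// With the new interface, Log takes a context in addition to the entry.
	ml.Log(ctx, &binarylog.ClientHeader{
		OnClientSide: true,
		MethodName:   "/Foo/Bar",
		Authority:    authority,
	})
}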
26  vendor/google.golang.org/grpc/internal/binarylog/env_config.go  generated  vendored
@@ -30,15 +30,15 @@ import (
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
// - "" Nothing will be logged
// - "*" All headers and messages will be fully logged.
// - "*{h}" Only headers will be logged.
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
// - "Foo/*" Logs every method in service Foo
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
// /Foo/Bar, logs all headers and messages in every other method in service
// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
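This grammar is what NewLoggerFromConfigString (below) parses. A small hedged example of exercising it directly follows; in normal operation the string comes from the environment rather than from code, and since this is an internal package the snippet is only a sketch of the parsing behavior, not a supported public API.

package binarylogdoc

import "google.golang.org/grpc/internal/binarylog"

func exampleFilter() {
	// Log everything in service Foo except Foo/Bar, keeping at most 256
	// bytes of each message, mirroring the config strings documented above.
	l := binarylog.NewLoggerFromConfigString("Foo/*{m:256},-Foo/Bar")
	if l == nil {
		return // empty or invalid config string
	}
	if ml := l.GetMethodLogger("/Foo/Baz"); ml != nil {
		// ml is a fresh per-call logger for /Foo/Baz.
		_ = ml
	}
}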
@@ -57,7 +57,7 @@ func NewLoggerFromConfigString(s string) Logger {
	return l
}

// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
	// "" is invalid.
@@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
		if err != nil {
			return fmt.Errorf("invalid config: %q, %v", config, err)
		}
		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
		return nil
@@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error {
		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
	}
	if m == "*" {
		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
	} else {
		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
			return fmt.Errorf("invalid config: %v", err)
		}
	}
163  vendor/google.golang.org/grpc/internal/binarylog/method_logger.go  generated  vendored
@ -19,6 +19,7 @@
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
@ -26,7 +27,7 @@ import (
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@ -48,7 +49,16 @@ func (g *callIDGenerator) reset() {
|
||||
var idGen callIDGenerator
|
||||
|
||||
// MethodLogger is the sub-logger for each method.
|
||||
type MethodLogger struct {
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type MethodLogger interface {
|
||||
Log(context.Context, LogEntryConfig)
|
||||
}
|
||||
|
||||
// TruncatingMethodLogger is a method logger that truncates headers and messages
|
||||
// based on configured fields.
|
||||
type TruncatingMethodLogger struct {
|
||||
headerMaxLen, messageMaxLen uint64
|
||||
|
||||
callID uint64
|
||||
@ -57,8 +67,12 @@ type MethodLogger struct {
|
||||
sink Sink // TODO(blog): make this plugable.
|
||||
}
|
||||
|
||||
func newMethodLogger(h, m uint64) *MethodLogger {
|
||||
return &MethodLogger{
|
||||
// NewTruncatingMethodLogger returns a new truncating method logger.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
|
||||
return &TruncatingMethodLogger{
|
||||
headerMaxLen: h,
|
||||
messageMaxLen: m,
|
||||
|
||||
@ -69,8 +83,10 @@ func newMethodLogger(h, m uint64) *MethodLogger {
|
||||
}
|
||||
}
|
||||
|
||||
// Log creates a proto binary log entry, and logs it to the sink.
|
||||
func (ml *MethodLogger) Log(c LogEntryConfig) {
|
||||
// Build is an internal only method for building the proto message out of the
|
||||
// input event. It's made public to enable other library to reuse as much logic
|
||||
// in TruncatingMethodLogger as possible.
|
||||
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
|
||||
m := c.toProto()
|
||||
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||||
m.Timestamp = timestamp
|
||||
@ -78,18 +94,22 @@ func (ml *MethodLogger) Log(c LogEntryConfig) {
|
||||
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||||
|
||||
switch pay := m.Payload.(type) {
|
||||
case *pb.GrpcLogEntry_ClientHeader:
|
||||
case *binlogpb.GrpcLogEntry_ClientHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_ServerHeader:
|
||||
case *binlogpb.GrpcLogEntry_ServerHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_Message:
|
||||
case *binlogpb.GrpcLogEntry_Message:
|
||||
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
||||
}
|
||||
|
||||
ml.sink.Write(m)
|
||||
return m
|
||||
}
|
||||
|
||||
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
// Log creates a proto binary log entry, and logs it to the sink.
|
||||
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
|
||||
ml.sink.Write(ml.Build(c))
|
||||
}
|
||||
|
||||
func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
|
||||
if ml.headerMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
@ -108,7 +128,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
// but not counted towards the size limit.
|
||||
continue
|
||||
}
|
||||
currentEntryLen := uint64(len(entry.Value))
|
||||
currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
|
||||
if currentEntryLen > bytesLimit {
|
||||
break
|
||||
}
|
||||
@ -119,7 +139,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
return truncated
|
||||
}
|
||||
|
||||
func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||
func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
|
||||
if ml.messageMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
@ -131,8 +151,11 @@ func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||
}
|
||||
|
||||
// LogEntryConfig represents the configuration for binary log entry.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
type LogEntryConfig interface {
|
||||
toProto() *pb.GrpcLogEntry
|
||||
toProto() *binlogpb.GrpcLogEntry
|
||||
}
|
||||
|
||||
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
||||
@ -146,10 +169,10 @@ type ClientHeader struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
|
||||
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
||||
// function will set the fields when necessary.
|
||||
clientHeader := &pb.ClientHeader{
|
||||
clientHeader := &binlogpb.ClientHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
MethodName: c.MethodName,
|
||||
Authority: c.Authority,
|
||||
@ -157,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||
if c.Timeout > 0 {
|
||||
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Payload: &binlogpb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: clientHeader,
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -182,19 +205,19 @@ type ServerHeader struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &pb.ServerHeader{
|
||||
func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Payload: &binlogpb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &binlogpb.ServerHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -210,7 +233,7 @@ type ClientMessage struct {
|
||||
Message interface{}
|
||||
}
|
||||
|
||||
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
@ -225,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Payload: &binlogpb.GrpcLogEntry_Message{
|
||||
Message: &binlogpb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -250,7 +273,7 @@ type ServerMessage struct {
|
||||
Message interface{}
|
||||
}
|
||||
|
||||
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
@ -265,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Payload: &binlogpb.GrpcLogEntry_Message{
|
||||
Message: &binlogpb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -287,15 +310,15 @@ type ClientHalfClose struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
Payload: nil, // No payload here.
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -311,7 +334,7 @@ type ServerTrailer struct {
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
|
||||
st, ok := status.FromError(c.Err)
|
||||
if !ok {
|
||||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||||
@ -327,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
}
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Payload: &pb.GrpcLogEntry_Trailer{
|
||||
Trailer: &pb.Trailer{
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Payload: &binlogpb.GrpcLogEntry_Trailer{
|
||||
Trailer: &binlogpb.Trailer{
|
||||
Metadata: mdToMetadataProto(c.Trailer),
|
||||
StatusCode: uint32(st.Code()),
|
||||
StatusMessage: st.Message(),
|
||||
@ -339,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
@ -354,15 +377,15 @@ type Cancel struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
|
||||
ret := &binlogpb.GrpcLogEntry{
|
||||
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
Payload: nil,
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
@ -379,15 +402,15 @@ func metadataKeyOmit(key string) bool {
|
||||
return strings.HasPrefix(key, "grpc-")
|
||||
}
|
||||
|
||||
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||
ret := &pb.Metadata{}
|
||||
func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
|
||||
ret := &binlogpb.Metadata{}
|
||||
for k, vv := range md {
|
||||
if metadataKeyOmit(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
ret.Entry = append(ret.Entry,
|
||||
&pb.MetadataEntry{
|
||||
&binlogpb.MetadataEntry{
|
||||
Key: k,
|
||||
Value: []byte(v),
|
||||
},
|
||||
@ -397,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||
return ret
|
||||
}
|
||||
|
||||
func addrToProto(addr net.Addr) *pb.Address {
|
||||
ret := &pb.Address{}
|
||||
func addrToProto(addr net.Addr) *binlogpb.Address {
|
||||
ret := &binlogpb.Address{}
|
||||
switch a := addr.(type) {
|
||||
case *net.TCPAddr:
|
||||
if a.IP.To4() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV4
|
||||
ret.Type = binlogpb.Address_TYPE_IPV4
|
||||
} else if a.IP.To16() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV6
|
||||
ret.Type = binlogpb.Address_TYPE_IPV6
|
||||
} else {
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||||
// Do not set address and port fields.
|
||||
break
|
||||
}
|
||||
ret.Address = a.IP.String()
|
||||
ret.IpPort = uint32(a.Port)
|
||||
case *net.UnixAddr:
|
||||
ret.Type = pb.Address_TYPE_UNIX
|
||||
ret.Type = binlogpb.Address_TYPE_UNIX
|
||||
ret.Address = a.String()
|
||||
default:
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
ret.Type = binlogpb.Address_TYPE_UNKNOWN
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
12  vendor/google.golang.org/grpc/internal/binarylog/sink.go  generated  vendored
@@ -26,7 +26,7 @@ import (
	"time"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

var (
@@ -42,15 +42,15 @@ type Sink interface {
	// Write will be called to write the log entry into the sink.
	//
	// It should be thread-safe so it can be called in parallel.
	Write(*pb.GrpcLogEntry) error
	Write(*binlogpb.GrpcLogEntry) error
	// Close will be called when the Sink is replaced by a new Sink.
	Close() error
}

type noopSink struct{}

func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }
func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
func (ns *noopSink) Close() error { return nil }

// newWriterSink creates a binary log sink with the given writer.
//
@@ -66,7 +66,7 @@ type writerSink struct {
	out io.Writer
}

func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
	b, err := proto.Marshal(e)
	if err != nil {
		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
@@ -96,7 +96,7 @@ type bufferedSink struct {
	done chan struct{}
}

func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error {
func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
	fs.mu.Lock()
	defer fs.mu.Unlock()
	if !fs.flusherStarted {
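As an aside, the Sink contract above is small enough to restate with a toy implementation. The type below is hypothetical and only counts entries, but it shows the thread-safety expectation called out in the interface comment.

package binarylogdoc

import (
	"sync/atomic"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// countingSink is a hypothetical Sink that only counts log entries.
// Write may be called from many goroutines, so it uses an atomic counter.
type countingSink struct {
	n int64
}

func (s *countingSink) Write(e *binlogpb.GrpcLogEntry) error {
	atomic.AddInt64(&s.n, 1)
	return nil
}

func (s *countingSink) Close() error { return nil }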
26  vendor/google.golang.org/grpc/internal/buffer/unbounded.go  generated  vendored
@@ -35,6 +35,7 @@ import "sync"
// internal/transport/transport.go for an example of this.
type Unbounded struct {
	c       chan interface{}
	closed  bool
	mu      sync.Mutex
	backlog []interface{}
}
@@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded {
// Put adds t to the unbounded buffer.
func (b *Unbounded) Put(t interface{}) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	if len(b.backlog) == 0 {
		select {
		case b.c <- t:
			b.mu.Unlock()
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
	b.mu.Unlock()
}

// Load sends the earliest buffered data, if any, onto the read channel
@@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) {
// value from the read channel.
func (b *Unbounded) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
@@ -72,7 +79,6 @@ func (b *Unbounded) Load() {
		default:
		}
	}
	b.mu.Unlock()
}

// Get returns a read channel on which values added to the buffer, via Put(),
@@ -80,6 +86,20 @@ func (b *Unbounded) Load() {
//
// Upon reading a value from this channel, users are expected to call Load() to
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
// is closed.
func (b *Unbounded) Get() <-chan interface{} {
	return b.c
}

// Close closes the unbounded buffer.
func (b *Unbounded) Close() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.closed {
		return
	}
	b.closed = true
	close(b.c)
}
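The Put/Load/Get contract above is easiest to see from the consumer side. The loop below is a sketch of how such a buffer gets drained inside grpc; the handle callback is a placeholder, and the package is internal to grpc, so this is illustrative rather than a supported public API.

package bufferdoc

import "google.golang.org/grpc/internal/buffer"

func drain(b *buffer.Unbounded, handle func(interface{})) {
	// Get's channel is closed when the buffer is closed, so the range loop
	// terminates after Close().
	for v := range b.Get() {
		// Stage the next backlog item (if any) before processing this one,
		// as the Get documentation asks callers to do.
		b.Load()
		handle(v)
	}
}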
240  vendor/google.golang.org/grpc/internal/channelz/funcs.go  generated  vendored
@ -24,6 +24,8 @@
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
@ -49,7 +51,8 @@ var (
|
||||
// TurnOn turns on channelz data collection.
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
NewChannelzStorage()
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap {
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorage initializes channelz data storage and id generator.
|
||||
// NewChannelzStorageForTesting initializes channelz data storage and id
|
||||
// generator for testing purposes.
|
||||
//
|
||||
// This function returns a cleanup function to wait for all channelz state to be reset by the
|
||||
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
|
||||
// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
|
||||
// to remove some entity just register by the new test, since the id space is the same.
|
||||
//
|
||||
// Note: This function is exported for testing purpose only. User should not call
|
||||
// it in most cases.
|
||||
func NewChannelzStorage() (cleanup func() error) {
|
||||
db.set(&channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
})
|
||||
// Returns a cleanup function to be invoked by the test, which waits for up to
|
||||
// 10s for all channelz state to be reset by the grpc goroutines when those
|
||||
// entities get closed. This cleanup function helps with ensuring that tests
|
||||
// don't mess up each other.
|
||||
func NewChannelzStorageForTesting() (cleanup func() error) {
|
||||
db.set(newChannelMap())
|
||||
idGen.reset()
|
||||
|
||||
return func() error {
|
||||
var err error
|
||||
cm := db.get()
|
||||
if cm == nil {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i < 1000; i++ {
|
||||
cm.mu.Lock()
|
||||
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
|
||||
cm.mu.Unlock()
|
||||
// all things stored in the channelz map have been cleared.
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
ticker := time.NewTicker(10 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
cm.mu.RLock()
|
||||
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
|
||||
cm.mu.RUnlock()
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
|
||||
}
|
||||
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
|
||||
return nil
|
||||
}
|
||||
cm.mu.Unlock()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
<-ticker.C
|
||||
}
|
||||
|
||||
cm.mu.Lock()
|
||||
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
|
||||
cm.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric {
|
||||
return db.get().GetServer(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||
// assigned to this channel.
|
||||
func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||
// RegisterChannel registers the given channel c in the channelz database with
|
||||
// ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). pid == nil means no parent.
|
||||
//
|
||||
// Returns a unique channelz identifier assigned to this channel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
var parent int64
|
||||
isTopChannel := true
|
||||
if pid != nil {
|
||||
isTopChannel = false
|
||||
parent = pid.Int()
|
||||
}
|
||||
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
}
|
||||
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
pid: parent,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
if pid == 0 {
|
||||
db.get().addChannel(id, cn, true, pid, ref)
|
||||
} else {
|
||||
db.get().addChannel(id, cn, false, pid, ref)
|
||||
}
|
||||
return id
|
||||
db.get().addChannel(id, cn, isTopChannel, parent)
|
||||
return newIdentifer(RefChannel, id, pid)
|
||||
}
|
||||
|
||||
// RegisterSubChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
// RegisterSubChannel registers the given subChannel c in the channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid).
|
||||
//
|
||||
// Returns a unique channelz identifier assigned to this subChannel.
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a SubChannel's parent id cannot be nil")
|
||||
}
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
}
|
||||
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
pid: pid.Int(),
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid, ref)
|
||||
return id
|
||||
db.get().addSubChannel(id, sc, pid.Int())
|
||||
return newIdentifer(RefSubChannel, id, pid), nil
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
func RegisterServer(s Server, ref string) int64 {
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterServer(s Server, ref string) *Identifier {
|
||||
id := idGen.genID()
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
}
|
||||
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 {
|
||||
id: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return id
|
||||
return newIdentifer(RefServer, id, nil)
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a ListenSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addListenSocket(id, ls, pid, ref)
|
||||
return id
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addListenSocket(id, ls, pid.Int())
|
||||
return newIdentifer(RefListenSocket, id, pid), nil
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in channelz database
|
||||
// with ref as its reference name, and add it to the child list of its parent
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
//
|
||||
// If channelz is not turned ON, the channelz database is not mutated.
|
||||
func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
|
||||
if pid == nil {
|
||||
return nil, errors.New("a NormalSocket's parent id cannot be 0")
|
||||
}
|
||||
id := idGen.genID()
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addNormalSocket(id, ns, pid, ref)
|
||||
return id
|
||||
if !IsOn() {
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
|
||||
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
|
||||
db.get().addNormalSocket(id, ns, pid.Int())
|
||||
return newIdentifer(RefNormalSocket, id, pid), nil
|
||||
}
|
||||
|
||||
// RemoveEntry removes an entry with unique channelz trakcing id to be id from
|
||||
// RemoveEntry removes an entry with unique channelz tracking id to be id from
|
||||
// channelz database.
|
||||
func RemoveEntry(id int64) {
|
||||
db.get().removeEntry(id)
|
||||
//
|
||||
// If channelz is not turned ON, this function is a no-op.
|
||||
func RemoveEntry(id *Identifier) {
|
||||
if !IsOn() {
|
||||
return
|
||||
}
|
||||
db.get().removeEntry(id.Int())
|
||||
}
|
||||
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
|
||||
// to the channel trace.
|
||||
// The Parent field is optional. It is used for event that will be recorded in the entity's parent
|
||||
// trace also.
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
|
||||
// the event to be added to the channel trace.
|
||||
//
|
||||
// The Parent field is optional. It is used for an event that will be recorded
|
||||
// in the entity's parent trace.
|
||||
type TraceEventDesc struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEventDesc
|
||||
}
|
||||
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
|
||||
for d := desc; d != nil; d = d.Parent {
|
||||
switch d.Severity {
|
||||
case CtUnknown, CtInfo:
|
||||
l.InfoDepth(depth+1, d.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, d.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, d.Desc)
|
||||
}
|
||||
// AddTraceEvent adds trace related to the entity with specified id, using the
|
||||
// provided TraceEventDesc.
|
||||
//
|
||||
// If channelz is not turned ON, this will simply log the event descriptions.
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
|
||||
// Log only the trace description associated with the bottom most entity.
|
||||
switch desc.Severity {
|
||||
case CtUnknown, CtInfo:
|
||||
l.InfoDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, withParens(id)+desc.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
|
||||
}
|
||||
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
db.get().traceEvent(id, desc)
|
||||
if IsOn() {
|
||||
db.get().traceEvent(id.Int(), desc)
|
||||
}
|
||||
}
|
||||
|
||||
// channelMap is the storage data structure for channelz.
|
||||
@ -326,6 +367,17 @@ type channelMap struct {
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func newChannelMap() *channelMap {
|
||||
return &channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
@ -333,7 +385,7 @@ func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
cn.trace.cm = c
|
||||
@ -346,7 +398,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
sc.trace.cm = c
|
||||
@ -355,7 +407,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
@ -363,7 +415,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
@ -630,7 +682,7 @@ func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64)
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
var s []*SocketMetric
|
||||
s := make([]*SocketMetric, 0, len(sks))
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
|
||||
75 vendor/google.golang.org/grpc/internal/channelz/id.go generated vendored Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Identifier is an opaque identifier which uniquely identifies an entity in the
|
||||
// channelz database.
|
||||
type Identifier struct {
|
||||
typ RefChannelType
|
||||
id int64
|
||||
str string
|
||||
pid *Identifier
|
||||
}
|
||||
|
||||
// Type returns the entity type corresponding to id.
|
||||
func (id *Identifier) Type() RefChannelType {
|
||||
return id.typ
|
||||
}
|
||||
|
||||
// Int returns the integer identifier corresponding to id.
|
||||
func (id *Identifier) Int() int64 {
|
||||
return id.id
|
||||
}
|
||||
|
||||
// String returns a string representation of the entity corresponding to id.
|
||||
//
|
||||
// This includes some information about the parent as well. Examples:
|
||||
// Top-level channel: [Channel #channel-number]
|
||||
// Nested channel: [Channel #parent-channel-number Channel #channel-number]
|
||||
// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
|
||||
func (id *Identifier) String() string {
|
||||
return id.str
|
||||
}
|
||||
|
||||
// Equal returns true if other is the same as id.
|
||||
func (id *Identifier) Equal(other *Identifier) bool {
|
||||
if (id != nil) != (other != nil) {
|
||||
return false
|
||||
}
|
||||
if id == nil && other == nil {
|
||||
return true
|
||||
}
|
||||
return id.typ == other.typ && id.id == other.id && id.pid == other.pid
|
||||
}
|
||||
|
||||
// NewIdentifierForTesting returns a new opaque identifier to be used only for
|
||||
// testing purposes.
|
||||
func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
return newIdentifer(typ, id, pid)
|
||||
}
|
||||
|
||||
func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
|
||||
str := fmt.Sprintf("%s #%d", typ, id)
|
||||
if pid != nil {
|
||||
str = fmt.Sprintf("%s %s", pid, str)
|
||||
}
|
||||
return &Identifier{typ: typ, id: id, str: str, pid: pid}
|
||||
}
|
||||
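Editor's note: the new id.go above introduces the opaque Identifier used throughout channelz. As a minimal standalone sketch (package layout and numbers below are illustrative, not part of the vendored code), the nested string form returned by String() is composed the same way newIdentifer does it:

package main

import "fmt"

func main() {
	// A top-level channel gets "Channel #3"; a subchannel nested under it
	// prepends the parent's string, exactly as newIdentifer above does.
	parent := fmt.Sprintf("%s #%d", "Channel", 3)
	child := fmt.Sprintf("%s %s", parent, fmt.Sprintf("%s #%d", "SubChannel", 8))
	fmt.Println("[" + child + "] ") // "[Channel #3 SubChannel #8] " — what withParens(id) prepends to log lines
}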
91 vendor/google.golang.org/grpc/internal/channelz/logging.go generated vendored
@ -26,77 +26,54 @@ import (
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
func withParens(id *Identifier) string {
|
||||
return "[" + id.String() + "] "
|
||||
}
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, args...)
|
||||
}
|
||||
func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtInfo,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, msg)
|
||||
}
|
||||
func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtInfo,
|
||||
})
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, args...)
|
||||
}
|
||||
func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, msg)
|
||||
}
|
||||
func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, args...)
|
||||
}
|
||||
func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, msg)
|
||||
}
|
||||
func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprintf(format, args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
}
|
||||
|
||||
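Editor's note: the logging helpers above now take a *Identifier instead of an int64 id and always delegate to AddTraceEvent. A hedged sketch of an in-tree call site (only code inside the grpc module can import this internal package; the function and variable names are illustrative):

package sketch

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("core")

// logReady shows the new call shape: the second argument is the entity's
// *channelz.Identifier rather than its raw int64 id.
func logReady(cid *channelz.Identifier, addr string) {
	channelz.Infof(logger, cid, "Subchannel to %s is READY", addr)
}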
39 vendor/google.golang.org/grpc/internal/channelz/types.go generated vendored
@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return entry not found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return entry not found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
@ -686,12 +686,33 @@ const (
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefUnknown indicates an unknown entity type, the zero value for this type.
|
||||
RefUnknown RefChannelType = iota
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel RefChannelType = iota
|
||||
RefChannel
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
// RefServer indicates the referenced entity is a Server.
|
||||
RefServer
|
||||
// RefListenSocket indicates the referenced entity is a ListenSocket.
|
||||
RefListenSocket
|
||||
// RefNormalSocket indicates the referenced entity is a NormalSocket.
|
||||
RefNormalSocket
|
||||
)
|
||||
|
||||
var refChannelTypeToString = map[RefChannelType]string{
|
||||
RefUnknown: "Unknown",
|
||||
RefChannel: "Channel",
|
||||
RefSubChannel: "SubChannel",
|
||||
RefServer: "Server",
|
||||
RefListenSocket: "ListenSocket",
|
||||
RefNormalSocket: "NormalSocket",
|
||||
}
|
||||
|
||||
func (r RefChannelType) String() string {
|
||||
return refChannelTypeToString[r]
|
||||
}
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
|
||||
2 vendor/google.golang.org/grpc/internal/channelz/types_linux.go generated vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
||||
5 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go generated vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
@ -37,6 +38,6 @@ type SocketOptionData struct {
|
||||
// Windows OS doesn't support Socket Option
|
||||
func (s *SocketOptionData) Getsockopt(fd uintptr) {
|
||||
once.Do(func() {
|
||||
logger.Warning("Channelz: socket options are not supported on non-linux os and appengine.")
|
||||
logger.Warning("Channelz: socket options are not supported on non-linux environments")
|
||||
})
|
||||
}
|
||||
|
||||
2 vendor/google.golang.org/grpc/internal/channelz/util_linux.go generated vendored
@ -1,5 +1,3 @@
|
||||
// +build linux,!appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
||||
3 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go generated vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
|
||||
2 vendor/google.golang.org/grpc/internal/credentials/spiffe.go generated vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
|
||||
2 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go generated vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
||||
4 vendor/google.golang.org/grpc/internal/credentials/util.go generated vendored
@ -18,7 +18,9 @@
|
||||
|
||||
package credentials
|
||||
|
||||
import "crypto/tls"
|
||||
import (
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
|
||||
48 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go generated vendored
@ -21,18 +21,46 @@ package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
prefix = "GRPC_GO_"
|
||||
retryStr = prefix + "RETRY"
|
||||
txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
|
||||
var (
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
|
||||
// AdvertiseCompressors is set if registered compressor should be advertised
|
||||
// ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
|
||||
AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
|
||||
// RingHashCap indicates the maximum ring size which defaults to 4096
|
||||
// entries but may be overridden by setting the environment variable
|
||||
// "GRPC_RING_HASH_CAP". This does not override the default bounds
|
||||
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
|
||||
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
|
||||
// PickFirstLBConfig is set if we should support configuration of the
|
||||
// pick_first LB policy, which can be enabled by setting the environment
|
||||
// variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true".
|
||||
PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)
|
||||
)
|
||||
|
||||
var (
|
||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||||
)
|
||||
func boolFromEnv(envVar string, def bool) bool {
|
||||
if def {
|
||||
// The default is true; return true unless the variable is "false".
|
||||
return !strings.EqualFold(os.Getenv(envVar), "false")
|
||||
}
|
||||
// The default is false; return false unless the variable is "true".
|
||||
return strings.EqualFold(os.Getenv(envVar), "true")
|
||||
}
|
||||
|
||||
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
|
||||
v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
|
||||
if err != nil {
|
||||
return def
|
||||
}
|
||||
if v < min {
|
||||
return min
|
||||
}
|
||||
if v > max {
|
||||
return max
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
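Editor's note: the new boolFromEnv/uint64FromEnv helpers above replace the ad-hoc strings.EqualFold checks. A standalone illustration of the resulting semantics (the snippet re-declares a copy of boolFromEnv purely for demonstration):

package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv copied from the diff above for illustration only.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false") // default true: only an explicit "false" disables
	}
	return strings.EqualFold(os.Getenv(envVar), "true") // default false: only an explicit "true" enables
}

func main() {
	os.Setenv("GRPC_GO_IGNORE_TXT_ERRORS", "FALSE")
	fmt.Println(boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true))              // false: the match is case-insensitive
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false)) // false: unset, default kept
}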
42 vendor/google.golang.org/grpc/internal/envconfig/observability.go generated vendored Normal file
@ -0,0 +1,42 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import "os"
|
||||
|
||||
const (
|
||||
envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
|
||||
envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
|
||||
)
|
||||
|
||||
var (
|
||||
// ObservabilityConfig is the json configuration for the gcp/observability
|
||||
// package specified directly in the envObservabilityConfig env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfig = os.Getenv(envObservabilityConfig)
|
||||
// ObservabilityConfigFile is the json configuration for the
|
||||
// gcp/observability specified in a file with the location specified in
|
||||
// envObservabilityConfigFile env var.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
|
||||
)
|
||||
95 vendor/google.golang.org/grpc/internal/envconfig/xds.go generated vendored Normal file
@ -0,0 +1,95 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package envconfig
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
|
||||
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
|
||||
// Do not use this and read from env directly. Its value is read and kept in
|
||||
// variable XDSBootstrapFileName.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
|
||||
// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
|
||||
// content. Do not use this and read from env directly. Its value is read
|
||||
// and kept in variable XDSBootstrapFileContent.
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
|
||||
)
|
||||
|
||||
var (
|
||||
// XDSBootstrapFileName holds the name of the file which contains xDS
|
||||
// bootstrap configuration. Users can specify the location of the bootstrap
|
||||
// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
|
||||
// XDSBootstrapFileContent holds the content of the xDS bootstrap
|
||||
// configuration. Users can specify the bootstrap config by setting the
|
||||
// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
|
||||
//
|
||||
// When both bootstrap FileName and FileContent are set, FileName is used.
|
||||
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
|
||||
// XDSRingHash indicates whether ring hash support is enabled, which can be
|
||||
// disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
|
||||
XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
|
||||
// XDSClientSideSecurity is used to control processing of security
|
||||
// configuration on the client-side.
|
||||
//
|
||||
// Note that there is no env var protection for the server-side because we
|
||||
// have a brand new API on the server-side and users explicitly need to use
|
||||
// the new API to get security integration on the server.
|
||||
XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
|
||||
// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
|
||||
// DNS cluster is enabled, which can be disabled by setting the environment
|
||||
// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
|
||||
// to "false".
|
||||
XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
|
||||
|
||||
// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
|
||||
// which can be disabled by setting the environment variable
|
||||
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
|
||||
XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
|
||||
// XDSOutlierDetection indicates whether outlier detection support is
|
||||
// enabled, which can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
|
||||
XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
|
||||
// XDSFederation indicates whether federation support is enabled, which can
|
||||
// be enabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
|
||||
XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
|
||||
|
||||
// XDSRLS indicates whether processing of Cluster Specifier plugins and
|
||||
// support for the RLS CLuster Specifier is enabled, which can be disabled by
|
||||
// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
|
||||
// "false".
|
||||
XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
|
||||
|
||||
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
|
||||
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
|
||||
// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
|
||||
// can be disabled by setting the environment variable
|
||||
// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
|
||||
XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
|
||||
)
|
||||
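Editor's note: because the xDS flags above are read once at package load, in-tree tests can simply flip the exported Go variables instead of setting the environment. A hedged sketch of that pattern (the test name and body are illustrative):

package sketch

import (
	"testing"

	"google.golang.org/grpc/internal/envconfig"
)

func TestWithRingHashDisabled(t *testing.T) {
	// Override the flag for the duration of the test, then restore it.
	old := envconfig.XDSRingHash
	envconfig.XDSRingHash = false
	defer func() { envconfig.XDSRingHash = old }()
	// ... exercise code that consults envconfig.XDSRingHash ...
}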
10 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go generated vendored
@ -110,17 +110,17 @@ type LoggerV2 interface {
|
||||
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
|
||||
// It is defined here to avoid a circular dependency.
|
||||
//
|
||||
// Experimental
|
||||
// # Experimental
|
||||
//
|
||||
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
|
||||
// later release.
|
||||
type DepthLoggerV2 interface {
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
||||
|
||||
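Editor's note: the comment fix above matters because fmt.Println and fmt.Print format variadic arguments differently; a quick standalone check:

package main

import "fmt"

func main() {
	fmt.Print("code", 13, "\n") // "code13" — Print only inserts a space when neither operand is a string
	fmt.Println("code", 13)     // "code 13" — the behavior DepthLoggerV2's methods now document
}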
12 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go generated vendored
@ -63,6 +63,9 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) {
|
||||
|
||||
// Debugf does info logging at verbose level 2.
|
||||
func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
if !Logger.V(2) {
|
||||
return
|
||||
}
|
||||
@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) {
|
||||
return
|
||||
}
|
||||
InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func (pl *PrefixLogger) V(l int) bool {
|
||||
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
|
||||
// rewrite PrefixLogger a little to ensure that we don't use the global
|
||||
// `Logger` here, and instead use the `logger` field.
|
||||
return Logger.V(l)
|
||||
}
|
||||
|
||||
// NewPrefixLogger creates a prefix logger with the given prefix.
|
||||
|
||||
21 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go generated vendored
@ -52,6 +52,13 @@ func Intn(n int) int {
|
||||
return r.Intn(n)
|
||||
}
|
||||
|
||||
// Int31n implements rand.Int31n on the grpcrand global source.
|
||||
func Int31n(n int32) int32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Int31n(n)
|
||||
}
|
||||
|
||||
// Float64 implements rand.Float64 on the grpcrand global source.
|
||||
func Float64() float64 {
|
||||
mu.Lock()
|
||||
@ -65,3 +72,17 @@ func Uint64() uint64 {
|
||||
defer mu.Unlock()
|
||||
return r.Uint64()
|
||||
}
|
||||
|
||||
// Uint32 implements rand.Uint32 on the grpcrand global source.
|
||||
func Uint32() uint32 {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return r.Uint32()
|
||||
}
|
||||
|
||||
// Shuffle implements rand.Shuffle on the grpcrand global source.
|
||||
var Shuffle = func(n int, f func(int, int)) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
r.Shuffle(n, f)
|
||||
}
|
||||
|
||||
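Editor's note: the added Int31n, Uint32 and Shuffle wrappers follow the package's existing pattern — one seeded math/rand source guarded by a mutex, since a *rand.Rand is not safe for concurrent use. A standalone sketch of that pattern (package name is illustrative):

package randsketch

import (
	"math/rand"
	"sync"
	"time"
)

var (
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
	mu sync.Mutex
)

// Uint32 mirrors the newly added helper: lock, delegate to the shared source, unlock.
func Uint32() uint32 {
	mu.Lock()
	defer mu.Unlock()
	return r.Uint32()
}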
119 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go generated vendored Normal file
@ -0,0 +1,119 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/grpc/internal/buffer"
|
||||
)
|
||||
|
||||
// CallbackSerializer provides a mechanism to schedule callbacks in a
|
||||
// synchronized manner. It provides a FIFO guarantee on the order of execution
|
||||
// of scheduled callbacks. New callbacks can be scheduled by invoking the
|
||||
// Schedule() method.
|
||||
//
|
||||
// This type is safe for concurrent access.
|
||||
type CallbackSerializer struct {
|
||||
// Done is closed once the serializer is shut down completely, i.e all
|
||||
// scheduled callbacks are executed and the serializer has deallocated all
|
||||
// its resources.
|
||||
Done chan struct{}
|
||||
|
||||
callbacks *buffer.Unbounded
|
||||
closedMu sync.Mutex
|
||||
closed bool
|
||||
}
|
||||
|
||||
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
|
||||
// context will be passed to the scheduled callbacks. Users should cancel the
|
||||
// provided context to shutdown the CallbackSerializer. It is guaranteed that no
|
||||
// callbacks will be added once this context is canceled, and any pending un-run
|
||||
// callbacks will be executed before the serializer is shut down.
|
||||
func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
|
||||
t := &CallbackSerializer{
|
||||
Done: make(chan struct{}),
|
||||
callbacks: buffer.NewUnbounded(),
|
||||
}
|
||||
go t.run(ctx)
|
||||
return t
|
||||
}
|
||||
|
||||
// Schedule adds a callback to be scheduled after existing callbacks are run.
|
||||
//
|
||||
// Callbacks are expected to honor the context when performing any blocking
|
||||
// operations, and should return early when the context is canceled.
|
||||
//
|
||||
// Return value indicates if the callback was successfully added to the list of
|
||||
// callbacks to be executed by the serializer. It is not possible to add
|
||||
// callbacks once the context passed to NewCallbackSerializer is cancelled.
|
||||
func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
|
||||
t.closedMu.Lock()
|
||||
defer t.closedMu.Unlock()
|
||||
|
||||
if t.closed {
|
||||
return false
|
||||
}
|
||||
t.callbacks.Put(f)
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *CallbackSerializer) run(ctx context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
|
||||
defer close(t.Done)
|
||||
for ctx.Err() == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
// Do nothing here. Next iteration of the for loop will not happen,
|
||||
// since ctx.Err() would be non-nil.
|
||||
case callback, ok := <-t.callbacks.Get():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
t.callbacks.Load()
|
||||
callback.(func(ctx context.Context))(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch pending callbacks if any, and execute them before returning from
|
||||
// this method and closing t.Done.
|
||||
t.closedMu.Lock()
|
||||
t.closed = true
|
||||
backlog = t.fetchPendingCallbacks()
|
||||
t.callbacks.Close()
|
||||
t.closedMu.Unlock()
|
||||
for _, b := range backlog {
|
||||
b(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
|
||||
var backlog []func(context.Context)
|
||||
for {
|
||||
select {
|
||||
case b := <-t.callbacks.Get():
|
||||
backlog = append(backlog, b.(func(context.Context)))
|
||||
t.callbacks.Load()
|
||||
default:
|
||||
return backlog
|
||||
}
|
||||
}
|
||||
}
|
||||
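Editor's note: a hedged usage sketch for the new CallbackSerializer above (only in-tree code can import grpcsync; updateState is a placeholder). Callbacks run one at a time in FIFO order until the supplied context is cancelled, after which the backlog drains and Done is closed:

package sketch

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

func serializeUpdates(updateState func()) {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Scheduled callbacks never run concurrently with each other.
	cs.Schedule(func(ctx context.Context) { updateState() })

	cancel()  // stop accepting new callbacks; pending ones still run
	<-cs.Done // closed once the backlog has been executed
}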
@ -1,8 +1,6 @@
|
||||
// +build appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -18,14 +16,17 @@
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
package grpcsync
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/url"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// SPIFFEIDFromState is a no-op for appengine builds.
|
||||
func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
|
||||
return nil
|
||||
// OnceFunc returns a function wrapping f which ensures f is only executed
|
||||
// once even if the returned function is executed multiple times.
|
||||
func OnceFunc(f func()) func() {
|
||||
var once sync.Once
|
||||
return func() {
|
||||
once.Do(f)
|
||||
}
|
||||
}
|
||||
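Editor's note: OnceFunc above is a thin wrapper over sync.Once; a hedged in-tree sketch (conn is an illustrative parameter):

package sketch

import (
	"net"

	"google.golang.org/grpc/internal/grpcsync"
)

// closeOnce returns a function that closes conn the first time it is called;
// every later call is a no-op.
func closeOnce(conn net.Conn) func() {
	return grpcsync.OnceFunc(func() { conn.Close() })
}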
47 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go generated vendored Normal file
@ -0,0 +1,47 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2022 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
)
|
||||
|
||||
// RegisteredCompressorNames holds names of the registered compressors.
|
||||
var RegisteredCompressorNames []string
|
||||
|
||||
// IsCompressorNameRegistered returns true when name is available in registry.
|
||||
func IsCompressorNameRegistered(name string) bool {
|
||||
for _, compressor := range RegisteredCompressorNames {
|
||||
if compressor == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// RegisteredCompressors returns a string of registered compressor names
|
||||
// separated by comma.
|
||||
func RegisteredCompressors() string {
|
||||
if !envconfig.AdvertiseCompressors {
|
||||
return ""
|
||||
}
|
||||
return strings.Join(RegisteredCompressorNames, ",")
|
||||
}
|
||||
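Editor's note: RegisteredCompressors above produces the comma-separated list advertised to peers and returns an empty string when advertising is disabled. A standalone illustration of that output (the compressor names are made up):

package main

import (
	"fmt"
	"strings"
)

func main() {
	registered := []string{"gzip", "snappy"} // illustrative registry contents
	advertise := true                        // stands in for envconfig.AdvertiseCompressors
	out := ""
	if advertise {
		out = strings.Join(registered, ",")
	}
	fmt.Println(out) // "gzip,snappy"
}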
@ -1,8 +1,6 @@
|
||||
// +build appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -18,13 +16,5 @@
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// WrapSyscallConn returns newConn on appengine.
|
||||
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||
return newConn
|
||||
}
|
||||
// Package grpcutil provides utility functions used across the gRPC codebase.
|
||||
package grpcutil
|
||||
6 vendor/google.golang.org/grpc/internal/grpcutil/method.go generated vendored
@ -25,7 +25,6 @@ import (
|
||||
|
||||
// ParseMethod splits service and method from the input. It expects format
|
||||
// "/service/method".
|
||||
//
|
||||
func ParseMethod(methodName string) (service, method string, _ error) {
|
||||
if !strings.HasPrefix(methodName, "/") {
|
||||
return "", "", errors.New("invalid method name: should start with /")
|
||||
@ -39,6 +38,11 @@ func ParseMethod(methodName string) (service, method string, _ error) {
|
||||
return methodName[:pos], methodName[pos+1:], nil
|
||||
}
|
||||
|
||||
// baseContentType is the base content-type for gRPC. This is a valid
|
||||
// content-type on it's own, but can also include a content-subtype such as
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
const baseContentType = "application/grpc"
|
||||
|
||||
// ContentSubtype returns the content-subtype for the given content-type. The
|
||||
|
||||
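Editor's note: ParseMethod, shown earlier in this hunk, splits a full method name of the form "/service/method"; a quick standalone equivalent of what it returns:

package main

import (
	"fmt"
	"strings"
)

func main() {
	methodName := "/helloworld.Greeter/SayHello"
	rest := strings.TrimPrefix(methodName, "/")
	pos := strings.LastIndex(rest, "/")
	fmt.Println(rest[:pos], rest[pos+1:]) // "helloworld.Greeter SayHello" — the service and method ParseMethod returns
}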
@ -1,8 +1,6 @@
|
||||
// +build go1.13
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
@ -18,16 +16,16 @@
|
||||
*
|
||||
*/
|
||||
|
||||
package dns
|
||||
package grpcutil
|
||||
|
||||
import "net"
|
||||
import "regexp"
|
||||
|
||||
func init() {
|
||||
filterError = func(err error) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
|
||||
// The name does not exist; not an error.
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
// FullMatchWithRegex returns whether the full text matches the regex provided.
|
||||
func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
|
||||
if len(text) == 0 {
|
||||
return re.MatchString(text)
|
||||
}
|
||||
re.Longest()
|
||||
rem := re.FindString(text)
|
||||
return len(rem) == len(text)
|
||||
}
|
||||
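Editor's note: FullMatchWithRegex above relies on Regexp.Longest plus a length comparison to force a whole-string match. A standalone sketch of the same idea (the pattern is arbitrary):

package main

import (
	"fmt"
	"regexp"
)

func fullMatch(re *regexp.Regexp, text string) bool {
	if len(text) == 0 {
		return re.MatchString(text)
	}
	re.Longest()                                 // prefer the longest leftmost match
	return len(re.FindString(text)) == len(text) // full match only if it covers all of text
}

func main() {
	re := regexp.MustCompile("a+b?")
	fmt.Println(fullMatch(re, "aaab"))  // true
	fmt.Println(fullMatch(re, "aaabc")) // false: the match stops before "c"
}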
89 vendor/google.golang.org/grpc/internal/grpcutil/target.go generated vendored
@ -1,89 +0,0 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpcutil provides a bunch of utility functions to be used across the
|
||||
// gRPC codebase.
|
||||
package grpcutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// split2 returns the values from strings.SplitN(s, sep, 2).
|
||||
// If sep is not found, it returns ("", "", false) instead.
|
||||
func split2(s, sep string) (string, string, bool) {
|
||||
spl := strings.SplitN(s, sep, 2)
|
||||
if len(spl) < 2 {
|
||||
return "", "", false
|
||||
}
|
||||
return spl[0], spl[1], true
|
||||
}
|
||||
|
||||
// ParseTarget splits target into a resolver.Target struct containing scheme,
|
||||
// authority and endpoint. skipUnixColonParsing indicates that the parse should
|
||||
// not parse "unix:[path]" cases. This should be true in cases where a custom
|
||||
// dialer is present, to prevent a behavior change.
|
||||
//
|
||||
// If target is not a valid scheme://authority/endpoint as specified in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/naming.md,
|
||||
// it returns {Endpoint: target}.
|
||||
func ParseTarget(target string, skipUnixColonParsing bool) (ret resolver.Target) {
|
||||
var ok bool
|
||||
if strings.HasPrefix(target, "unix-abstract:") {
|
||||
if strings.HasPrefix(target, "unix-abstract://") {
|
||||
// Maybe, with Authority specified, try to parse it
|
||||
var remain string
|
||||
ret.Scheme, remain, _ = split2(target, "://")
|
||||
ret.Authority, ret.Endpoint, ok = split2(remain, "/")
|
||||
if !ok {
|
||||
// No Authority, add the "//" back
|
||||
ret.Endpoint = "//" + remain
|
||||
} else {
|
||||
// Found Authority, add the "/" back
|
||||
ret.Endpoint = "/" + ret.Endpoint
|
||||
}
|
||||
} else {
|
||||
// Without Authority specified, split target on ":"
|
||||
ret.Scheme, ret.Endpoint, _ = split2(target, ":")
|
||||
}
|
||||
return ret
|
||||
}
|
||||
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
|
||||
if !ok {
|
||||
if strings.HasPrefix(target, "unix:") && !skipUnixColonParsing {
|
||||
// Handle the "unix:[local/path]" and "unix:[/absolute/path]" cases,
|
||||
// because splitting on :// only handles the
|
||||
// "unix://[/absolute/path]" case. Only handle if the dialer is nil,
|
||||
// to avoid a behavior change with custom dialers.
|
||||
return resolver.Target{Scheme: "unix", Endpoint: target[len("unix:"):]}
|
||||
}
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
|
||||
if !ok {
|
||||
return resolver.Target{Endpoint: target}
|
||||
}
|
||||
if ret.Scheme == "unix" {
|
||||
// Add the "/" back in the unix case, so the unix resolver receives the
|
||||
// actual endpoint in the "unix://[/absolute/path]" case.
|
||||
ret.Endpoint = "/" + ret.Endpoint
|
||||
}
|
||||
return ret
|
||||
}
|
||||
114 vendor/google.golang.org/grpc/internal/internal.go generated vendored
@ -38,11 +38,10 @@ var (
|
||||
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
|
||||
// default, but tests may wish to set it lower for convenience.
|
||||
KeepaliveMinPingTime = 10 * time.Second
|
||||
// ParseServiceConfigForTesting is for creating a fake
|
||||
// ClientConn for resolver testing only
|
||||
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
|
||||
// ParseServiceConfig parses a JSON representation of the service config.
|
||||
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
|
||||
// EqualServiceConfigForTesting is for testing service config generation and
|
||||
// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
|
||||
// parsing. Both a and b should be returned by ParseServiceConfig.
|
||||
// This function compares the config without rawJSON stripped, in case the
|
||||
// there's difference in white space.
|
||||
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
|
||||
@ -59,11 +58,112 @@ var (
|
||||
// gRPC server. An xDS-enabled server needs to know what type of credentials
|
||||
// is configured on the underlying gRPC server. This is set by server.go.
|
||||
GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
|
||||
// CanonicalString returns the canonical string of the code defined here:
|
||||
// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
CanonicalString interface{} // func (codes.Code) string
|
||||
// DrainServerTransports initiates a graceful close of existing connections
|
||||
// on a gRPC server accepted on the provided listener address. An
|
||||
// xDS-enabled server invokes this method on a grpc.Server when a particular
|
||||
// listener moves to "not-serving" mode.
|
||||
DrainServerTransports interface{} // func(*grpc.Server, string)
|
||||
// AddGlobalServerOptions adds an array of ServerOption that will be
|
||||
// effective globally for newly created servers. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalServerOptions interface{} // func(opt ...ServerOption)
|
||||
// ClearGlobalServerOptions clears the array of extra ServerOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalServerOptions func()
|
||||
// AddGlobalDialOptions adds an array of DialOption that will be effective
|
||||
// globally for newly created client channels. The priority will be: 1.
|
||||
// user-provided; 2. this method; 3. default values.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
AddGlobalDialOptions interface{} // func(opt ...DialOption)
|
||||
// DisableGlobalDialOptions returns a DialOption that prevents the
|
||||
// ClientConn from applying the global DialOptions (set via
|
||||
// AddGlobalDialOptions).
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
DisableGlobalDialOptions interface{} // func() grpc.DialOption
|
||||
// ClearGlobalDialOptions clears the array of extra DialOption. This
|
||||
// method is useful in testing and benchmarking.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
ClearGlobalDialOptions func()
|
||||
// JoinDialOptions combines the dial options passed as arguments into a
|
||||
// single dial option.
|
||||
JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption
|
||||
// JoinServerOptions combines the server options passed as arguments into a
|
||||
// single server option.
|
||||
JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption
|
||||
|
||||
// WithBinaryLogger returns a DialOption that specifies the binary logger
|
||||
// for a ClientConn.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption
|
||||
// BinaryLogger returns a ServerOption that can set the binary logger for a
|
||||
// server.
|
||||
//
|
||||
// This is used in the 1.0 release of gcp/observability, and thus must not be
|
||||
// deleted or changed.
|
||||
BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption
|
||||
|
||||
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
|
||||
// the provided xds bootstrap config instead of the global configuration from
|
||||
// the supported environment variables. The resolver.Builder is meant to be
|
||||
// used in conjunction with the grpc.WithResolvers DialOption.
|
||||
//
|
||||
// Testing Only
|
||||
//
|
||||
// This function should ONLY be used for testing and may not work with some
|
||||
// other features, including the CSDS service.
|
||||
NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
|
||||
|
||||
// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
|
||||
// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
|
||||
// variable.
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
RegisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster
|
||||
// Specifier Plugin for testing purposes. This is needed because there is no way
|
||||
// to unregister the RLS Cluster Specifier Plugin after registering it solely
|
||||
// for testing purposes using RegisterRLSClusterSpecifierPluginForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RLS env var is removed.
|
||||
UnregisterRLSClusterSpecifierPluginForTesting func()
|
||||
|
||||
// RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing
|
||||
// purposes, regardless of the RBAC environment variable.
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
RegisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for
|
||||
// testing purposes. This is needed because there is no way to unregister the
|
||||
// HTTP Filter after registering it solely for testing purposes using
|
||||
// RegisterRBACHTTPFilterForTesting().
|
||||
//
|
||||
// TODO: Remove this function once the RBAC env var is removed.
|
||||
UnregisterRBACHTTPFilterForTesting func()
|
||||
|
||||
// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
|
||||
ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions)
|
||||
)
|
||||
|
||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||
@ -86,3 +186,9 @@ const (
|
||||
// that supports backend returned by grpclb balancer.
|
||||
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
|
||||
)
|
||||
|
||||
// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
|
||||
//
|
||||
// It currently has an experimental suffix which would be removed once
|
||||
// end-to-end testing of the policy is completed.
|
||||
const RLSLoadBalancingPolicyName = "rls_experimental"
|
||||
|
||||
88 vendor/google.golang.org/grpc/internal/metadata/metadata.go generated vendored
@ -22,6 +22,9 @@
|
||||
package metadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
@ -30,14 +33,38 @@ type mdKeyType string
|
||||
|
||||
const mdKey = mdKeyType("grpc.internal.address.metadata")
|
||||
|
||||
type mdValue metadata.MD
|
||||
|
||||
func (m mdValue) Equal(o interface{}) bool {
|
||||
om, ok := o.(mdValue)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
if len(m) != len(om) {
|
||||
return false
|
||||
}
|
||||
for k, v := range m {
|
||||
ov := om[k]
|
||||
if len(ov) != len(v) {
|
||||
return false
|
||||
}
|
||||
for i, ve := range v {
|
||||
if ov[i] != ve {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Get returns the metadata of addr.
|
||||
func Get(addr resolver.Address) metadata.MD {
|
||||
attrs := addr.Attributes
|
||||
if attrs == nil {
|
||||
return nil
|
||||
}
|
||||
md, _ := attrs.Value(mdKey).(metadata.MD)
|
||||
return md
|
||||
md, _ := attrs.Value(mdKey).(mdValue)
|
||||
return metadata.MD(md)
|
||||
}
|
||||
|
||||
// Set sets (overrides) the metadata in addr.
|
||||
@ -45,6 +72,61 @@ func Get(addr resolver.Address) metadata.MD {
|
||||
// When a SubConn is created with this address, the RPCs sent on it will all
|
||||
// have this metadata.
|
||||
func Set(addr resolver.Address, md metadata.MD) resolver.Address {
|
||||
addr.Attributes = addr.Attributes.WithValues(mdKey, md)
|
||||
addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
|
||||
return addr
|
||||
}
|
||||
|
||||
// Validate validates every pair in md with ValidatePair.
|
||||
func Validate(md metadata.MD) error {
|
||||
for k, vals := range md {
|
||||
if err := ValidatePair(k, vals...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E
|
||||
func hasNotPrintable(msg string) bool {
|
||||
// for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(msg); i++ {
|
||||
if msg[i] < 0x20 || msg[i] > 0x7E {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) :
|
||||
//
|
||||
// - key must contain one or more characters.
|
||||
// - the characters in the key must be contained in [0-9 a-z _ - .].
|
||||
// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
|
||||
// - the characters in the every value must be printable (in [%x20-%x7E]).
|
||||
func ValidatePair(key string, vals ...string) error {
|
||||
// key should not be empty
|
||||
if key == "" {
|
||||
return fmt.Errorf("there is an empty key in the header")
|
||||
}
|
||||
// pseudo-header will be ignored
|
||||
if key[0] == ':' {
|
||||
return nil
|
||||
}
|
||||
// check key, for i that saving a conversion if not using for range
|
||||
for i := 0; i < len(key); i++ {
|
||||
r := key[i]
|
||||
if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
|
||||
return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
|
||||
}
|
||||
}
|
||||
if strings.HasSuffix(key, "-bin") {
|
||||
return nil
|
||||
}
|
||||
// check value
|
||||
for _, val := range vals {
|
||||
if hasNotPrintable(val) {
|
||||
return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
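Editor's note: the new Validate/ValidatePair helpers above enforce lower-case keys in [0-9a-z-_.], skip value checks for "-bin" keys, and require printable ASCII everywhere else. A hedged in-tree sketch (only grpc-go itself can import internal/metadata):

package sketch

import (
	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
)

func validateExample() error {
	md := metadata.Pairs(
		"user-agent", "my-app/1.0",              // ok: key and value pass the checks
		"trace-bin", string([]byte{0x00, 0xff}), // ok: values of "-bin" keys are not validated
	)
	return imetadata.Validate(md) // nil for the pairs above
}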
82 vendor/google.golang.org/grpc/internal/pretty/pretty.go generated vendored Normal file
@ -0,0 +1,82 @@
|
||||
/*
|
||||
*
|
||||
* Copyright 2021 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package pretty defines helper functions to pretty-print structs for logging.
|
||||
package pretty
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
protov1 "github.com/golang/protobuf/proto"
|
||||
"google.golang.org/protobuf/encoding/protojson"
|
||||
protov2 "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
const jsonIndent = " "
|
||||
|
||||
// ToJSON marshals the input into a json string.
|
||||
//
|
||||
// If marshal fails, it falls back to fmt.Sprintf("%+v").
|
||||
func ToJSON(e interface{}) string {
|
||||
switch ee := e.(type) {
|
||||
case protov1.Message:
|
||||
mm := jsonpb.Marshaler{Indent: jsonIndent}
|
||||
ret, err := mm.MarshalToString(ee)
|
||||
if err != nil {
|
||||
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
|
||||
// messages are not imported, and this will fail because the message
|
||||
// is not found.
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return ret
|
||||
case protov2.Message:
|
||||
mm := protojson.MarshalOptions{
|
||||
Multiline: true,
|
||||
Indent: jsonIndent,
|
||||
}
|
||||
ret, err := mm.Marshal(ee)
|
||||
if err != nil {
|
||||
// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
|
||||
// messages are not imported, and this will fail because the message
|
||||
// is not found.
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
default:
|
||||
ret, err := json.MarshalIndent(ee, "", jsonIndent)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("%+v", ee)
|
||||
}
|
||||
return string(ret)
|
||||
}
|
||||
}
|
||||
|
||||
// FormatJSON formats the input json bytes with indentation.
|
||||
//
|
||||
// If Indent fails, it returns the unchanged input as string.
|
||||
func FormatJSON(b []byte) string {
|
||||
var out bytes.Buffer
|
||||
err := json.Indent(&out, b, "", jsonIndent)
|
||||
if err != nil {
|
||||
return string(b)
|
||||
}
|
||||
return out.String()
|
||||
}
|
||||
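Editor's note: pretty.ToJSON above falls back to fmt.Sprintf("%+v") whenever JSON marshaling fails, so it can be dropped straight into log statements. A hedged in-tree sketch (the update type and logf parameter are illustrative):

package sketch

import "google.golang.org/grpc/internal/pretty"

type update struct {
	Addresses []string
	Ready     bool
}

func logUpdate(logf func(format string, args ...interface{}), u *update) {
	logf("received update: %s", pretty.ToJSON(u)) // indented JSON for plain structs, proto-aware for messages
}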
9 vendor/google.golang.org/grpc/internal/resolver/config_selector.go generated vendored
@ -117,9 +117,12 @@ type ClientInterceptor interface {
|
||||
NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
|
||||
}
|
||||
|
||||
// ServerInterceptor is unimplementable; do not use.
|
||||
// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side.
|
||||
type ServerInterceptor interface {
|
||||
notDefined()
|
||||
// AllowRPC checks if an incoming RPC is allowed to proceed based on
|
||||
// information about connection RPC was received on, and HTTP Headers. This
|
||||
// information will be piped into context.
|
||||
AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
|
||||
}
|
||||
|
||||
type csKeyType string
|
||||
@ -129,7 +132,7 @@ const csKey = csKeyType("grpc.internal.resolver.configSelector")
|
||||
// SetConfigSelector sets the config selector in state and returns the new
|
||||
// state.
|
||||
func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
|
||||
state.Attributes = state.Attributes.WithValues(csKey, cs)
|
||||
state.Attributes = state.Attributes.WithValue(csKey, cs)
|
||||
return state
|
||||
}
|
||||
|
||||
|
||||
15
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
@ -116,7 +116,7 @@ type dnsBuilder struct{}
|
||||
|
||||
// Build creates and starts a DNS resolver that watches the name resolution of the target.
|
||||
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
host, port, err := parseTarget(target.Endpoint, defaultPort)
|
||||
host, port, err := parseTarget(target.Endpoint(), defaultPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
|
||||
disableServiceConfig: opts.DisableServiceConfig,
|
||||
}
|
||||
|
||||
if target.Authority == "" {
|
||||
if target.URL.Host == "" {
|
||||
d.resolver = defaultResolver
|
||||
} else {
|
||||
d.resolver, err = customAuthorityResolver(target.Authority)
|
||||
d.resolver, err = customAuthorityResolver(target.URL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -277,18 +277,13 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||
return newAddrs, nil
|
||||
}
|
||||
|
||||
var filterError = func(err error) error {
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
|
||||
// Timeouts and temporary errors should be communicated to gRPC to
|
||||
// attempt another DNS query (with backoff). Other errors should be
|
||||
// suppressed (they may represent the absence of a TXT record).
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleDNSError(err error, lookupType string) error {
|
||||
err = filterError(err)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
|
||||
logger.Info(err)
|
||||
@ -323,12 +318,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
|
||||
}
|
||||
|
||||
func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
|
||||
var newAddrs []resolver.Address
|
||||
addrs, err := d.resolver.LookupHost(d.ctx, d.host)
|
||||
if err != nil {
|
||||
err = handleDNSError(err, "A")
|
||||
return nil, err
|
||||
}
|
||||
newAddrs := make([]resolver.Address, 0, len(addrs))
|
||||
for _, a := range addrs {
|
||||
ip, ok := formatIP(a)
|
||||
if !ok {
|
||||
|
||||
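The filtering behaviour described in the handleDNSError comments above can be restated as a standalone sketch (shouldRetry is a hypothetical helper, not part of the vendored code): timeouts and temporary DNS errors are surfaced so gRPC re-resolves with backoff, while other DNS errors are suppressed because they may just mean the record does not exist.

    package main

    import (
        "errors"
        "fmt"
        "net"
    )

    // shouldRetry mirrors the decision handleDNSError makes.
    func shouldRetry(err error) bool {
        var dnsErr *net.DNSError
        if errors.As(err, &dnsErr) && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
            return false // e.g. NXDOMAIN for a missing TXT record: suppress
        }
        return true // timeouts, temporary failures, unknown errors: retry with backoff
    }

    func main() {
        fmt.Println(shouldRetry(&net.DNSError{Err: "no such host"}))                 // false
        fmt.Println(shouldRetry(&net.DNSError{Err: "i/o timeout", IsTimeout: true})) // true
    }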
11
vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
generated
vendored
@ -20,13 +20,20 @@
|
||||
// name without scheme back to gRPC as resolved address.
|
||||
package passthrough
|
||||
|
||||
import "google.golang.org/grpc/resolver"
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
const scheme = "passthrough"
|
||||
|
||||
type passthroughBuilder struct{}
|
||||
|
||||
func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
if target.Endpoint() == "" && opts.Dialer == nil {
|
||||
return nil, errors.New("passthrough: received empty target in Build()")
|
||||
}
|
||||
r := &passthroughResolver{
|
||||
target: target,
|
||||
cc: cc,
|
||||
@ -45,7 +52,7 @@ type passthroughResolver struct {
|
||||
}
|
||||
|
||||
func (r *passthroughResolver) start() {
|
||||
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
|
||||
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
|
||||
}
|
||||
|
||||
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
|
||||
|
||||
21
vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
generated
vendored
@ -34,13 +34,24 @@ type builder struct {
|
||||
}
|
||||
|
||||
func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
|
||||
if target.Authority != "" {
|
||||
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority)
|
||||
if target.URL.Host != "" {
|
||||
return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
|
||||
}
|
||||
addr := resolver.Address{Addr: target.Endpoint}
|
||||
|
||||
// gRPC was parsing the dial target manually before PR #4817, and we
|
||||
// switched to using url.Parse() in that PR. To avoid breaking existing
|
||||
// resolver implementations we ended up stripping the leading "/" from the
|
||||
// endpoint. This obviously does not work for the "unix" scheme. Hence we
|
||||
// end up using the parsed URL instead.
|
||||
endpoint := target.URL.Path
|
||||
if endpoint == "" {
|
||||
endpoint = target.URL.Opaque
|
||||
}
|
||||
addr := resolver.Address{Addr: endpoint}
|
||||
if b.scheme == unixAbstractScheme {
|
||||
// prepend "\x00" to address for unix-abstract
|
||||
addr.Addr = "\x00" + addr.Addr
|
||||
// We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do
|
||||
// not want trailing \0 in address.
|
||||
addr.Addr = "@" + addr.Addr
|
||||
}
|
||||
cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
|
||||
return &nopResolver{}, nil
|
||||
|
||||
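The "@" prefix added above relies on the Go standard library's convention for Linux abstract sockets: a unix address beginning with "@" is translated to a leading NUL byte, which is what C-based gRPC writes directly. A minimal, Linux-only sketch (the socket name is made up):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // "@" selects the abstract namespace, so no file is created on disk.
        ln, err := net.Listen("unix", "@grpc-abstract-demo")
        if err != nil {
            fmt.Println("listen:", err) // fails on non-Linux platforms
            return
        }
        defer ln.Close()
        fmt.Println("listening on abstract socket", ln.Addr())
    }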
130
vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
/*
 *
 * Copyright 2023 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package serviceconfig

import (
    "encoding/json"
    "fmt"
    "math"
    "strconv"
    "strings"
    "time"
)

// Duration defines JSON marshal and unmarshal methods to conform to the
// protobuf JSON spec defined [here].
//
// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
type Duration time.Duration

func (d Duration) String() string {
    return fmt.Sprint(time.Duration(d))
}

// MarshalJSON converts from d to a JSON string output.
func (d Duration) MarshalJSON() ([]byte, error) {
    ns := time.Duration(d).Nanoseconds()
    sec := ns / int64(time.Second)
    ns = ns % int64(time.Second)

    var sign string
    if sec < 0 || ns < 0 {
        sign, sec, ns = "-", -1*sec, -1*ns
    }

    // Generated output always contains 0, 3, 6, or 9 fractional digits,
    // depending on required precision.
    str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
    str = strings.TrimSuffix(str, "000")
    str = strings.TrimSuffix(str, "000")
    str = strings.TrimSuffix(str, ".000")
    return []byte(fmt.Sprintf("\"%ss\"", str)), nil
}

// UnmarshalJSON unmarshals b as a duration JSON string into d.
func (d *Duration) UnmarshalJSON(b []byte) error {
    var s string
    if err := json.Unmarshal(b, &s); err != nil {
        return err
    }
    if !strings.HasSuffix(s, "s") {
        return fmt.Errorf("malformed duration %q: missing seconds unit", s)
    }
    neg := false
    if s[0] == '-' {
        neg = true
        s = s[1:]
    }
    ss := strings.SplitN(s[:len(s)-1], ".", 3)
    if len(ss) > 2 {
        return fmt.Errorf("malformed duration %q: too many decimals", s)
    }
    // hasDigits is set if either the whole or fractional part of the number is
    // present, since both are optional but one is required.
    hasDigits := false
    var sec, ns int64
    if len(ss[0]) > 0 {
        var err error
        if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
            return fmt.Errorf("malformed duration %q: %v", s, err)
        }
        // Maximum seconds value per the durationpb spec.
        const maxProtoSeconds = 315_576_000_000
        if sec > maxProtoSeconds {
            return fmt.Errorf("out of range: %q", s)
        }
        hasDigits = true
    }
    if len(ss) == 2 && len(ss[1]) > 0 {
        if len(ss[1]) > 9 {
            return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
        }
        var err error
        if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
            return fmt.Errorf("malformed duration %q: %v", s, err)
        }
        for i := 9; i > len(ss[1]); i-- {
            ns *= 10
        }
        hasDigits = true
    }
    if !hasDigits {
        return fmt.Errorf("malformed duration %q: contains no numbers", s)
    }

    if neg {
        sec *= -1
        ns *= -1
    }

    // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
    const maxSeconds = math.MaxInt64 / int64(time.Second)
    const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
    const minSeconds = math.MinInt64 / int64(time.Second)
    const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)

    if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
        *d = Duration(math.MaxInt64)
    } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
        *d = Duration(math.MinInt64)
    } else {
        *d = Duration(sec*int64(time.Second) + ns)
    }
    return nil
}
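A short round-trip sketch of the wire format this new type implements (the import is internal to grpc-go and is shown only to illustrate the protobuf JSON duration encoding):

    package main

    import (
        "encoding/json"
        "fmt"
        "time"

        "google.golang.org/grpc/internal/serviceconfig" // internal; illustration only
    )

    func main() {
        var d serviceconfig.Duration
        _ = json.Unmarshal([]byte(`"1.5s"`), &d) // decimal seconds with an "s" suffix
        fmt.Println(time.Duration(d))            // 1.5s

        out, _ := json.Marshal(d)
        fmt.Println(string(out)) // "1.500s" — always 0, 3, 6, or 9 fractional digits
    }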
12
vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
generated
vendored
@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
|
||||
// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
|
||||
// config. This method iterates through that list in order, and stops at the
|
||||
// first policy that is supported.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
// - If the config for the first supported policy is invalid, the whole service
|
||||
// config is invalid.
|
||||
// - If the list doesn't contain any supported policy, the whole service config
|
||||
// is invalid.
|
||||
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
var ir intermediateBalancerConfig
|
||||
err := json.Unmarshal(b, &ir)
|
||||
@ -78,6 +78,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
return err
|
||||
}
|
||||
|
||||
var names []string
|
||||
for i, lbcfg := range ir {
|
||||
if len(lbcfg) != 1 {
|
||||
return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||
@ -92,6 +93,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
for name, jsonCfg = range lbcfg {
|
||||
}
|
||||
|
||||
names = append(names, name)
|
||||
builder := balancer.Get(name)
|
||||
if builder == nil {
|
||||
// If the balancer is not registered, move on to the next config.
|
||||
@ -120,7 +122,7 @@ func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||
// return. This means we had a loadBalancingConfig slice but did not
|
||||
// encounter a registered policy. The config is considered invalid in this
|
||||
// case.
|
||||
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
|
||||
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
|
||||
}
|
||||
|
||||
// MethodConfig defines the configuration recommended by the service providers for a
|
||||
|
||||
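The selection rule restated in the doc comment above (the first registered policy wins; an invalid config for that policy, or no registered policy at all, rejects the whole service config) is easiest to see with a hypothetical config fragment, not taken from this change:

    // If "my_custom_policy" has not been registered via balancer.Register, the
    // parser falls through to "round_robin"; if neither were registered,
    // UnmarshalJSON would now report the names it saw in its error message.
    const exampleServiceConfig = `{
      "loadBalancingConfig": [
        { "my_custom_policy": {} },
        { "round_robin": {} }
      ]
    }`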
10
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool {
|
||||
}
|
||||
return proto.Equal(e.s.s, tse.s.s)
|
||||
}
|
||||
|
||||
// IsRestrictedControlPlaneCode returns whether the status includes a code
|
||||
// restricted for control plane usage as defined by gRFC A54.
|
||||
func IsRestrictedControlPlaneCode(s *Status) bool {
|
||||
switch s.Code() {
|
||||
case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
2
vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
generated
vendored
@ -1,5 +1,3 @@
|
||||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
|
||||
21
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
@ -1,4 +1,5 @@
|
||||
// +build !linux appengine
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
/*
|
||||
*
|
||||
@ -35,41 +36,41 @@ var logger = grpclog.Component("core")
|
||||
|
||||
func log() {
|
||||
once.Do(func() {
|
||||
logger.Info("CPU time info is unavailable on non-linux or appengine environment.")
|
||||
logger.Info("CPU time info is unavailable on non-linux environments.")
|
||||
})
|
||||
}
|
||||
|
||||
// GetCPUTime returns the how much CPU time has passed since the start of this process.
|
||||
// It always returns 0 under non-linux or appengine environment.
|
||||
// GetCPUTime returns the how much CPU time has passed since the start of this
|
||||
// process. It always returns 0 under non-linux environments.
|
||||
func GetCPUTime() int64 {
|
||||
log()
|
||||
return 0
|
||||
}
|
||||
|
||||
// Rusage is an empty struct under non-linux or appengine environment.
|
||||
// Rusage is an empty struct under non-linux environments.
|
||||
type Rusage struct{}
|
||||
|
||||
// GetRusage is a no-op function under non-linux or appengine environment.
|
||||
// GetRusage is a no-op function under non-linux environments.
|
||||
func GetRusage() *Rusage {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
|
||||
// between two Rusage structs. It a no-op function for non-linux or appengine environment.
|
||||
// between two Rusage structs. It a no-op function for non-linux environments.
|
||||
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
|
||||
log()
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
|
||||
// SetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
|
||||
log()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
|
||||
// a negative return value indicates the operation is not supported
|
||||
// GetTCPUserTimeout is a no-op function under non-linux environments.
|
||||
// A negative return value indicates the operation is not supported
|
||||
func GetTCPUserTimeout(conn net.Conn) (int, error) {
|
||||
log()
|
||||
return -1, nil
|
||||
|
||||
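The build-tag change above swaps the legacy "// +build" comment for the "//go:build" form introduced in Go 1.17. A minimal sketch of how the two lines pair up at the top of a file (the package name is made up):

    //go:build !linux
    // +build !linux

    // The //go:build line is authoritative on Go 1.17+; gofmt keeps the legacy
    // // +build line in sync for older toolchains. The constraint block must
    // precede the package clause and be followed by a blank line.
    package syscallstub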
145
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
@ -22,6 +22,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
@ -29,6 +30,7 @@ import (
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
@ -133,9 +135,11 @@ type cleanupStream struct {
|
||||
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
|
||||
|
||||
type earlyAbortStream struct {
|
||||
httpStatus uint32
|
||||
streamID uint32
|
||||
contentSubtype string
|
||||
status *status.Status
|
||||
rst bool
|
||||
}
|
||||
|
||||
func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
|
||||
@ -189,7 +193,7 @@ type goAway struct {
|
||||
code http2.ErrCode
|
||||
debugData []byte
|
||||
headsUp bool
|
||||
closeConn bool
|
||||
closeConn error // if set, loopyWriter will exit, resulting in conn closure
|
||||
}
|
||||
|
||||
func (*goAway) isTransportResponseFrame() bool { return false }
|
||||
@ -207,6 +211,14 @@ type outFlowControlSizeRequest struct {
|
||||
|
||||
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
|
||||
|
||||
// closeConnection is an instruction to tell the loopy writer to flush the
|
||||
// framer and exit, which will cause the transport's connection to be closed
|
||||
// (by the client or server). The transport itself will close after the reader
|
||||
// encounters the EOF caused by the connection closure.
|
||||
type closeConnection struct{}
|
||||
|
||||
func (closeConnection) isTransportResponseFrame() bool { return false }
|
||||
|
||||
type outStreamState int
|
||||
|
||||
const (
|
||||
@ -406,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) {
|
||||
select {
|
||||
case <-c.ch:
|
||||
case <-c.done:
|
||||
return nil, ErrConnClosing
|
||||
return nil, errors.New("transport closed by client")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -476,12 +488,14 @@ type loopyWriter struct {
|
||||
hEnc *hpack.Encoder // HPACK encoder.
|
||||
bdpEst *bdpEstimator
|
||||
draining bool
|
||||
conn net.Conn
|
||||
logger *grpclog.PrefixLogger
|
||||
|
||||
// Side-specific handlers
|
||||
ssGoAwayHandler func(*goAway) (bool, error)
|
||||
}
|
||||
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
|
||||
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
|
||||
var buf bytes.Buffer
|
||||
l := &loopyWriter{
|
||||
side: s,
|
||||
@ -494,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
|
||||
hBuf: &buf,
|
||||
hEnc: hpack.NewEncoder(&buf),
|
||||
bdpEst: bdpEst,
|
||||
conn: conn,
|
||||
logger: logger,
|
||||
}
|
||||
return l
|
||||
}
|
||||
@ -511,23 +527,26 @@ const minBatchSize = 1000
|
||||
// 2. Stream level flow control quota available.
|
||||
//
|
||||
// In each iteration of run loop, other than processing the incoming control
|
||||
// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
|
||||
// This results in writing of HTTP2 frames into an underlying write buffer.
|
||||
// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
|
||||
// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
|
||||
// if the batch size is too low to give stream goroutines a chance to fill it up.
|
||||
// frame, loopy calls processData, which processes one node from the
|
||||
// activeStreams linked-list. This results in writing of HTTP2 frames into an
|
||||
// underlying write buffer. When there's no more control frames to read from
|
||||
// controlBuf, loopy flushes the write buffer. As an optimization, to increase
|
||||
// the batch size for each flush, loopy yields the processor, once if the batch
|
||||
// size is too low to give stream goroutines a chance to fill it up.
|
||||
//
|
||||
// Upon exiting, if the error causing the exit is not an I/O error, run()
|
||||
// flushes and closes the underlying connection. Otherwise, the connection is
|
||||
// left open to allow the I/O error to be encountered by the reader instead.
|
||||
func (l *loopyWriter) run() (err error) {
|
||||
defer func() {
|
||||
if err == ErrConnClosing {
|
||||
// Don't log ErrConnClosing as error since it happens
|
||||
// 1. When the connection is closed by some other known issue.
|
||||
// 2. User closed the connection.
|
||||
// 3. A graceful close of connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: loopyWriter.run returning. %v", err)
|
||||
}
|
||||
err = nil
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("loopyWriter exiting with error: %v", err)
|
||||
}
|
||||
if !isIOError(err) {
|
||||
l.framer.writer.Flush()
|
||||
l.conn.Close()
|
||||
}
|
||||
l.cbuf.finish()
|
||||
}()
|
||||
for {
|
||||
it, err := l.cbuf.get(true)
|
||||
@ -572,7 +591,6 @@ func (l *loopyWriter) run() (err error) {
|
||||
}
|
||||
l.framer.writer.Flush()
|
||||
break hasdata
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -581,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error
|
||||
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
|
||||
func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
|
||||
// Otherwise update the quota.
|
||||
if w.streamID == 0 {
|
||||
l.sendQuota += w.increment
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// Find the stream and update it.
|
||||
if str, ok := l.estdStreams[w.streamID]; ok {
|
||||
@ -593,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error
|
||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
@ -604,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
|
||||
}
|
||||
|
||||
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
|
||||
if err := l.applySettings(s.ss); err != nil {
|
||||
return err
|
||||
}
|
||||
l.applySettings(s.ss)
|
||||
return l.framer.fr.WriteSettingsAck()
|
||||
}
|
||||
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
|
||||
str := &outStream{
|
||||
id: h.streamID,
|
||||
state: empty,
|
||||
@ -618,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
|
||||
wq: h.wq,
|
||||
}
|
||||
l.estdStreams[h.streamID] = str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
if l.side == serverSide {
|
||||
str, ok := l.estdStreams[h.streamID]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -653,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
|
||||
itl: &itemList{},
|
||||
wq: h.wq,
|
||||
}
|
||||
str.itl.enqueue(h)
|
||||
return l.originateStream(str)
|
||||
return l.originateStream(str, h)
|
||||
}
|
||||
|
||||
func (l *loopyWriter) originateStream(str *outStream) error {
|
||||
hdr := str.itl.dequeue().(*headerFrame)
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
if err == ErrConnClosing {
|
||||
return err
|
||||
}
|
||||
// Other errors(errStreamDrain) need not close transport.
|
||||
func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
|
||||
// l.draining is set when handling GoAway. In which case, we want to avoid
|
||||
// creating new streams.
|
||||
if l.draining {
|
||||
// TODO: provide a better error with the reason we are in draining.
|
||||
hdr.onOrphaned(errStreamDrain)
|
||||
return nil
|
||||
}
|
||||
if err := hdr.initStream(str.id); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -680,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
||||
l.hBuf.Reset()
|
||||
for _, f := range hf {
|
||||
if err := l.hEnc.WriteField(f); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
|
||||
if l.logger.V(logLevel) {
|
||||
l.logger.Warningf("Encountered error while encoding headers: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -719,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
func (l *loopyWriter) preprocessData(df *dataFrame) {
|
||||
str, ok := l.estdStreams[df.streamID]
|
||||
if !ok {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
// If we got data for a stream it means that
|
||||
// stream was originated and the headers were sent out.
|
||||
@ -731,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error {
|
||||
str.state = active
|
||||
l.activeStreams.enqueue(str)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
@ -742,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error {
|
||||
|
||||
}
|
||||
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
|
||||
func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
|
||||
o.resp <- l.sendQuota
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
@ -761,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
if l.draining && len(l.estdStreams) == 0 {
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("finished processing active streams while in draining mode")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -771,9 +785,12 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
|
||||
if l.side == clientSide {
|
||||
return errors.New("earlyAbortStream not handled on client")
|
||||
}
|
||||
|
||||
// In case the caller forgets to set the http status, default to 200.
|
||||
if eas.httpStatus == 0 {
|
||||
eas.httpStatus = 200
|
||||
}
|
||||
headerFields := []hpack.HeaderField{
|
||||
{Name: ":status", Value: "200"},
|
||||
{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
|
||||
{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
|
||||
{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
|
||||
{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
|
||||
@ -782,6 +799,11 @@ func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
|
||||
if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
if eas.rst {
|
||||
if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -789,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
|
||||
if l.side == clientSide {
|
||||
l.draining = true
|
||||
if len(l.estdStreams) == 0 {
|
||||
return ErrConnClosing
|
||||
// Flush and close the connection; we are done with it.
|
||||
return errors.New("received GOAWAY with no active streams")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -810,7 +833,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error {
|
||||
func (l *loopyWriter) handle(i interface{}) error {
|
||||
switch i := i.(type) {
|
||||
case *incomingWindowUpdate:
|
||||
return l.incomingWindowUpdateHandler(i)
|
||||
l.incomingWindowUpdateHandler(i)
|
||||
case *outgoingWindowUpdate:
|
||||
return l.outgoingWindowUpdateHandler(i)
|
||||
case *incomingSettings:
|
||||
@ -820,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||||
case *headerFrame:
|
||||
return l.headerHandler(i)
|
||||
case *registerStream:
|
||||
return l.registerStreamHandler(i)
|
||||
l.registerStreamHandler(i)
|
||||
case *cleanupStream:
|
||||
return l.cleanupStreamHandler(i)
|
||||
case *earlyAbortStream:
|
||||
@ -828,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error {
|
||||
case *incomingGoAway:
|
||||
return l.incomingGoAwayHandler(i)
|
||||
case *dataFrame:
|
||||
return l.preprocessData(i)
|
||||
l.preprocessData(i)
|
||||
case *ping:
|
||||
return l.pingHandler(i)
|
||||
case *goAway:
|
||||
return l.goAwayHandler(i)
|
||||
case *outFlowControlSizeRequest:
|
||||
return l.outFlowControlSizeRequestHandler(i)
|
||||
l.outFlowControlSizeRequestHandler(i)
|
||||
case closeConnection:
|
||||
// Just return a non-I/O error and run() will flush and close the
|
||||
// connection.
|
||||
return ErrConnClosing
|
||||
default:
|
||||
return fmt.Errorf("transport: unknown control message type %T", i)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
func (l *loopyWriter) applySettings(ss []http2.Setting) {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingInitialWindowSize:
|
||||
@ -859,7 +887,6 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error {
|
||||
updateHeaderTblSize(l.hEnc, s.Val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processData removes the first stream from active streams, writes out at most 16KB
|
||||
@ -876,9 +903,9 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
|
||||
// A data item is represented by a dataFrame, since it later translates into
|
||||
// multiple HTTP2 data frames.
|
||||
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
|
||||
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
|
||||
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
|
||||
// maximum possilbe HTTP2 frame size.
|
||||
// maximum possible HTTP2 frame size.
|
||||
|
||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
|
||||
// Client sends out empty data frame with endStream = true
|
||||
@ -893,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
|
||||
return false, nil
|
||||
return false, err
|
||||
}
|
||||
} else {
|
||||
l.activeStreams.enqueue(str)
|
||||
|
||||
6
vendor/google.golang.org/grpc/internal/transport/defaults.go
generated
vendored
@ -47,3 +47,9 @@ const (
|
||||
defaultClientMaxHeaderListSize = uint32(16 << 20)
|
||||
defaultServerMaxHeaderListSize = uint32(16 << 20)
|
||||
)
|
||||
|
||||
// MaxStreamID is the upper bound for the stream ID before the current
|
||||
// transport gracefully closes and new transport is created for subsequent RPCs.
|
||||
// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
|
||||
// integer. It's exported so that tests can override it.
|
||||
var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
|
||||
|
||||
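A quick check of the 75% figure in the comment above (stream IDs are unsigned 31-bit, so the ceiling is math.MaxInt32):

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // 2_147_483_647 * 3 / 4, evaluated as an untyped constant expression.
        fmt.Println(uint32(math.MaxInt32 * 3 / 4)) // 1610612735
    }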
4
vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
generated
vendored
@ -136,12 +136,10 @@ type inFlow struct {
|
||||
|
||||
// newLimit updates the inflow window to a new value n.
|
||||
// It assumes that n is always greater than the old limit.
|
||||
func (f *inFlow) newLimit(n uint32) uint32 {
|
||||
func (f *inFlow) newLimit(n uint32) {
|
||||
f.mu.Lock()
|
||||
d := n - f.limit
|
||||
f.limit = n
|
||||
f.mu.Unlock()
|
||||
return d
|
||||
}
|
||||
|
||||
func (f *inFlow) maybeAdjust(n uint32) uint32 {
|
||||
|
||||
80
vendor/google.golang.org/grpc/internal/transport/handler_server.go
generated
vendored
@ -39,6 +39,7 @@ import (
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
@ -46,24 +47,32 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
|
||||
// inside an http.Handler, or writes an HTTP error to w and returns an error.
|
||||
// It requires that the http Server supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
msg := "gRPC requires HTTP/2"
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
contentType := r.Header.Get("Content-Type")
|
||||
// TODO: do we assume contentType is lowercase? we did before
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
|
||||
if !validContentType {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
|
||||
http.Error(w, msg, http.StatusUnsupportedMediaType)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
|
||||
http.Error(w, msg, http.StatusInternalServerError)
|
||||
return nil, errors.New(msg)
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
contentSubtype: contentSubtype,
|
||||
stats: stats,
|
||||
}
|
||||
st.logger = prefixLoggerForServerHandlerTransport(st)
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := decodeTimeout(v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
|
||||
msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
for _, v := range vv {
|
||||
v, err := decodeMetadataHeader(k, v)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
|
||||
msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
|
||||
http.Error(w, msg, http.StatusBadRequest)
|
||||
return nil, status.Error(codes.Internal, msg)
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
}
|
||||
@ -138,15 +152,19 @@ type serverHandlerTransport struct {
|
||||
// TODO make sure this is consistent across handler_server and http2_server
|
||||
contentSubtype string
|
||||
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
func (ht *serverHandlerTransport) Close(err error) {
|
||||
ht.closeOnce.Do(func() {
|
||||
if ht.logger.V(logLevel) {
|
||||
ht.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
close(ht.closedCh)
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
@ -228,15 +246,15 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
})
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
for _, sh := range ht.stats {
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
ht.Close()
|
||||
ht.Close(errors.New("finished writing status"))
|
||||
return err
|
||||
}
|
||||
|
||||
@ -314,10 +332,10 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
for _, sh := range ht.stats {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
sh.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
case <-ht.req.Context().Done():
|
||||
}
|
||||
cancel()
|
||||
ht.Close()
|
||||
ht.Close(errors.New("request is done processing"))
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
@ -369,14 +387,14 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
if ht.stats != nil {
|
||||
s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range ht.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: ht.RemoteAddr(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
ht.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.trReader = &transportReader{
|
||||
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
|
||||
@ -435,17 +453,17 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
|
||||
|
||||
func (ht *serverHandlerTransport) IncrMsgRecv() {}
|
||||
|
||||
func (ht *serverHandlerTransport) Drain() {
|
||||
func (ht *serverHandlerTransport) Drain(debugData string) {
|
||||
panic("Drain() is not implemented")
|
||||
}
|
||||
|
||||
// mapRecvMsgError returns the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, in can only be:
|
||||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * an error from the status package
|
||||
// - io.EOF
|
||||
// - io.ErrUnexpectedEOF
|
||||
// - of type transport.ConnectionError
|
||||
// - an error from the status package
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
|
||||
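The handler-based transport patched above is what backs (*grpc.Server).ServeHTTP, so its failures now surface as plain HTTP error responses. A minimal sketch of serving gRPC through net/http (h2c is used here only to get HTTP/2 without TLS; the port and setup are illustrative):

    package main

    import (
        "net/http"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/h2c"
        "google.golang.org/grpc"
    )

    func main() {
        gs := grpc.NewServer() // register services on gs here
        // Requests reach NewServerHandlerTransport via gs.ServeHTTP, so the
        // surrounding server must speak HTTP/2.
        handler := h2c.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            gs.ServeHTTP(w, r)
        }), &http2.Server{})
        _ = (&http.Server{Addr: ":8080", Handler: handler}).ListenAndServe()
    }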
500
vendor/google.golang.org/grpc/internal/transport/http2_client.go
generated
vendored
@ -25,6 +25,7 @@ import (
|
||||
"math"
|
||||
"net"
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -37,8 +38,11 @@ import (
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
icredentials "google.golang.org/grpc/internal/credentials"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
imetadata "google.golang.org/grpc/internal/metadata"
|
||||
istatus "google.golang.org/grpc/internal/status"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/internal/transport/networktype"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@ -56,11 +60,15 @@ var clientConnectionCounter uint64
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
userAgent string
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
userAgent string
|
||||
// address contains the resolver returned address for this transport.
|
||||
// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
|
||||
// passed to `NewStream`, when determining the :authority header.
|
||||
address resolver.Address
|
||||
md metadata.MD
|
||||
conn net.Conn // underlying communication channel
|
||||
loopy *loopyWriter
|
||||
@ -77,6 +85,7 @@ type http2Client struct {
|
||||
framer *framer
|
||||
// controlBuf delivers all the control related tasks (e.g., window
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
// Do not access controlBuf with mu held.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
// The scheme used: https if TLS is on, http otherwise.
|
||||
@ -89,7 +98,7 @@ type http2Client struct {
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
statsHandler stats.Handler
|
||||
statsHandlers []stats.Handler
|
||||
|
||||
initialWindowSize int32
|
||||
|
||||
@ -97,17 +106,15 @@ type http2Client struct {
|
||||
maxSendHeaderListSize *uint32
|
||||
|
||||
bdpEst *bdpEstimator
|
||||
// onPrefaceReceipt is a callback that client transport calls upon
|
||||
// receiving server preface to signal that a succefull HTTP2
|
||||
// connection was established.
|
||||
onPrefaceReceipt func()
|
||||
|
||||
maxConcurrentStreams uint32
|
||||
streamQuota int64
|
||||
streamsQuotaAvailable chan struct{}
|
||||
waitingStreams uint32
|
||||
nextID uint32
|
||||
registeredCompressors string
|
||||
|
||||
// Do not access controlBuf with mu held.
|
||||
mu sync.Mutex // guard the following variables
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
@ -131,28 +138,35 @@ type http2Client struct {
|
||||
kpDormant bool
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
|
||||
onGoAway func(GoAwayReason)
|
||||
onClose func()
|
||||
onClose func(GoAwayReason)
|
||||
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
|
||||
address := addr.Addr
|
||||
networkType, ok := networktype.Get(addr)
|
||||
if fn != nil {
|
||||
// Special handling for unix scheme with custom dialer. Back in the day,
|
||||
// we did not have a unix resolver and therefore targets with a unix
|
||||
// scheme would end up using the passthrough resolver. So, user's used a
|
||||
// custom dialer in this case and expected the original dial target to
|
||||
// be passed to the custom dialer. Now, we have a unix resolver. But if
|
||||
// a custom dialer is specified, we want to retain the old behavior in
|
||||
// terms of the address being passed to the custom dialer.
|
||||
if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
|
||||
// For backward compatibility, if the user dialed "unix:///path",
|
||||
// the passthrough resolver would be used and the user's custom
|
||||
// dialer would see "unix:///path". Since the unix resolver is used
|
||||
// and the address is now "/path", prepend "unix://" so the user's
|
||||
// custom dialer sees the same address.
|
||||
return fn(ctx, "unix://"+address)
|
||||
// Supported unix targets are either "unix://absolute-path" or
|
||||
// "unix:relative-path".
|
||||
if filepath.IsAbs(address) {
|
||||
return fn(ctx, "unix://"+address)
|
||||
}
|
||||
return fn(ctx, "unix:"+address)
|
||||
}
|
||||
return fn(ctx, address)
|
||||
}
|
||||
@ -184,7 +198,7 @@ func isTemporary(err error) bool {
|
||||
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
||||
// and starts to receive messages on it. Non-nil error returns if construction
|
||||
// fails.
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) {
|
||||
scheme := "http"
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
@ -193,19 +207,51 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
}
|
||||
}()
|
||||
|
||||
// gRPC, resolver, balancer etc. can specify arbitrary data in the
|
||||
// Attributes field of resolver.Address, which is shoved into connectCtx
|
||||
// and passed to the dialer and credential handshaker. This makes it possible for
|
||||
// address specific arbitrary data to reach custom dialers and credential handshakers.
|
||||
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
|
||||
|
||||
conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
|
||||
if err != nil {
|
||||
if opts.FailOnNonTempDialError {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
|
||||
}
|
||||
return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
|
||||
return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
|
||||
}
|
||||
|
||||
// Any further errors will close the underlying connection
|
||||
defer func(conn net.Conn) {
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
}(conn)
|
||||
|
||||
// The following defer and goroutine monitor the connectCtx for cancelation
|
||||
// and deadline. On context expiration, the connection is hard closed and
|
||||
// this function will naturally fail as a result. Otherwise, the defer
|
||||
// waits for the goroutine to exit to prevent the context from being
|
||||
// monitored (and to prevent the connection from ever being closed) after
|
||||
// returning from this function.
|
||||
ctxMonitorDone := grpcsync.NewEvent()
|
||||
newClientCtx, newClientDone := context.WithCancel(connectCtx)
|
||||
defer func() {
|
||||
newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
|
||||
<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
|
||||
}()
|
||||
go func(conn net.Conn) {
|
||||
defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
|
||||
<-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
|
||||
if err := connectCtx.Err(); err != nil {
|
||||
// connectCtx expired before exiting the function. Hard close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Aborting due to connect deadline expiring: %v", err)
|
||||
}
|
||||
conn.Close()
|
||||
}
|
||||
}(conn)
|
||||
|
||||
kp := opts.KeepaliveParams
|
||||
// Validate keepalive parameters.
|
||||
if kp.Time == 0 {
|
||||
@ -237,20 +283,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
}
|
||||
}
|
||||
if transportCreds != nil {
|
||||
// gRPC, resolver, balancer etc. can specify arbitrary data in the
|
||||
// Attributes field of resolver.Address, which is shoved into connectCtx
|
||||
// and passed to the credential handshaker. This makes it possible for
|
||||
// address specific arbitrary data to reach the credential handshaker.
|
||||
connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
|
||||
rawConn := conn
|
||||
// Pull the deadline from the connectCtx, which will be used for
|
||||
// timeouts in the authentication protocol handshake. Can ignore the
|
||||
// boolean as the deadline will return the zero value, which will make
|
||||
// the conn not timeout on I/O operations.
|
||||
deadline, _ := connectCtx.Deadline()
|
||||
rawConn.SetDeadline(deadline)
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn)
|
||||
rawConn.SetDeadline(time.Time{})
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
||||
}
|
||||
@ -288,6 +321,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
ctxDone: ctx.Done(), // Cache Done chan.
|
||||
cancel: cancel,
|
||||
userAgent: opts.UserAgent,
|
||||
registeredCompressors: grpcutil.RegisteredCompressors(),
|
||||
address: addr,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
@ -302,19 +337,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
isSecure: isSecure,
|
||||
perRPCCreds: perRPCCreds,
|
||||
kp: kp,
|
||||
statsHandler: opts.StatsHandler,
|
||||
statsHandlers: opts.StatsHandlers,
|
||||
initialWindowSize: initialWindowSize,
|
||||
onPrefaceReceipt: onPrefaceReceipt,
|
||||
nextID: 1,
|
||||
maxConcurrentStreams: defaultMaxStreamsClient,
|
||||
streamQuota: defaultMaxStreamsClient,
|
||||
streamsQuotaAvailable: make(chan struct{}, 1),
|
||||
czData: new(channelzData),
|
||||
onGoAway: onGoAway,
|
||||
onClose: onClose,
|
||||
keepaliveEnabled: keepaliveEnabled,
|
||||
bufferPool: newBufferPool(),
|
||||
onClose: onClose,
|
||||
}
|
||||
t.logger = prefixLoggerForClientTransport(t)
|
||||
// Add peer information to the http2client context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
if md, ok := addr.Metadata.(*metadata.MD); ok {
|
||||
t.md = *md
|
||||
@ -332,38 +368,50 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.statsHandlers {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if t.keepaliveEnabled {
|
||||
t.kpDormancyCond = sync.NewCond(&t.mu)
|
||||
go t.keepalive()
|
||||
}
|
||||
// Start the reader goroutine for incoming message. Each transport has
|
||||
// a dedicated goroutine which reads HTTP2 frame from network. Then it
|
||||
// dispatches the frame to the corresponding stream entity.
|
||||
go t.reader()
|
||||
|
||||
// Start the reader goroutine for incoming messages. Each transport has a
|
||||
// dedicated goroutine which reads HTTP2 frames from the network. Then it
|
||||
// dispatches the frame to the corresponding stream entity. When the
|
||||
// server preface is received, readerErrCh is closed. If an error occurs
|
||||
// first, an error is pushed to the channel. This must be checked before
|
||||
// returning from this function.
|
||||
readerErrCh := make(chan error, 1)
|
||||
go t.reader(readerErrCh)
|
||||
defer func() {
|
||||
if err == nil {
|
||||
err = <-readerErrCh
|
||||
}
|
||||
if err != nil {
|
||||
t.Close(err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Send connection preface to server.
|
||||
n, err := t.conn.Write(clientPreface)
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
if n != len(clientPreface) {
|
||||
err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
var ss []http2.Setting
|
||||
@ -383,14 +431,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
err = t.framer.fr.WriteSettings(ss...)
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
// Adjust the connection flow control window if needed.
|
||||
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
|
||||
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
|
||||
err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
|
||||
t.Close(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
@ -401,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
|
||||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
err := t.loopy.run()
|
||||
if err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
// Do not close the transport. Let reader goroutine handle it since
|
||||
// there might be data in the buffers.
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
return t, nil
|
||||
@ -457,7 +494,7 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo,
|
||||
AuthInfo: t.authInfo, // Can be nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -493,9 +530,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
|
||||
}
|
||||
|
||||
registeredCompressors := t.registeredCompressors
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||||
// Include the outgoing compressor name when compressor is not registered
|
||||
// via encoding.RegisterCompressor. This is possible when client uses
|
||||
// WithCompressor dial option.
|
||||
if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
|
||||
if registeredCompressors != "" {
|
||||
registeredCompressors += ","
|
||||
}
|
||||
registeredCompressors += callHdr.SendCompress
|
||||
}
|
||||
}
|
||||
|
||||
if registeredCompressors != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
@ -575,11 +625,15 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
|
||||
for _, c := range t.perRPCCreds {
|
||||
data, err := c.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
if _, ok := status.FromError(err); ok {
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
|
||||
return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
|
||||
}
|
||||
for k, v := range data {
|
||||
// Capital header names are illegal in HTTP/2.
|
||||
@ -604,7 +658,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
}
|
||||
data, err := callCreds.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "transport: %v", err)
|
||||
if st, ok := status.FromError(err); ok {
|
||||
// Restrict the code to the list allowed by gRFC A54.
|
||||
if istatus.IsRestrictedControlPlaneCode(st) {
|
||||
err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
|
||||
}
|
||||
callAuthData = make(map[string]string, len(data))
|
||||
for k, v := range data {
|
||||
@ -616,12 +677,21 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
return callAuthData, nil
|
||||
}
|
||||
|
||||
// NewStreamError wraps an error and reports additional information.
|
||||
// NewStreamError wraps an error and reports additional information. Typically
|
||||
// NewStream errors result in transparent retry, as they mean nothing went onto
|
||||
// the wire. However, there are two notable exceptions:
|
||||
//
|
||||
// 1. If the stream headers violate the max header list size allowed by the
|
||||
// server. It's possible this could succeed on another transport, even if
|
||||
// it's unlikely, but do not transparently retry.
|
||||
// 2. If the credentials errored when requesting their headers. In this case,
|
||||
// it's possible a retry can fix the problem, but indefinitely transparently
|
||||
// retrying is not appropriate as it is likely the credentials, if they can
|
||||
// eventually succeed, would need I/O to do so.
|
||||
type NewStreamError struct {
|
||||
Err error
|
||||
|
||||
DoNotRetry bool
|
||||
PerformedIO bool
|
||||
AllowTransparentRetry bool
|
||||
}
|
||||
|
||||
func (e NewStreamError) Error() string {
|
||||
@ -630,25 +700,23 @@ func (e NewStreamError) Error() string {
|
||||
|
||||
// NewStream creates a stream and registers it into the transport as "active"
|
||||
// streams. All non-nil errors returned will be *NewStreamError.
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
nse, ok := err.(*NewStreamError)
|
||||
if !ok {
|
||||
nse = &NewStreamError{Err: err}
|
||||
}
|
||||
if len(t.perRPCCreds) > 0 || callHdr.Creds != nil {
|
||||
// We may have performed I/O in the per-RPC creds callback, so do not
|
||||
// allow transparent retry.
|
||||
nse.PerformedIO = true
|
||||
}
|
||||
err = nse
|
||||
}
|
||||
}()
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) {
|
||||
ctx = peer.NewContext(ctx, t.getPeer())
|
||||
|
||||
// ServerName field of the resolver returned address takes precedence over
|
||||
// Host field of CallHdr to determine the :authority header. This is because,
|
||||
// the ServerName field takes precedence for server authentication during
|
||||
// TLS handshake, and the :authority header should match the value used
|
||||
// for server authentication.
|
||||
if t.address.ServerName != "" {
|
||||
newCallHdr := *callHdr
|
||||
newCallHdr.Host = t.address.ServerName
|
||||
callHdr = &newCallHdr
|
||||
}
|
||||
|
||||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
|
||||
}
|
||||
s := t.newStream(ctx, callHdr)
|
||||
cleanup := func(err error) {
|
||||
@ -670,17 +738,13 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
endStream: false,
|
||||
initStream: func(id uint32) error {
|
||||
t.mu.Lock()
|
||||
if state := t.state; state != reachable {
|
||||
// TODO: handle transport closure in loopy instead and remove this
|
||||
// initStream is never called when transport is draining.
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
// Do a quick cleanup.
|
||||
err := error(errStreamDrain)
|
||||
if state == closing {
|
||||
err = ErrConnClosing
|
||||
}
|
||||
cleanup(err)
|
||||
return err
|
||||
cleanup(ErrConnClosing)
|
||||
return ErrConnClosing
|
||||
}
|
||||
t.activeStreams[id] = s
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.streamsStarted, 1)
|
||||
atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
|
||||
@ -697,6 +761,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
}
|
||||
firstTry := true
|
||||
var ch chan struct{}
|
||||
transportDrainRequired := false
|
||||
checkForStreamQuota := func(it interface{}) bool {
|
||||
if t.streamQuota <= 0 { // Can go negative if server decreases it.
|
||||
if firstTry {
|
||||
@ -712,8 +777,20 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
h := it.(*headerFrame)
|
||||
h.streamID = t.nextID
|
||||
t.nextID += 2
|
||||
|
||||
// Drain client transport if nextID > MaxStreamID which signals gRPC that
|
||||
// the connection is closed and a new one must be created for subsequent RPCs.
|
||||
transportDrainRequired = t.nextID > MaxStreamID
|
||||
|
||||
s.id = h.streamID
|
||||
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
|
||||
t.mu.Lock()
|
||||
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
|
||||
t.mu.Unlock()
|
||||
return false // Don't create a stream if the transport is already closed.
|
||||
}
|
||||
t.activeStreams[s.id] = s
|
||||
t.mu.Unlock()
|
||||
if t.streamQuota > 0 && t.waitingStreams > 0 {
|
||||
select {
|
||||
case t.streamsQuotaAvailable <- struct{}{}:
|
||||
@ -739,52 +816,56 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
}
|
||||
for {
|
||||
success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
|
||||
if !checkForStreamQuota(it) {
|
||||
return false
|
||||
}
|
||||
if !checkForHeaderListSize(it) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return checkForHeaderListSize(it) && checkForStreamQuota(it)
|
||||
}, hdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Connection closed.
|
||||
return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
|
||||
}
|
||||
if success {
|
||||
break
|
||||
}
|
||||
if hdrListSizeErr != nil {
|
||||
return nil, &NewStreamError{Err: hdrListSizeErr, DoNotRetry: true}
|
||||
return nil, &NewStreamError{Err: hdrListSizeErr}
|
||||
}
|
||||
firstTry = false
|
||||
select {
|
||||
case <-ch:
|
||||
case <-s.ctx.Done():
|
||||
return nil, ContextErr(s.ctx.Err())
|
||||
case <-ctx.Done():
|
||||
return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
|
||||
case <-t.goAway:
|
||||
return nil, errStreamDrain
|
||||
return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
|
||||
case <-t.ctx.Done():
|
||||
return nil, ErrConnClosing
|
||||
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
if len(t.statsHandlers) != 0 {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
for _, sh := range t.statsHandlers {
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
// Note: Creating a new stats object to prevent pollution.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
if transportDrainRequired {
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
|
||||
}
|
||||
t.GracefulClose()
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
@ -867,20 +948,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
|
||||
// Close kicks off the shutdown process of the transport. This should be called
|
||||
// only once on a transport. Once it is called, the transport should not be
|
||||
// accessed any more.
|
||||
//
|
||||
// This method blocks until the addrConn that initiated this transport is
|
||||
// re-connected. This happens because t.onClose() begins reconnect logic at the
|
||||
// addrConn level and blocks until the addrConn is successfully connected.
|
||||
func (t *http2Client) Close(err error) {
|
||||
t.mu.Lock()
|
||||
// Make sure we only Close once.
|
||||
// Make sure we only close once.
|
||||
if t.state == closing {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
// Call t.onClose before setting the state to closing to prevent the client
|
||||
// from attempting to create new streams ASAP.
|
||||
t.onClose()
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing: %v", err)
|
||||
}
|
||||
// Call t.onClose ASAP to prevent the client from attempting to create new
|
||||
// streams.
|
||||
if t.state != draining {
|
||||
t.onClose(GoAwayInvalid)
|
||||
}
|
||||
t.state = closing
|
||||
streams := t.activeStreams
|
||||
t.activeStreams = nil
|
||||
@ -893,9 +975,7 @@ func (t *http2Client) Close(err error) {
|
||||
t.controlBuf.finish()
|
||||
t.cancel()
|
||||
t.conn.Close()
|
||||
if channelz.IsOn() {
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
}
|
||||
channelz.RemoveEntry(t.channelzID)
|
||||
// Append info about previous goaways if there were any, since this may be important
|
||||
// for understanding the root cause for this connection to be closed.
|
||||
_, goAwayDebugMessage := t.GetGoAwayReason()
|
||||
@ -912,11 +992,11 @@ func (t *http2Client) Close(err error) {
|
||||
for _, s := range streams {
|
||||
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
for _, sh := range t.statsHandlers {
|
||||
connEnd := &stats.ConnEnd{
|
||||
Client: true,
|
||||
}
|
||||
t.statsHandler.HandleConn(t.ctx, connEnd)
|
||||
sh.HandleConn(t.ctx, connEnd)
|
||||
}
|
||||
}
|
||||
|
||||
@ -932,11 +1012,15 @@ func (t *http2Client) GracefulClose() {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("GracefulClose called")
|
||||
}
|
||||
t.onClose(GoAwayInvalid)
|
||||
t.state = draining
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close(ErrConnClosing)
|
||||
t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
|
||||
return
|
||||
}
|
||||
t.controlBuf.put(&incomingGoAway{})
|
||||
@ -996,13 +1080,13 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
|
||||
// for the transport and the stream based on the current bdp
|
||||
// estimation.
|
||||
func (t *http2Client) updateFlowControl(n uint32) {
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
s.fc.newLimit(n)
|
||||
}
|
||||
t.mu.Unlock()
|
||||
updateIWS := func(interface{}) bool {
|
||||
t.initialWindowSize = int32(n)
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
s.fc.newLimit(n)
|
||||
}
|
||||
t.mu.Unlock()
|
||||
return true
|
||||
}
|
||||
t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
|
||||
@ -1077,7 +1161,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
||||
}
|
||||
// The server has closed the stream without sending trailers. Record that
|
||||
// the read direction is closed, and set the status appropriately.
|
||||
if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
|
||||
if f.StreamEnded() {
|
||||
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
|
||||
}
|
||||
}
|
||||
@ -1093,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
}
|
||||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||||
if !ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
|
||||
}
|
||||
statusCode = codes.Unknown
|
||||
}
|
||||
@ -1176,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
t.mu.Unlock()
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
|
||||
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
|
||||
// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
|
||||
// enabled by default and double the configure KEEPALIVE_TIME used for new connections
|
||||
// on that channel.
|
||||
logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
|
||||
}
|
||||
id := f.LastStreamID
|
||||
if id > 0 && id%2 == 0 {
|
||||
@ -1208,12 +1294,14 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
default:
|
||||
t.setGoAwayReason(f)
|
||||
close(t.goAway)
|
||||
t.controlBuf.put(&incomingGoAway{})
|
||||
defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
|
||||
// Notify the clientconn about the GOAWAY before we set the state to
|
||||
// draining, to allow the client to stop attempting to create streams
|
||||
// before disallowing new streams on this connection.
|
||||
t.onGoAway(t.goAwayReason)
|
||||
t.state = draining
|
||||
if t.state != draining {
|
||||
t.onClose(t.goAwayReason)
|
||||
t.state = draining
|
||||
}
|
||||
}
|
||||
// All streams with IDs greater than the GoAwayId
|
||||
// and smaller than the previous GoAway ID should be killed.
|
||||
@ -1221,24 +1309,35 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
if upperLimit == 0 { // This is the first GoAway Frame.
|
||||
upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
|
||||
}
|
||||
|
||||
t.prevGoAwayID = id
|
||||
if len(t.activeStreams) == 0 {
|
||||
t.mu.Unlock()
|
||||
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
||||
return
|
||||
}
|
||||
|
||||
streamsToClose := make([]*Stream, 0)
|
||||
for streamID, stream := range t.activeStreams {
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
// The stream was unprocessed by the server.
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
||||
if streamID > id && streamID <= upperLimit {
|
||||
atomic.StoreUint32(&stream.unprocessed, 1)
|
||||
streamsToClose = append(streamsToClose, stream)
|
||||
}
|
||||
}
|
||||
}
|
||||
t.prevGoAwayID = id
|
||||
active := len(t.activeStreams)
|
||||
t.mu.Unlock()
|
||||
if active == 0 {
|
||||
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
|
||||
// Called outside t.mu because closeStream can take controlBuf's mu, which
|
||||
// could induce deadlock and is not allowed.
|
||||
for _, stream := range streamsToClose {
|
||||
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
|
||||
}
|
||||
}
|
||||
|
||||
// setGoAwayReason sets the value of t.goAwayReason based
|
||||
// on the GoAway frame received.
|
||||
// It expects a lock on transport's mutext to be held by
|
||||
// It expects a lock on transport's mutex to be held by
|
||||
// the caller.
|
||||
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
|
||||
t.goAwayReason = GoAwayNoReason
|
||||
@ -1407,26 +1506,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
}
|
||||
|
||||
isHeader := false
|
||||
defer func() {
|
||||
if t.statsHandler != nil {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: s.trailer.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
@ -1448,6 +1527,25 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
close(s.headerChan)
|
||||
}
|
||||
|
||||
for _, sh := range t.statsHandlers {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: metadata.MD(mdata).Copy(),
|
||||
}
|
||||
sh.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
}
|
||||
|
||||
if !endStream {
|
||||
return
|
||||
}
|
||||
@ -1461,33 +1559,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true)
|
||||
}
|
||||
|
||||
// reader runs as a separate goroutine in charge of reading data from network
|
||||
// connection.
|
||||
//
|
||||
// TODO(zhaoq): currently one reader per transport. Investigate whether this is
|
||||
// optimal.
|
||||
// TODO(zhaoq): Check the validity of the incoming frame sequence.
|
||||
func (t *http2Client) reader() {
|
||||
defer close(t.readerDone)
|
||||
// Check the validity of server preface.
|
||||
// readServerPreface reads and handles the initial settings frame from the
|
||||
// server.
|
||||
func (t *http2Client) readServerPreface() error {
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if err != nil {
|
||||
err = connectionErrorf(true, err, "error reading server preface: %v", err)
|
||||
t.Close(err) // this kicks off resetTransport, so must be last before return
|
||||
return
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
return connectionErrorf(true, err, "error reading server preface: %v", err)
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
// this kicks off resetTransport, so must be last before return
|
||||
t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame))
|
||||
return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
|
||||
}
|
||||
t.handleSettings(sf, true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// reader verifies the server preface and reads all subsequent data from
|
||||
// network connection. If the server preface is not read successfully, an
|
||||
// error is pushed to errCh; otherwise errCh is closed with no error.
|
||||
func (t *http2Client) reader(errCh chan<- error) {
|
||||
defer close(t.readerDone)
|
||||
|
||||
if err := t.readServerPreface(); err != nil {
|
||||
errCh <- err
|
||||
return
|
||||
}
|
||||
t.onPrefaceReceipt()
|
||||
t.handleSettings(sf, true)
|
||||
close(errCh)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// loop to keep reading incoming messages on this transport.
|
||||
for {
|
||||
@ -1553,7 +1653,7 @@ func minTime(a, b time.Duration) time.Duration {
|
||||
return b
|
||||
}
|
||||
|
||||
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
|
||||
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
|
||||
func (t *http2Client) keepalive() {
|
||||
p := &ping{data: [8]byte{}}
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
@ -1690,3 +1790,9 @@ func (t *http2Client) getOutFlowWindow() int64 {
|
||||
return -2
|
||||
}
|
||||
}
|
||||
|
||||
func (t *http2Client) stateForTesting() transportState {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
return t.state
|
||||
}
466 vendor/google.golang.org/grpc/internal/transport/http2_server.go generated vendored
@ -35,12 +35,16 @@ import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/internal/pretty"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/internal/grpcsync"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
@ -52,10 +56,10 @@ import (
|
||||
var (
|
||||
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
|
||||
// the stream's state.
|
||||
ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
|
||||
ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
|
||||
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
|
||||
// than the limit set by peer.
|
||||
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
|
||||
ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
|
||||
)
|
||||
|
||||
// serverConnectionCounter counts the number of connections a server has seen
|
||||
@ -73,7 +77,6 @@ type http2Server struct {
|
||||
writerDone chan struct{} // sync point to enable testing.
|
||||
remoteAddr net.Addr
|
||||
localAddr net.Addr
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
inTapHandle tap.ServerInHandle
|
||||
framer *framer
|
||||
@ -83,7 +86,7 @@ type http2Server struct {
|
||||
// updates, reset streams, and various settings) to the controller.
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
stats []stats.Handler
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
// Keepalive enforcement policy.
|
||||
@ -102,13 +105,13 @@ type http2Server struct {
|
||||
|
||||
mu sync.Mutex // guard the following
|
||||
|
||||
// drainChan is initialized when Drain() is called the first time.
|
||||
// After which the server writes out the first GoAway(with ID 2^31-1) frame.
|
||||
// Then an independent goroutine will be launched to later send the second GoAway.
|
||||
// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
|
||||
// Thus call to Drain() will be a no-op if drainChan is already initialized since draining is
|
||||
// already underway.
|
||||
drainChan chan struct{}
|
||||
// drainEvent is initialized when Drain() is called the first time. After
|
||||
// which the server writes out the first GoAway(with ID 2^31-1) frame. Then
|
||||
// an independent goroutine will be launched to later send the second
|
||||
// GoAway. During this time we don't want to write another first GoAway(with
|
||||
// ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is
|
||||
// already initialized since draining is already underway.
|
||||
drainEvent *grpcsync.Event
|
||||
state transportState
|
||||
activeStreams map[uint32]*Stream
|
||||
// idle is the time instant when the connection went idle.
|
||||
@ -118,21 +121,44 @@ type http2Server struct {
|
||||
idle time.Time
|
||||
|
||||
// Fields below are for channelz metric collection.
|
||||
channelzID int64 // channelz unique identification number
|
||||
channelzID *channelz.Identifier
|
||||
czData *channelzData
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
|
||||
// maxStreamMu guards the maximum stream ID
|
||||
// This lock may not be taken if mu is already held.
|
||||
maxStreamMu sync.Mutex
|
||||
maxStreamID uint32 // max stream ID ever seen
|
||||
|
||||
logger *grpclog.PrefixLogger
|
||||
}
|
||||
|
||||
// NewServerTransport creates a http2 transport with conn and configuration
|
||||
// options from config.
|
||||
//
|
||||
// It returns a non-nil transport and a nil error on success. On failure, it
|
||||
// returns a non-nil transport and a nil-error. For a special case where the
|
||||
// returns a nil transport and a non-nil error. For a special case where the
|
||||
// underlying conn gets closed before the client preface could be read, it
|
||||
// returns a nil transport and a nil error.
|
||||
func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
|
||||
var authInfo credentials.AuthInfo
|
||||
rawConn := conn
|
||||
if config.Credentials != nil {
|
||||
var err error
|
||||
conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
|
||||
if err != nil {
|
||||
// ErrConnDispatched means that the connection was dispatched away
|
||||
// from gRPC; those connections should be left open. io.EOF means
|
||||
// the connection was closed before handshaking completed, which can
|
||||
// happen naturally from probers. Return these errors directly.
|
||||
if err == credentials.ErrConnDispatched || err == io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
}
|
||||
}
|
||||
writeBufSize := config.WriteBufferSize
|
||||
readBufSize := config.ReadBufferSize
|
||||
maxHeaderListSize := defaultServerMaxHeaderListSize
|
||||
@ -211,18 +237,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
if kp.Timeout == 0 {
|
||||
kp.Timeout = defaultServerKeepaliveTimeout
|
||||
}
|
||||
if kp.Time != infinity {
|
||||
if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
|
||||
}
|
||||
}
|
||||
kep := config.KeepalivePolicy
|
||||
if kep.MinTime == 0 {
|
||||
kep.MinTime = defaultKeepalivePolicyMinTime
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
t := &http2Server{
|
||||
ctx: context.Background(),
|
||||
ctx: setConnection(context.Background(), rawConn),
|
||||
done: done,
|
||||
conn: conn,
|
||||
remoteAddr: conn.RemoteAddr(),
|
||||
localAddr: conn.LocalAddr(),
|
||||
authInfo: config.AuthInfo,
|
||||
authInfo: authInfo,
|
||||
framer: framer,
|
||||
readerDone: make(chan struct{}),
|
||||
writerDone: make(chan struct{}),
|
||||
@ -231,7 +263,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
fc: &trInFlow{limit: uint32(icwz)},
|
||||
state: reachable,
|
||||
activeStreams: make(map[uint32]*Stream),
|
||||
stats: config.StatsHandler,
|
||||
stats: config.StatsHandlers,
|
||||
kp: kp,
|
||||
idle: time.Now(),
|
||||
kep: kep,
|
||||
@ -239,6 +271,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
czData: new(channelzData),
|
||||
bufferPool: newBufferPool(),
|
||||
}
|
||||
t.logger = prefixLoggerForServerTransport(t)
|
||||
// Add peer information to the http2server context.
|
||||
t.ctx = peer.NewContext(t.ctx, t.getPeer())
|
||||
|
||||
t.controlBuf = newControlBuffer(t.done)
|
||||
if dynamicWindow {
|
||||
t.bdpEst = &bdpEstimator{
|
||||
@ -246,25 +282,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
updateFlowControl: t.updateFlowControl,
|
||||
}
|
||||
}
|
||||
if t.stats != nil {
|
||||
t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
for _, sh := range t.stats {
|
||||
t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
})
|
||||
connBegin := &stats.ConnBegin{}
|
||||
t.stats.HandleConn(t.ctx, connBegin)
|
||||
sh.HandleConn(t.ctx, connBegin)
|
||||
}
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
}
|
||||
}()
|
||||
|
||||
@ -273,10 +309,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
if _, err := io.ReadFull(t.conn, preface); err != nil {
|
||||
// In deployments where a gRPC server runs behind a cloud load balancer
|
||||
// which performs regular TCP level health checks, the connection is
|
||||
// closed immediately by the latter. Skipping the error here will help
|
||||
// reduce log clutter.
|
||||
// closed immediately by the latter. Returning io.EOF here allows the
|
||||
// grpc server implementation to recognize this scenario and suppress
|
||||
// logging to reduce spam.
|
||||
if err == io.EOF {
|
||||
return nil, nil
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
|
||||
}
|
||||
@ -299,23 +336,22 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
|
||||
t.handleSettings(sf)
|
||||
|
||||
go func() {
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
if err := t.loopy.run(); err != nil {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
t.conn.Close()
|
||||
t.controlBuf.finish()
|
||||
t.loopy.run()
|
||||
close(t.writerDone)
|
||||
}()
|
||||
go t.keepalive()
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
|
||||
// operateHeaders takes action on the decoded headers. Returns an error if fatal
|
||||
// error encountered and transport needs to close, otherwise returns nil.
|
||||
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
|
||||
// Acquire max stream ID lock for entire duration
|
||||
t.maxStreamMu.Lock()
|
||||
defer t.maxStreamMu.Unlock()
|
||||
|
||||
streamID := frame.Header().StreamID
|
||||
|
||||
// frame.Truncated is set to true when framer detects that the current header
|
||||
@ -327,9 +363,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
rstCode: http2.ErrCodeFrameSize,
|
||||
onWrite: func() {},
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
// illegal gRPC stream id.
|
||||
return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
|
||||
}
|
||||
t.maxStreamID = streamID
|
||||
|
||||
buf := newRecvBuffer()
|
||||
s := &Stream{
|
||||
id: streamID,
|
||||
@ -337,15 +379,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
buf: buf,
|
||||
fc: &inFlow{limit: uint32(t.initialWindowSize)},
|
||||
}
|
||||
|
||||
var (
|
||||
// If a gRPC Response-Headers has already been received, then it means
|
||||
// that the peer is speaking gRPC and we are in gRPC mode.
|
||||
isGRPC = false
|
||||
mdata = make(map[string][]string)
|
||||
httpMethod string
|
||||
// headerError is set if an error is encountered while parsing the headers
|
||||
headerError bool
|
||||
// if false, content-type was missing or invalid
|
||||
isGRPC = false
|
||||
contentType = ""
|
||||
mdata = make(metadata.MD, len(frame.Fields))
|
||||
httpMethod string
|
||||
// these are set if an error is encountered while parsing the headers
|
||||
protocolError bool
|
||||
headerError *status.Status
|
||||
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
@ -356,11 +398,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
case "content-type":
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
|
||||
if !validContentType {
|
||||
contentType = hf.Value
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
s.contentSubtype = contentSubtype
|
||||
isGRPC = true
|
||||
|
||||
case "grpc-accept-encoding":
|
||||
mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
|
||||
if hf.Value == "" {
|
||||
continue
|
||||
}
|
||||
compressors := hf.Value
|
||||
if s.clientAdvertisedCompressors != "" {
|
||||
compressors = s.clientAdvertisedCompressors + "," + compressors
|
||||
}
|
||||
s.clientAdvertisedCompressors = compressors
|
||||
case "grpc-encoding":
|
||||
s.recvCompress = hf.Value
|
||||
case ":method":
|
||||
@ -371,30 +425,90 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
timeoutSet = true
|
||||
var err error
|
||||
if timeout, err = decodeTimeout(hf.Value); err != nil {
|
||||
headerError = true
|
||||
headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
|
||||
}
|
||||
// "Transports must consider requests containing the Connection header
|
||||
// as malformed." - A41
|
||||
case "connection":
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
|
||||
}
|
||||
protocolError = true
|
||||
default:
|
||||
if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
|
||||
break
|
||||
}
|
||||
v, err := decodeMetadataHeader(hf.Name, hf.Value)
|
||||
if err != nil {
|
||||
headerError = true
|
||||
logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
|
||||
t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
|
||||
break
|
||||
}
|
||||
mdata[hf.Name] = append(mdata[hf.Name], v)
|
||||
}
|
||||
}
|
||||
|
||||
if !isGRPC || headerError {
|
||||
// "If multiple Host headers or multiple :authority headers are present, the
|
||||
// request must be rejected with an HTTP status code 400 as required by Host
|
||||
// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
|
||||
// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
|
||||
// error, this takes precedence over a client not speaking gRPC.
|
||||
if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
|
||||
errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.New(codes.Internal, errMsg),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
if protocolError {
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
rstCode: http2.ErrCodeProtocol,
|
||||
onWrite: func() {},
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if !isGRPC {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusUnsupportedMediaType,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
if headerError != nil {
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: http.StatusBadRequest,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: headerError,
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
// "If :authority is missing, Host must be renamed to :authority." - A41
|
||||
if len(mdata[":authority"]) == 0 {
|
||||
// No-op if host isn't present, no eventual :authority header is a valid
|
||||
// RPC.
|
||||
if host, ok := mdata["host"]; ok {
|
||||
mdata[":authority"] = host
|
||||
delete(mdata, "host")
|
||||
}
|
||||
} else {
|
||||
// "If :authority is present, Host must be discarded" - A41
|
||||
delete(mdata, "host")
|
||||
}
|
||||
|
||||
if frame.StreamEnded() {
|
||||
@ -406,14 +520,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
} else {
|
||||
s.ctx, s.cancel = context.WithCancel(t.ctx)
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
s.ctx = peer.NewContext(s.ctx, pr)
|
||||
|
||||
// Attach the received metadata to the context.
|
||||
if len(mdata) > 0 {
|
||||
s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
|
||||
@ -428,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if uint32(len(t.activeStreams)) >= t.maxStreams {
|
||||
t.mu.Unlock()
|
||||
@ -439,49 +546,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
onWrite: func() {},
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
t.mu.Unlock()
|
||||
// illegal gRPC stream id.
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
}
|
||||
s.cancel()
|
||||
return true
|
||||
}
|
||||
t.maxStreamID = streamID
|
||||
if httpMethod != http.MethodPost {
|
||||
t.mu.Unlock()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod)
|
||||
errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early: %v", errMsg)
|
||||
}
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: streamID,
|
||||
rst: true,
|
||||
rstCode: http2.ErrCodeProtocol,
|
||||
onWrite: func() {},
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 405,
|
||||
streamID: streamID,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: status.New(codes.Internal, errMsg),
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
s.cancel()
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
if t.inTapHandle != nil {
|
||||
var err error
|
||||
if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
|
||||
t.mu.Unlock()
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
|
||||
}
|
||||
stat, ok := status.FromError(err)
|
||||
if !ok {
|
||||
stat = status.New(codes.PermissionDenied, err.Error())
|
||||
}
|
||||
t.controlBuf.put(&earlyAbortStream{
|
||||
httpStatus: 200,
|
||||
streamID: s.id,
|
||||
contentSubtype: s.contentSubtype,
|
||||
status: stat,
|
||||
rst: !frame.StreamEnded(),
|
||||
})
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
}
|
||||
t.activeStreams[streamID] = s
|
||||
@ -497,17 +598,17 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
t.adjustWindow(s, uint32(n))
|
||||
}
|
||||
s.ctx = traceCtx(s.ctx, s.method)
|
||||
if t.stats != nil {
|
||||
s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
for _, sh := range t.stats {
|
||||
s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
|
||||
inHeader := &stats.InHeader{
|
||||
FullMethod: s.method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(mdata).Copy(),
|
||||
Header: mdata.Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
sh.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
s.ctxDone = s.ctx.Done()
|
||||
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
|
||||
@ -528,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
wq: s.wq,
|
||||
})
|
||||
handle(s)
|
||||
return false
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleStreams receives incoming streams using the given handler. This is
|
||||
@ -542,8 +643,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Warningf("Encountered http2.StreamError: %v", se)
|
||||
}
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
@ -561,19 +662,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
continue
|
||||
}
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
}
|
||||
t.Close()
|
||||
t.Close(err)
|
||||
return
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.MetaHeadersFrame:
|
||||
if t.operateHeaders(frame, handle, traceCtx) {
|
||||
t.Close()
|
||||
if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
|
||||
t.Close(err)
|
||||
break
|
||||
}
|
||||
case *http2.DataFrame:
|
||||
@ -589,8 +687,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
case *http2.GoAwayFrame:
|
||||
// TODO: Handle GoAway from the client appropriately.
|
||||
default:
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Received unsupported frame type %T", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -717,7 +815,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||
s.write(recvMsg{buffer: buffer})
|
||||
}
|
||||
}
|
||||
if f.Header().Flags.Has(http2.FlagDataEndStream) {
|
||||
if f.StreamEnded() {
|
||||
// Received the end of stream from the client.
|
||||
s.compareAndSwapState(streamActive, streamReadDone)
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
@ -774,8 +872,8 @@ const (
|
||||
|
||||
func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
if f.IsAck() {
|
||||
if f.Data == goAwayPing.data && t.drainChan != nil {
|
||||
close(t.drainChan)
|
||||
if f.Data == goAwayPing.data && t.drainEvent != nil {
|
||||
t.drainEvent.Fire()
|
||||
return
|
||||
}
|
||||
// Maybe it's a BDP ping.
|
||||
@ -817,10 +915,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
|
||||
if t.pingStrikes > maxPingStrikes {
|
||||
// Send goaway and close the connection.
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
|
||||
}
|
||||
}
|
||||
|
||||
@ -852,8 +947,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@ -861,12 +956,27 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (t *http2Server) streamContextErr(s *Stream) error {
|
||||
select {
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
||||
s.hdrMu.Lock()
|
||||
defer s.hdrMu.Unlock()
|
||||
if s.getState() == streamDone {
|
||||
return t.streamContextErr(s)
|
||||
}
|
||||
|
||||
if s.updateHeaderSent() {
|
||||
return ErrIllegalHeaderWrite
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
|
||||
if md.Len() > 0 {
|
||||
if s.header.Len() > 0 {
|
||||
s.header = metadata.Join(s.header, md)
|
||||
@ -875,10 +985,8 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
}
|
||||
}
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return err
|
||||
return status.Convert(err).Err()
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -909,14 +1017,14 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
t.closeStream(s, true, http2.ErrCodeInternal, false)
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: Headers are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
sh.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -926,17 +1034,19 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
|
||||
// OK is adopted.
|
||||
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
s.hdrMu.Lock()
|
||||
defer s.hdrMu.Unlock()
|
||||
|
||||
if s.getState() == streamDone {
|
||||
return nil
|
||||
}
|
||||
s.hdrMu.Lock()
|
||||
|
||||
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
|
||||
// first and create a slice of that exact size.
|
||||
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
|
||||
if !s.updateHeaderSent() { // No headers have been sent.
|
||||
if len(s.header) > 0 { // Send a separate header frame.
|
||||
if err := t.writeHeaderLocked(s); err != nil {
|
||||
s.hdrMu.Unlock()
|
||||
return err
|
||||
}
|
||||
} else { // Send a trailer only response.
|
||||
@ -951,7 +1061,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
||||
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
|
||||
} else {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
|
||||
}
|
||||
@ -965,7 +1075,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
endStream: true,
|
||||
onWrite: t.setResetPingStrikes,
|
||||
}
|
||||
s.hdrMu.Unlock()
|
||||
|
||||
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
|
||||
if !success {
|
||||
if err != nil {
|
||||
@ -977,10 +1087,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
// Send a RST_STREAM after the trailers if the client has not already half-closed.
|
||||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
for _, sh := range t.stats {
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
sh.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
@ -992,23 +1102,12 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
if !s.isHeaderSent() { // Headers haven't been written yet.
|
||||
if err := t.WriteHeader(s, nil); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
return err
|
||||
}
|
||||
// TODO(mmukhi, dfawley): Make sure this is the right code to return.
|
||||
return status.Errorf(codes.Internal, "transport: %v", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Writing headers checks for this condition.
|
||||
if s.getState() == streamDone {
|
||||
// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
|
||||
s.cancel()
|
||||
select {
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
return t.streamContextErr(s)
|
||||
}
|
||||
}
|
||||
df := &dataFrame{
|
||||
@ -1018,12 +1117,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
onEachWrite: t.setResetPingStrikes,
|
||||
}
|
||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||
select {
|
||||
case <-t.done:
|
||||
return ErrConnClosing
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
return t.streamContextErr(s)
|
||||
}
|
||||
return t.controlBuf.put(df)
|
||||
}
|
||||
@ -1072,20 +1166,20 @@ func (t *http2Server) keepalive() {
|
||||
if val <= 0 {
|
||||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.Drain()
|
||||
t.Drain("max_idle")
|
||||
return
|
||||
}
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.Drain()
|
||||
t.Drain("max_age")
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||||
if t.logger.V(logLevel) {
|
||||
t.logger.Infof("Closing server transport due to maximum connection age")
|
||||
}
|
||||
t.Close()
|
||||
t.controlBuf.put(closeConnection{})
|
||||
case <-t.done:
|
||||
}
|
||||
return
@ -1101,10 +1195,7 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
if logger.V(logLevel) {
logger.Infof("transport: closing server transport due to idleness.")
}
t.Close()
t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
return
}
if !outstandingPing {
@ -1131,40 +1222,37 @@ func (t *http2Server) keepalive() {
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
func (t *http2Server) Close() {
func (t *http2Server) Close(err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
}
if t.logger.V(logLevel) {
t.logger.Infof("Closing: %v", err)
}
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
if err := t.conn.Close(); err != nil && logger.V(logLevel) {
logger.Infof("transport: error closing conn during Close: %v", err)
}
if channelz.IsOn() {
channelz.RemoveEntry(t.channelzID)
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
}
channelz.RemoveEntry(t.channelzID)
// Cancel all active streams.
for _, s := range streams {
s.cancel()
}
if t.stats != nil {
for _, sh := range t.stats {
connEnd := &stats.ConnEnd{}
t.stats.HandleConn(t.ctx, connEnd)
sh.HandleConn(t.ctx, connEnd)
}
}

// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()

t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
@ -1186,6 +1274,11 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {

// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()

oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
@ -1205,6 +1298,11 @@ func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, h

// closeStream clears the footprint of a stream when the stream is not needed any more.
func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()

s.swapState(streamDone)
t.deleteStream(s, eosReceived)

@ -1220,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr {
return t.remoteAddr
}

func (t *http2Server) Drain() {
func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
if t.drainChan != nil {
if t.drainEvent != nil {
return
}
t.drainChan = make(chan struct{})
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true})
t.drainEvent = grpcsync.NewEvent()
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
}

var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@ -1235,39 +1333,41 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
t.maxStreamMu.Lock()
t.mu.Lock()
if t.state == closing { // TODO(mmukhi): This seems unnecessary.
t.mu.Unlock()
t.maxStreamMu.Unlock()
// The transport is closing.
return false, ErrConnClosing
}
sid := t.maxStreamID
if !g.headsUp {
// Stop accepting more streams now.
t.state = draining
sid := t.maxStreamID
retErr := g.closeConn
if len(t.activeStreams) == 0 {
g.closeConn = true
retErr = errors.New("second GOAWAY written and no active streams left to process")
}
t.mu.Unlock()
t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
if g.closeConn {
// Abruptly close the connection following the GoAway (via
// loopywriter). But flush out what's inside the buffer first.
t.framer.writer.Flush()
return false, fmt.Errorf("transport: Connection closing")
if retErr != nil {
return false, retErr
}
return true, nil
}
t.mu.Unlock()
t.maxStreamMu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
@ -1277,7 +1377,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
timer := time.NewTimer(time.Minute)
defer timer.Stop()
select {
case <-t.drainChan:
case <-t.drainEvent.Done():
case <-timer.C:
case <-t.done:
return
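The two-step GOAWAY-plus-ping handshake described in the comments above can be reproduced with the x/net/http2 framer directly; a standalone sketch, where the stream ID and debug data are made up for illustration:

package main

import (
	"bytes"
	"fmt"
	"math"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil)

	// Step 1: a "heads up" GOAWAY with MaxUint32 keeps the connection usable,
	// and the ping that follows lets the server learn when the client saw it.
	_ = fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte("max_age"))
	_ = fr.WritePing(false, [8]byte{1, 6, 1, 8, 0, 3, 3, 9})

	// Step 2 (after the ping ack or a timeout): a second GOAWAY advertises the
	// real last stream ID the server intends to process.
	const lastStreamID = 7 // hypothetical value
	_ = fr.WriteGoAway(lastStreamID, http2.ErrCodeNo, []byte("max_age"))

	fmt.Printf("encoded %d bytes of HTTP/2 frames\n", buf.Len())
}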
@ -1336,6 +1436,13 @@ func (t *http2Server) getOutFlowWindow() int64 {
}
}

func (t *http2Server) getPeer() *peer.Peer {
return &peer.Peer{
Addr: t.remoteAddr,
AuthInfo: t.authInfo, // Can be nil
}
}

func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
@ -1345,3 +1452,18 @@ func getJitter(v time.Duration) time.Duration {
j := grpcrand.Int63n(2*r) - r
return time.Duration(j)
}

type connectionKey struct{}

// GetConnection gets the connection from the context.
func GetConnection(ctx context.Context) net.Conn {
conn, _ := ctx.Value(connectionKey{}).(net.Conn)
return conn
}

// SetConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
func setConnection(ctx context.Context, conn net.Conn) context.Context {
return context.WithValue(ctx, connectionKey{}, conn)
}
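The connection-in-context helpers above boil down to a typed context key; a self-contained sketch of the same pattern (the names here are illustrative, not the package's exported API):

package main

import (
	"context"
	"fmt"
	"net"
)

// connKey plays the role of the unexported connectionKey{} above.
type connKey struct{}

func withConn(ctx context.Context, c net.Conn) context.Context {
	return context.WithValue(ctx, connKey{}, c)
}

func connFrom(ctx context.Context) net.Conn {
	c, _ := ctx.Value(connKey{}).(net.Conn)
	return c
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	ctx := withConn(context.Background(), server)
	if c := connFrom(ctx); c != nil {
		// An interceptor could log or inspect the connection like this.
		fmt.Println("recovered connection of type:", c.LocalAddr().Network())
	}
}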

60
vendor/google.golang.org/grpc/internal/transport/http_util.go
generated
vendored
@ -20,8 +20,8 @@ package transport

import (
"bufio"
"bytes"
"encoding/base64"
"errors"
"fmt"
"io"
"math"
@ -38,21 +38,14 @@ import (
"golang.org/x/net/http2/hpack"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
)

const (
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
http2MaxFrameLen = 16384 // 16KB frame
// http://http2.github.io/http2-spec/#SettingValues
// https://httpwg.org/specs/rfc7540.html#SettingValues
http2InitHeaderTableSize = 4096
// baseContentType is the base content-type for gRPC. This is a valid
// content-type on it's own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.

)

var (
@ -92,7 +85,6 @@ var (
// 504 Gateway timeout - UNAVAILABLE.
http.StatusGatewayTimeout: codes.Unavailable,
}
logger = grpclog.Component("transport")
)

// isReservedHeader checks whether hdr belongs to HTTP2 headers
@ -257,13 +249,13 @@ func encodeGrpcMessage(msg string) string {
}

func encodeGrpcMessageUnchecked(msg string) string {
var buf bytes.Buffer
var sb strings.Builder
for len(msg) > 0 {
r, size := utf8.DecodeRuneInString(msg)
for _, b := range []byte(string(r)) {
if size > 1 {
// If size > 1, r is not ascii. Always do percent encoding.
buf.WriteString(fmt.Sprintf("%%%02X", b))
fmt.Fprintf(&sb, "%%%02X", b)
continue
}

@ -272,14 +264,14 @@ func encodeGrpcMessageUnchecked(msg string) string {
//
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
if b >= spaceByte && b <= tildeByte && b != percentByte {
buf.WriteByte(b)
sb.WriteByte(b)
} else {
buf.WriteString(fmt.Sprintf("%%%02X", b))
fmt.Fprintf(&sb, "%%%02X", b)
}
}
msg = msg[size:]
}
return buf.String()
return sb.String()
}

// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
@ -297,23 +289,23 @@ func decodeGrpcMessage(msg string) string {
}

func decodeGrpcMessageUnchecked(msg string) string {
var buf bytes.Buffer
var sb strings.Builder
lenMsg := len(msg)
for i := 0; i < lenMsg; i++ {
c := msg[i]
if c == percentByte && i+2 < lenMsg {
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
if err != nil {
buf.WriteByte(c)
sb.WriteByte(c)
} else {
buf.WriteByte(byte(parsed))
sb.WriteByte(byte(parsed))
i += 2
}
} else {
buf.WriteByte(c)
sb.WriteByte(c)
}
}
return buf.String()
return sb.String()
}
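The encode/decode pair above implements the grpc-message percent-encoding: printable ASCII other than '%' passes through, and every other byte becomes %XX. A standalone sketch of the encoding half (the function name is made up):

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

func percentEncode(msg string) string {
	var sb strings.Builder
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg)
		for _, b := range []byte(string(r)) {
			if size == 1 && b >= ' ' && b <= '~' && b != '%' {
				sb.WriteByte(b) // printable ASCII is copied verbatim
				continue
			}
			fmt.Fprintf(&sb, "%%%02X", b) // everything else is %-escaped per byte
		}
		msg = msg[size:]
	}
	return sb.String()
}

func main() {
	fmt.Println(percentEncode("café closed")) // prints "caf%C3%A9 closed"
}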

type bufWriter struct {
@ -322,8 +314,6 @@ type bufWriter struct {
batchSize int
conn net.Conn
err error

onFlush func()
}

func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
@ -339,7 +329,8 @@ func (w *bufWriter) Write(b []byte) (n int, err error) {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
return w.conn.Write(b)
n, err = w.conn.Write(b)
return n, toIOError(err)
}
for len(b) > 0 {
nn := copy(w.buf[w.offset:], b)
@ -360,14 +351,31 @@ func (w *bufWriter) Flush() error {
if w.offset == 0 {
return nil
}
if w.onFlush != nil {
w.onFlush()
}
_, w.err = w.conn.Write(w.buf[:w.offset])
w.err = toIOError(w.err)
w.offset = 0
return w.err
}

type ioError struct {
error
}

func (i ioError) Unwrap() error {
return i.error
}

func isIOError(err error) bool {
return errors.As(err, &ioError{})
}

func toIOError(err error) error {
if err == nil {
return nil
}
return ioError{error: err}
}
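The new ioError wrapper lets callers distinguish connection write failures via errors.As while keeping the original cause reachable; a minimal sketch of the same wrap-and-unwrap pattern:

package main

import (
	"errors"
	"fmt"
	"io"
)

// ioError mirrors the wrapper above: it marks an error as coming from the
// underlying connection while still exposing the cause via Unwrap.
type ioError struct{ error }

func (i ioError) Unwrap() error { return i.error }

func main() {
	err := ioError{error: io.ErrClosedPipe}

	fmt.Println(errors.As(err, &ioError{}))       // true: it is an I/O error
	fmt.Println(errors.Is(err, io.ErrClosedPipe)) // true: the cause is preserved
}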

type framer struct {
writer *bufWriter
fr *http2.Framer

40
vendor/google.golang.org/grpc/internal/transport/logging.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/

package transport

import (
"fmt"

"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

var logger = grpclog.Component("transport")

func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
}

func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
}

func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
}
2
vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
generated
vendored
@ -31,7 +31,7 @@ const key = keyType("grpc.internal.transport.networktype")

// Set returns a copy of the provided address with attributes containing networkType.
func Set(address resolver.Address, networkType string) resolver.Address {
address.Attributes = address.Attributes.WithValues(key, networkType)
address.Attributes = address.Attributes.WithValue(key, networkType)
return address
}
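For reference, WithValue is the single-pair replacement for the removed variadic WithValues; a small sketch of how resolver code can attach one attribute (the key type and values here are illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

type keyType string

const networkTypeKey = keyType("example.networktype")

func main() {
	addr := resolver.Address{Addr: "10.0.0.1:443"}
	// WithValue returns a copy of the attributes with one extra key/value pair.
	addr.Attributes = addr.Attributes.WithValue(networkTypeKey, "tcp")
	fmt.Println(addr.Attributes.Value(networkTypeKey)) // prints "tcp"
}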
Some files were not shown because too many files have changed in this diff.