Mirror of https://github.com/cloudflare/cloudflared.git, synced 2025-07-28 14:09:58 +00:00
TUN-3375: Upgrade coredns and prometheus dependencies
vendor/google.golang.org/grpc/internal/transport/controlbuf.go (generated, vendored): 76 changed lines
@@ -505,7 +505,9 @@ func (l *loopyWriter) run() (err error) {
 			// 1. When the connection is closed by some other known issue.
 			// 2. User closed the connection.
 			// 3. A graceful close of connection.
-			infof("transport: loopyWriter.run returning. %v", err)
+			if logger.V(logLevel) {
+				logger.Infof("transport: loopyWriter.run returning. %v", err)
+			}
 			err = nil
 		}
 	}()
@@ -605,7 +607,9 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
 	if l.side == serverSide {
 		str, ok := l.estdStreams[h.streamID]
 		if !ok {
-			warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			}
 			return nil
 		}
 		// Case 1.A: Server is responding back with headers.
@@ -658,7 +662,9 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He
 	l.hBuf.Reset()
 	for _, f := range hf {
 		if err := l.hEnc.WriteField(f); err != nil {
-			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+			if logger.V(logLevel) {
+				logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
+			}
 		}
 	}
 	var (
@@ -857,38 +863,45 @@ func (l *loopyWriter) processData() (bool, error) {
 		return false, nil
 	}
 	var (
-		idx int
 		buf []byte
 	)
-	if len(dataItem.h) != 0 { // data header has not been written out yet.
-		buf = dataItem.h
-	} else {
-		idx = 1
-		buf = dataItem.d
-	}
-	size := http2MaxFrameLen
-	if len(buf) < size {
-		size = len(buf)
-	}
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
 	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
 		str.state = waitingOnStreamQuota
 		return false, nil
-	} else if strQuota < size {
-		size = strQuota
+	} else if maxSize > strQuota {
+		maxSize = strQuota
 	}
-
-	if l.sendQuota < uint32(size) { // connection-level flow control.
-		size = int(l.sendQuota)
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
 	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, len(dataItem.d))
+	if hSize != 0 {
+		if dSize == 0 {
+			buf = dataItem.h
+		} else {
+			// We can add some data to grpc message header to distribute bytes more equally across frames.
+			// Copy on the stack to avoid generating garbage
+			var localBuf [http2MaxFrameLen]byte
+			copy(localBuf[:hSize], dataItem.h)
+			copy(localBuf[hSize:], dataItem.d[:dSize])
+			buf = localBuf[:hSize+dSize]
+		}
+	} else {
+		buf = dataItem.d
+	}
+
+	size := hSize + dSize
+
 	// Now that outgoing flow controls are checked we can replenish str's write quota
 	str.wq.replenish(size)
 	var endStream bool
 	// If this is the last data message on this stream and all of it can be written in this iteration.
-	if dataItem.endStream && size == len(buf) {
-		// buf contains either data or it contains header but data is empty.
-		if idx == 1 || len(dataItem.d) == 0 {
-			endStream = true
-		}
+	if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+		endStream = true
 	}
 	if dataItem.onEachWrite != nil {
 		dataItem.onEachWrite()
@@ -896,14 +909,10 @@ func (l *loopyWriter) processData() (bool, error) {
 	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
 		return false, err
 	}
-	buf = buf[size:]
 	str.bytesOutStanding += size
 	l.sendQuota -= uint32(size)
-	if idx == 0 {
-		dataItem.h = buf
-	} else {
-		dataItem.d = buf
-	}
+	dataItem.h = dataItem.h[hSize:]
+	dataItem.d = dataItem.d[dSize:]
+
 	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
 		str.itl.dequeue()
@@ -924,3 +933,10 @@ func (l *loopyWriter) processData() (bool, error) {
 	}
 	return false, nil
 }
+
+func min(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
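The rewritten processData above sizes each HTTP/2 DATA frame as the minimum of the stream-level quota, the connection-level quota, and the maximum frame length, and then spends that budget on gRPC message-header bytes before payload bytes. Below is a minimal standalone sketch of that size computation; the function name frameSizes and the literal numbers are illustrative only and are not part of the vendored code.

package main

import "fmt"

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// frameSizes mirrors the hSize/dSize computation in loopyWriter.processData:
// cap the write at the smaller of the two flow-control quotas and the frame
// limit, then fill that budget with header bytes before data bytes.
func frameSizes(maxFrameLen, streamQuota, connQuota, hdrLen, dataLen int) (hSize, dSize int) {
	maxSize := maxFrameLen
	if maxSize > streamQuota {
		maxSize = streamQuota
	}
	if maxSize > connQuota {
		maxSize = connQuota
	}
	hSize = min(maxSize, hdrLen)
	dSize = min(maxSize-hSize, dataLen)
	return hSize, dSize
}

func main() {
	// Illustrative numbers: a 5-byte gRPC message header and a payload larger
	// than one 16 KiB frame; only 16379 payload bytes fit alongside the header.
	fmt.Println(frameSizes(16384, 20000, 30000, 5, 40000)) // 5 16379
}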
vendor/google.golang.org/grpc/internal/transport/handler_server.go (generated, vendored): 86 changed lines
@@ -39,6 +39,7 @@ import (
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -57,7 +58,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
 	}
 	contentType := r.Header.Get("Content-Type")
 	// TODO: do we assume contentType is lowercase? we did before
-	contentSubtype, validContentType := contentSubtype(contentType)
+	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
 	if !validContentType {
 		return nil, errors.New("invalid gRPC request content-type")
 	}
@@ -112,11 +113,10 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
@@ -186,8 +186,11 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
ht.writeStatusMu.Lock()
|
||||
defer ht.writeStatusMu.Unlock()
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
@@ -227,21 +230,27 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
|
||||
|
||||
if err == nil { // transport has not been closed
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
}
|
||||
ht.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
// writePendingHeaders sets common and custom headers on the first
|
||||
// write call (Write, WriteHeader, or WriteStatus)
|
||||
func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.writeCustomHeaders(s)
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", ht.contentType)
|
||||
@@ -260,9 +269,30 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
}
|
||||
}
|
||||
|
||||
// writeCustomHeaders sets custom headers set on the stream via SetHeader
|
||||
// on the first write call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
|
||||
h := ht.rw.Header()
|
||||
|
||||
s.hdrMu.Lock()
|
||||
for k, vv := range s.header {
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
h.Add(k, encodeMetadataHeader(k, v))
|
||||
}
|
||||
}
|
||||
|
||||
s.hdrMu.Unlock()
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
|
||||
headersWritten := s.updateHeaderSent()
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
ht.rw.Write(hdr)
|
||||
ht.rw.Write(data)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
@@ -270,26 +300,28 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if err := s.SetHeader(md); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
headersWritten := s.updateHeaderSent()
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
v = encodeMetadataHeader(k, v)
|
||||
h.Add(k, v)
|
||||
}
|
||||
if !headersWritten {
|
||||
ht.writePendingHeaders(s)
|
||||
}
|
||||
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
if ht.stats != nil {
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
|
||||
Header: md.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
})
|
||||
}
|
||||
}
|
||||
return err
|
||||
@@ -334,7 +366,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
|
||||
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
|
||||
}
|
||||
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
|
||||
s.ctx = peer.NewContext(ctx, pr)
|
||||
|
vendor/google.golang.org/grpc/internal/transport/http2_client.go (generated, vendored): 187 changed lines
@@ -32,20 +32,29 @@ import (
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/syscall"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// clientConnectionCounter counts the number of connections a client has
|
||||
// initiated (equal to the number of http2Clients created). Must be accessed
|
||||
// atomically.
|
||||
var clientConnectionCounter uint64
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
ctxDone <-chan struct{} // Cache the ctx.Done() chan.
|
||||
@@ -75,9 +84,6 @@ type http2Client struct {
|
||||
|
||||
perRPCCreds []credentials.PerRPCCredentials
|
||||
|
||||
// Boolean to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
kp keepalive.ClientParameters
|
||||
keepaliveEnabled bool
|
||||
|
||||
@@ -127,6 +133,8 @@ type http2Client struct {
|
||||
onClose func()
|
||||
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
|
||||
@@ -155,7 +163,7 @@ func isTemporary(err error) bool {
|
||||
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
|
||||
// and starts to receive messages on it. Non-nil error returns if construction
|
||||
// fails.
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
|
||||
scheme := "http"
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer func() {
|
||||
@@ -208,12 +216,20 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
}
|
||||
if transportCreds != nil {
|
||||
scheme = "https"
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
|
||||
// gRPC, resolver, balancer etc. can specify arbitrary data in the
|
||||
// Attributes field of resolver.Address, which is shoved into connectCtx
|
||||
// and passed to the credential handshaker. This makes it possible for
|
||||
// address specific arbitrary data to reach the credential handshaker.
|
||||
contextWithHandshakeInfo := internal.NewClientHandshakeInfoContext.(func(context.Context, credentials.ClientHandshakeInfo) context.Context)
|
||||
connectCtx = contextWithHandshakeInfo(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
|
||||
conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
|
||||
}
|
||||
isSecure = true
|
||||
if transportCreds.Info().SecurityProtocol == "tls" {
|
||||
scheme = "https"
|
||||
}
|
||||
}
|
||||
dynamicWindow := true
|
||||
icwz := int32(initialWindowSize)
|
||||
@@ -330,6 +346,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
}
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
|
||||
|
||||
if err := t.framer.writer.Flush(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -337,7 +355,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
err := t.loopy.run()
|
||||
if err != nil {
|
||||
errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
// If it's a connection error, let reader goroutine handle it
|
||||
// since there might be data in the buffers.
|
||||
@@ -352,6 +372,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
|
||||
s := &Stream{
|
||||
ct: t,
|
||||
done: make(chan struct{}),
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
@@ -385,23 +406,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
}
|
||||
|
||||
func (t *http2Client) getPeer() *peer.Peer {
|
||||
pr := &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
return &peer.Peer{
|
||||
Addr: t.remoteAddr,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
return pr
|
||||
}
|
||||
|
||||
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
|
||||
aud := t.createAudience(callHdr)
|
||||
authData, err := t.getTrAuthData(ctx, aud)
|
||||
ri := credentials.RequestInfo{
|
||||
Method: callHdr.Method,
|
||||
AuthInfo: t.authInfo,
|
||||
}
|
||||
ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
|
||||
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
|
||||
callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -415,7 +437,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
|
||||
if callHdr.PreviousAttempts > 0 {
|
||||
@@ -424,12 +446,13 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
|
||||
|
||||
if callHdr.SendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if dl, ok := ctx.Deadline(); ok {
|
||||
// Send out timeout regardless its value. The server can detect timeout context by itself.
|
||||
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
|
||||
timeout := time.Until(dl)
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
|
||||
}
|
||||
for k, v := range authData {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
|
||||
@@ -543,13 +566,26 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call
|
||||
return callAuthData, nil
|
||||
}
|
||||
|
||||
// PerformedIOError wraps an error to indicate IO may have been performed
|
||||
// before the error occurred.
|
||||
type PerformedIOError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error implements error.
|
||||
func (p PerformedIOError) Error() string {
|
||||
return p.Err.Error()
|
||||
}
|
||||
|
||||
// NewStream creates a stream and registers it into the transport as "active"
|
||||
// streams.
|
||||
func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
|
||||
ctx = peer.NewContext(ctx, t.getPeer())
|
||||
headerFields, err := t.createHeaderFields(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// We may have performed I/O in the per-RPC creds callback, so do not
|
||||
// allow transparent retry.
|
||||
return nil, PerformedIOError{err}
|
||||
}
|
||||
s := t.newStream(ctx, callHdr)
|
||||
cleanup := func(err error) {
|
||||
@@ -669,12 +705,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
||||
}
|
||||
}
|
||||
if t.statsHandler != nil {
|
||||
header, ok := metadata.FromOutgoingContext(ctx)
|
||||
if ok {
|
||||
header.Set("user-agent", t.userAgent)
|
||||
} else {
|
||||
header = metadata.Pairs("user-agent", t.userAgent)
|
||||
}
|
||||
// Note: The header fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Client: true,
|
||||
FullMethod: callHdr.Method,
|
||||
RemoteAddr: t.remoteAddr,
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: callHdr.SendCompress,
|
||||
Header: header,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, outHeader)
|
||||
}
|
||||
@@ -834,18 +879,10 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
df := &dataFrame{
|
||||
streamID: s.id,
|
||||
endStream: opts.Last,
|
||||
h: hdr,
|
||||
d: data,
|
||||
}
|
||||
if hdr != nil || data != nil { // If it's not an empty data frame.
|
||||
// Add some data to grpc message header so that we can equally
|
||||
// distribute bytes across frames.
|
||||
emptyLen := http2MaxFrameLen - len(hdr)
|
||||
if emptyLen > len(data) {
|
||||
emptyLen = len(data)
|
||||
}
|
||||
hdr = append(hdr, data[:emptyLen]...)
|
||||
data = data[emptyLen:]
|
||||
df.h, df.d = hdr, data
|
||||
// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
|
||||
if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
|
||||
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -979,7 +1016,9 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
}
|
||||
statusCode, ok := http2ErrConvTab[f.ErrCode]
|
||||
if !ok {
|
||||
warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
|
||||
}
|
||||
statusCode = codes.Unknown
|
||||
}
|
||||
if statusCode == codes.Canceled {
|
||||
@@ -1061,7 +1100,9 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
return
|
||||
}
|
||||
if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
|
||||
infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
|
||||
}
|
||||
}
|
||||
id := f.LastStreamID
|
||||
if id > 0 && id%2 != 1 {
|
||||
@@ -1175,14 +1216,17 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
if t.statsHandler != nil {
|
||||
if isHeader {
|
||||
inHeader := &stats.InHeader{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.recvCompress,
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inHeader)
|
||||
} else {
|
||||
inTrailer := &stats.InTrailer{
|
||||
Client: true,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Trailer: s.trailer.Copy(),
|
||||
}
|
||||
t.statsHandler.HandleRPC(s.ctx, inTrailer)
|
||||
}
|
||||
@@ -1191,6 +1235,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
|
||||
// If headerChan hasn't been closed yet
|
||||
if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
|
||||
s.headerValid = true
|
||||
if !endStream {
|
||||
// HEADERS frame block carries a Response-Headers.
|
||||
isHeader = true
|
||||
@@ -1233,7 +1278,7 @@ func (t *http2Client) reader() {
|
||||
}
|
||||
t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
@@ -1248,7 +1293,7 @@ func (t *http2Client) reader() {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
if t.keepaliveEnabled {
|
||||
atomic.CompareAndSwapUint32(&t.activity, 0, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
}
|
||||
if err != nil {
|
||||
// Abort an active stream if the http2.Framer returns a
|
||||
@@ -1287,22 +1332,48 @@ func (t *http2Client) reader() {
|
||||
case *http2.WindowUpdateFrame:
|
||||
t.handleWindowUpdate(frame)
|
||||
default:
|
||||
errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func minTime(a, b time.Duration) time.Duration {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
|
||||
func (t *http2Client) keepalive() {
|
||||
p := &ping{data: [8]byte{}}
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
timeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
timer := time.NewTimer(t.kp.Time)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were here.
|
||||
outstandingPing = false
|
||||
// Next timer should fire at kp.Time seconds from lastRead time.
|
||||
timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
if outstandingPing && timeoutLeft <= 0 {
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
t.mu.Lock()
|
||||
if t.state == closing {
|
||||
// If the transport is closing, we should exit from the
|
||||
@@ -1315,36 +1386,36 @@ func (t *http2Client) keepalive() {
|
||||
return
|
||||
}
|
||||
if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream {
|
||||
// If a ping was sent out previously (because there were active
|
||||
// streams at that point) which wasn't acked and its timeout
|
||||
// hadn't fired, but we got here and are about to go dormant,
|
||||
// we should make sure that we unconditionally send a ping once
|
||||
// we awaken.
|
||||
outstandingPing = false
|
||||
t.kpDormant = true
|
||||
t.kpDormancyCond.Wait()
|
||||
}
|
||||
t.kpDormant = false
|
||||
t.mu.Unlock()
|
||||
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
// We get here either because we were dormant and a new stream was
|
||||
// created which unblocked the Wait() call, or because the
|
||||
// keepalive timer expired. In both cases, we need to send a ping.
|
||||
t.controlBuf.put(p)
|
||||
|
||||
timer.Reset(t.kp.Timeout)
|
||||
select {
|
||||
case <-timer.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
timer.Reset(t.kp.Time)
|
||||
continue
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
infof("transport: closing client transport due to idleness.")
|
||||
t.Close()
|
||||
return
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
return
|
||||
t.controlBuf.put(p)
|
||||
timeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, timeoutLeft)
|
||||
timeoutLeft -= sleepDuration
|
||||
timer.Reset(sleepDuration)
|
||||
case <-t.ctx.Done():
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
|
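The client keepalive rework in the hunks above drops the old activity flag and instead compares an atomically stored lastRead timestamp against the time of the previous wakeup: if there was read activity, the timer is re-armed to fire kp.Time after the most recent read; otherwise a ping is sent and the loop waits out the remaining ack timeout. A simplified, self-contained sketch of that timer pattern follows; keepaliveLoop, ping, closeConn, and done are hypothetical stand-ins for the transport plumbing, and the dormancy handling for PermitWithoutStream is omitted.

package keepalive

import (
	"sync/atomic"
	"time"
)

// keepaliveLoop sketches the lastRead-based pattern from http2Client.keepalive:
// the reader goroutine stores time.Now().UnixNano() into lastRead on every
// frame, and this loop pings only when nothing was read since its last wakeup,
// closing the connection if a ping stays unanswered past kpTimeout.
func keepaliveLoop(lastRead *int64, kpTime, kpTimeout time.Duration, ping, closeConn func(), done <-chan struct{}) {
	outstandingPing := false
	timeoutLeft := time.Duration(0)
	prevNano := time.Now().UnixNano()
	timer := time.NewTimer(kpTime)
	for {
		select {
		case <-timer.C:
			last := atomic.LoadInt64(lastRead)
			if last > prevNano {
				// Read activity since the last wakeup: fire kp.Time after that read.
				outstandingPing = false
				timer.Reset(time.Duration(last) + kpTime - time.Duration(time.Now().UnixNano()))
				prevNano = last
				continue
			}
			if outstandingPing && timeoutLeft <= 0 {
				closeConn()
				return
			}
			if !outstandingPing {
				ping()
				timeoutLeft = kpTimeout
				outstandingPing = true
			}
			// Wait the smaller of kp.Time and the remaining ack timeout.
			sleep := kpTime
			if timeoutLeft < sleep {
				sleep = timeoutLeft
			}
			timeoutLeft -= sleep
			timer.Reset(sleep)
		case <-done:
			if !timer.Stop() {
				<-timer.C
			}
			return
		}
	}
}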
vendor/google.golang.org/grpc/internal/transport/http2_server.go (generated, vendored): 195 changed lines
@@ -34,12 +34,10 @@ import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/internal/channelz"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
@@ -57,13 +55,15 @@ var (
|
||||
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
|
||||
// than the limit set by peer.
|
||||
ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
|
||||
// statusRawProto is a function to get to the raw status proto wrapped in a
|
||||
// status.Status without a proto.Clone().
|
||||
statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
|
||||
)
|
||||
|
||||
// serverConnectionCounter counts the number of connections a server has seen
|
||||
// (equal to the number of http2Servers created). Must be accessed atomically.
|
||||
var serverConnectionCounter uint64
|
||||
|
||||
// http2Server implements the ServerTransport interface with HTTP2.
|
||||
type http2Server struct {
|
||||
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
|
||||
ctx context.Context
|
||||
done chan struct{}
|
||||
conn net.Conn
|
||||
@@ -83,12 +83,8 @@ type http2Server struct {
|
||||
controlBuf *controlBuffer
|
||||
fc *trInFlow
|
||||
stats stats.Handler
|
||||
// Flag to keep track of reading activity on transport.
|
||||
// 1 is true and 0 is false.
|
||||
activity uint32 // Accessed atomically.
|
||||
// Keepalive and max-age parameters for the server.
|
||||
kp keepalive.ServerParameters
|
||||
|
||||
// Keepalive enforcement policy.
|
||||
kep keepalive.EnforcementPolicy
|
||||
// The time instance last ping was received.
|
||||
@@ -124,6 +120,8 @@ type http2Server struct {
|
||||
channelzID int64 // channelz unique identification number
|
||||
czData *channelzData
|
||||
bufferPool *bufferPool
|
||||
|
||||
connectionID uint64
|
||||
}
|
||||
|
||||
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
|
||||
@@ -174,6 +172,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
Val: *config.MaxHeaderListSize,
|
||||
})
|
||||
}
|
||||
if config.HeaderTableSize != nil {
|
||||
isettings = append(isettings, http2.Setting{
|
||||
ID: http2.SettingHeaderTableSize,
|
||||
Val: *config.HeaderTableSize,
|
||||
})
|
||||
}
|
||||
if err := framer.fr.WriteSettings(isettings...); err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: %v", err)
|
||||
}
|
||||
@@ -247,6 +251,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if channelz.IsOn() {
|
||||
t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
|
||||
}
|
||||
|
||||
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
|
||||
|
||||
t.framer.writer.Flush()
|
||||
|
||||
defer func() {
|
||||
@@ -271,7 +278,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
if err != nil {
|
||||
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
|
||||
}
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
sf, ok := frame.(*http2.SettingsFrame)
|
||||
if !ok {
|
||||
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
|
||||
@@ -282,7 +289,9 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
|
||||
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
|
||||
t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
|
||||
if err := t.loopy.run(); err != nil {
|
||||
errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: loopyWriter.run returning. Err: %v", err)
|
||||
}
|
||||
}
|
||||
t.conn.Close()
|
||||
close(t.writerDone)
|
||||
@@ -353,7 +362,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
}
|
||||
s.ctx, err = t.inTapHandle(s.ctx, info)
|
||||
if err != nil {
|
||||
warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
|
||||
}
|
||||
t.controlBuf.put(&cleanupStream{
|
||||
streamID: s.id,
|
||||
rst: true,
|
||||
@@ -384,7 +395,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
if streamID%2 != 1 || streamID <= t.maxStreamID {
|
||||
t.mu.Unlock()
|
||||
// illegal gRPC stream id.
|
||||
errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
|
||||
}
|
||||
s.cancel()
|
||||
return true
|
||||
}
|
||||
@@ -410,6 +423,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
|
||||
LocalAddr: t.localAddr,
|
||||
Compression: s.recvCompress,
|
||||
WireLength: int(frame.Header().Length),
|
||||
Header: metadata.MD(state.data.mdata).Copy(),
|
||||
}
|
||||
t.stats.HandleRPC(s.ctx, inHeader)
|
||||
}
|
||||
@@ -443,10 +457,12 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
for {
|
||||
t.controlBuf.throttle()
|
||||
frame, err := t.framer.fr.ReadFrame()
|
||||
atomic.StoreUint32(&t.activity, 1)
|
||||
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
|
||||
if err != nil {
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
|
||||
}
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
t.mu.Unlock()
|
||||
@@ -466,7 +482,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
|
||||
}
|
||||
t.Close()
|
||||
return
|
||||
}
|
||||
@@ -489,7 +507,9 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
|
||||
case *http2.GoAwayFrame:
|
||||
// TODO: Handle GoAway from the client appropriately.
|
||||
default:
|
||||
errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -591,6 +611,10 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if s.getState() == streamReadDone {
|
||||
t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
|
||||
return
|
||||
}
|
||||
if size > 0 {
|
||||
if err := s.fc.onData(size); err != nil {
|
||||
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
|
||||
@@ -711,7 +735,9 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
|
||||
|
||||
if t.pingStrikes > maxPingStrikes {
|
||||
// Send goaway and close the connection.
|
||||
errorf("transport: Got too many pings from the client, closing the connection.")
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("transport: Got too many pings from the client, closing the connection.")
|
||||
}
|
||||
t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
|
||||
}
|
||||
}
|
||||
@@ -744,14 +770,16 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
|
||||
var sz int64
|
||||
for _, f := range hdrFrame.hf {
|
||||
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
|
||||
errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// WriteHeader sends the header metedata md back to the client.
|
||||
// WriteHeader sends the header metadata md back to the client.
|
||||
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
if s.updateHeaderSent() || s.getState() == streamDone {
|
||||
return ErrIllegalHeaderWrite
|
||||
@@ -781,7 +809,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
// first and create a slice of that exact size.
|
||||
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
|
||||
if s.sendCompress != "" {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
|
||||
}
|
||||
@@ -800,9 +828,12 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
|
||||
return ErrHeaderListSizeLimitViolation
|
||||
}
|
||||
if t.stats != nil {
|
||||
// Note: WireLength is not set in outHeader.
|
||||
// TODO(mmukhi): Revisit this later, if needed.
|
||||
outHeader := &stats.OutHeader{}
|
||||
// Note: Headers are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
outHeader := &stats.OutHeader{
|
||||
Header: s.header.Copy(),
|
||||
Compression: s.sendCompress,
|
||||
}
|
||||
t.stats.HandleRPC(s.Context(), outHeader)
|
||||
}
|
||||
return nil
|
||||
@@ -828,17 +859,17 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
}
|
||||
} else { // Send a trailer only response.
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
|
||||
}
|
||||
}
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
|
||||
|
||||
if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
|
||||
if p := st.Proto(); p != nil && len(p.Details) > 0 {
|
||||
stBytes, err := proto.Marshal(p)
|
||||
if err != nil {
|
||||
// TODO: return error instead, when callers are able to handle it.
|
||||
grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
||||
logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
|
||||
} else {
|
||||
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
|
||||
}
|
||||
@@ -865,7 +896,11 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
|
||||
rst := s.getState() == streamActive
|
||||
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
|
||||
if t.stats != nil {
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
|
||||
// Note: The trailer fields are compressed with hpack after this call returns.
|
||||
// No WireLength field is set here.
|
||||
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
|
||||
Trailer: s.trailer.Copy(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -894,13 +929,6 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
return ContextErr(s.ctx.Err())
|
||||
}
|
||||
}
|
||||
// Add some data to header frame so that we can equally distribute bytes across frames.
|
||||
emptyLen := http2MaxFrameLen - len(hdr)
|
||||
if emptyLen > len(data) {
|
||||
emptyLen = len(data)
|
||||
}
|
||||
hdr = append(hdr, data[:emptyLen]...)
|
||||
data = data[emptyLen:]
|
||||
df := &dataFrame{
|
||||
streamID: s.id,
|
||||
h: hdr,
|
||||
@@ -926,32 +954,35 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
|
||||
// after an additional duration of keepalive.Timeout.
|
||||
func (t *http2Server) keepalive() {
|
||||
p := &ping{}
|
||||
var pingSent bool
|
||||
maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
maxAge := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
keepalive := time.NewTimer(t.kp.Time)
|
||||
// NOTE: All exit paths of this function should reset their
|
||||
// respective timers. A failure to do so will cause the
|
||||
// following clean-up to deadlock and eventually leak.
|
||||
// True iff a ping has been sent, and no data has been received since then.
|
||||
outstandingPing := false
|
||||
// Amount of time remaining before which we should receive an ACK for the
|
||||
// last sent ping.
|
||||
kpTimeoutLeft := time.Duration(0)
|
||||
// Records the last value of t.lastRead before we go block on the timer.
|
||||
// This is required to check for read activity since then.
|
||||
prevNano := time.Now().UnixNano()
|
||||
// Initialize the different timers to their default values.
|
||||
idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
|
||||
ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
|
||||
kpTimer := time.NewTimer(t.kp.Time)
|
||||
defer func() {
|
||||
if !maxIdle.Stop() {
|
||||
<-maxIdle.C
|
||||
}
|
||||
if !maxAge.Stop() {
|
||||
<-maxAge.C
|
||||
}
|
||||
if !keepalive.Stop() {
|
||||
<-keepalive.C
|
||||
}
|
||||
// We need to drain the underlying channel in these timers after a call
|
||||
// to Stop(), only if we are interested in resetting them. Clearly we
|
||||
// are not interested in resetting them here.
|
||||
idleTimer.Stop()
|
||||
ageTimer.Stop()
|
||||
kpTimer.Stop()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-maxIdle.C:
|
||||
case <-idleTimer.C:
|
||||
t.mu.Lock()
|
||||
idle := t.idle
|
||||
if idle.IsZero() { // The connection is non-idle.
|
||||
t.mu.Unlock()
|
||||
maxIdle.Reset(t.kp.MaxConnectionIdle)
|
||||
idleTimer.Reset(t.kp.MaxConnectionIdle)
|
||||
continue
|
||||
}
|
||||
val := t.kp.MaxConnectionIdle - time.Since(idle)
|
||||
@@ -960,43 +991,55 @@ func (t *http2Server) keepalive() {
|
||||
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
|
||||
// Gracefully close the connection.
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxIdle.Reset(infinity)
|
||||
return
|
||||
}
|
||||
maxIdle.Reset(val)
|
||||
case <-maxAge.C:
|
||||
idleTimer.Reset(val)
|
||||
case <-ageTimer.C:
|
||||
t.drain(http2.ErrCodeNo, []byte{})
|
||||
maxAge.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
|
||||
select {
|
||||
case <-maxAge.C:
|
||||
case <-ageTimer.C:
|
||||
// Close the connection after grace period.
|
||||
infof("transport: closing server transport due to maximum connection age.")
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to maximum connection age.")
|
||||
}
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
maxAge.Reset(infinity)
|
||||
case <-t.done:
|
||||
}
|
||||
return
|
||||
case <-keepalive.C:
|
||||
if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
|
||||
pingSent = false
|
||||
keepalive.Reset(t.kp.Time)
|
||||
case <-kpTimer.C:
|
||||
lastRead := atomic.LoadInt64(&t.lastRead)
|
||||
if lastRead > prevNano {
|
||||
// There has been read activity since the last time we were
|
||||
// here. Setup the timer to fire at kp.Time seconds from
|
||||
// lastRead time and continue.
|
||||
outstandingPing = false
|
||||
kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
|
||||
prevNano = lastRead
|
||||
continue
|
||||
}
|
||||
if pingSent {
|
||||
infof("transport: closing server transport due to idleness.")
|
||||
if outstandingPing && kpTimeoutLeft <= 0 {
|
||||
if logger.V(logLevel) {
|
||||
logger.Infof("transport: closing server transport due to idleness.")
|
||||
}
|
||||
t.Close()
|
||||
// Resetting the timer so that the clean-up doesn't deadlock.
|
||||
keepalive.Reset(infinity)
|
||||
return
|
||||
}
|
||||
pingSent = true
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
if !outstandingPing {
|
||||
if channelz.IsOn() {
|
||||
atomic.AddInt64(&t.czData.kpCount, 1)
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
kpTimeoutLeft = t.kp.Timeout
|
||||
outstandingPing = true
|
||||
}
|
||||
t.controlBuf.put(p)
|
||||
keepalive.Reset(t.kp.Timeout)
|
||||
// The amount of time to sleep here is the minimum of kp.Time and
|
||||
// timeoutLeft. This will ensure that we wait only for kp.Time
|
||||
// before sending out the next ping (for cases where the ping is
|
||||
// acked).
|
||||
sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
|
||||
kpTimeoutLeft -= sleepDuration
|
||||
kpTimer.Reset(sleepDuration)
|
||||
case <-t.done:
|
||||
return
|
||||
}
|
||||
|
vendor/google.golang.org/grpc/internal/transport/http_util.go (generated, vendored): 86 changed lines
@@ -37,6 +37,8 @@ import (
|
||||
"golang.org/x/net/http2/hpack"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
@@ -50,7 +52,7 @@ const (
|
||||
// "proto" as a suffix after "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
|
||||
// for more details.
|
||||
baseContentType = "application/grpc"
|
||||
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -97,6 +99,7 @@
 		// 504 Gateway timeout - UNAVAILABLE.
 		http.StatusGatewayTimeout: codes.Unavailable,
 	}
+	logger = grpclog.Component("transport")
 )
 
 type parsedHeaderData struct {
@@ -182,46 +185,6 @@ func isWhitelistedHeader(hdr string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype returns the content-subtype for the given content-type. The
|
||||
// given content-type must be a valid content-type that starts with
|
||||
// "application/grpc". A content-subtype will follow "application/grpc" after a
|
||||
// "+" or ";". See
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// If contentType is not a valid content-type for gRPC, the boolean
|
||||
// will be false, otherwise true. If content-type == "application/grpc",
|
||||
// "application/grpc+", or "application/grpc;", the boolean will be true,
|
||||
// but no content-subtype will be returned.
|
||||
//
|
||||
// contentType is assumed to be lowercase already.
|
||||
func contentSubtype(contentType string) (string, bool) {
|
||||
if contentType == baseContentType {
|
||||
return "", true
|
||||
}
|
||||
if !strings.HasPrefix(contentType, baseContentType) {
|
||||
return "", false
|
||||
}
|
||||
// guaranteed since != baseContentType and has baseContentType prefix
|
||||
switch contentType[len(baseContentType)] {
|
||||
case '+', ';':
|
||||
// this will return true for "application/grpc+" or "application/grpc;"
|
||||
// which the previous validContentType function tested to be valid, so we
|
||||
// just say that no content-subtype is specified in this case
|
||||
return contentType[len(baseContentType)+1:], true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
|
||||
|
||||
// contentSubtype is assumed to be lowercase
|
||||
func contentType(contentSubtype string) string {
|
||||
if contentSubtype == "" {
|
||||
return baseContentType
|
||||
}
|
||||
return baseContentType + "+" + contentSubtype
|
||||
}
|
||||
|
||||
func (d *decodeState) status() *status.Status {
|
||||
if d.data.statusGen == nil {
|
||||
// No status-details were provided; generate status using code/msg.
|
||||
@@ -340,7 +303,7 @@ func (d *decodeState) addMetadata(k, v string) {
|
||||
func (d *decodeState) processHeaderField(f hpack.HeaderField) {
|
||||
switch f.Name {
|
||||
case "content-type":
|
||||
contentSubtype, validContentType := contentSubtype(f.Value)
|
||||
contentSubtype, validContentType := grpcutil.ContentSubtype(f.Value)
|
||||
if !validContentType {
|
||||
d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
|
||||
return
|
||||
@@ -412,7 +375,9 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) {
|
||||
}
|
||||
v, err := decodeMetadataHeader(f.Name, f.Value)
|
||||
if err != nil {
|
||||
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||
if logger.V(logLevel) {
|
||||
logger.Errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
d.addMetadata(f.Name, v)
|
||||
@@ -449,41 +414,6 @@ func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
const maxTimeoutValue int64 = 100000000 - 1
|
||||
|
||||
// div does integer division and round-up the result. Note that this is
|
||||
// equivalent to (d+r-1)/r but has less chance to overflow.
|
||||
func div(d, r time.Duration) int64 {
|
||||
if m := d % r; m > 0 {
|
||||
return int64(d/r + 1)
|
||||
}
|
||||
return int64(d / r)
|
||||
}
|
||||
|
||||
// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
|
||||
func encodeTimeout(t time.Duration) string {
|
||||
if t <= 0 {
|
||||
return "0n"
|
||||
}
|
||||
if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "n"
|
||||
}
|
||||
if d := div(t, time.Microsecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "u"
|
||||
}
|
||||
if d := div(t, time.Millisecond); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "m"
|
||||
}
|
||||
if d := div(t, time.Second); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "S"
|
||||
}
|
||||
if d := div(t, time.Minute); d <= maxTimeoutValue {
|
||||
return strconv.FormatInt(d, 10) + "M"
|
||||
}
|
||||
// Note that maxTimeoutValue * time.Hour > MaxInt64.
|
||||
return strconv.FormatInt(div(t, time.Hour), 10) + "H"
|
||||
}
|
||||
|
||||
func decodeTimeout(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
|
vendor/google.golang.org/grpc/internal/transport/log.go (generated, vendored): 44 changed lines (file deleted)
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Infof(format, args...)
-	}
-}
-
-func warningf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Warningf(format, args...)
-	}
-}
-
-func errorf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Errorf(format, args...)
-	}
-}
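With log.go deleted, the transport package no longer routes through the infof/warningf/errorf wrappers: the http_util.go hunk above adds a package-level logger = grpclog.Component("transport"), the transport.go hunk below moves const logLevel = 2 into that file, and every call site is guarded explicitly. A minimal illustration of the new call-site pattern follows; the function name logExample is hypothetical, and logger and logLevel already exist in the real package, so this is a sketch rather than code to paste in.

package transport

import "google.golang.org/grpc/grpclog"

// The component logger tags records with the "transport" component name and
// is still gated on verbosity level 2, matching the old wrappers' behavior.
var logger = grpclog.Component("transport")

const logLevel = 2

func logExample(err error) {
	if logger.V(logLevel) {
		logger.Warningf("transport: example warning: %v", err)
	}
}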
vendor/google.golang.org/grpc/internal/transport/transport.go (generated, vendored): 84 changed lines
@@ -35,11 +35,14 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
 )
 
+const logLevel = 2
+
 type bufferPool struct {
 	pool sync.Pool
 }
@@ -73,10 +76,11 @@ type recvMsg struct {
|
||||
}
|
||||
|
||||
// recvBuffer is an unbounded channel of recvMsg structs.
|
||||
// Note recvBuffer differs from controlBuffer only in that recvBuffer
|
||||
// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
|
||||
// recvBuffer is written to much more often than
|
||||
// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
|
||||
//
|
||||
// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
|
||||
// holds a channel of recvMsg structs instead of objects implementing "item"
|
||||
// interface. recvBuffer is written to much more often and using strict recvMsg
|
||||
// structs helps avoid allocation in "recvBuffer.put"
|
||||
type recvBuffer struct {
|
||||
c chan recvMsg
|
||||
mu sync.Mutex
|
||||
@@ -233,6 +237,7 @@ const (
|
||||
type Stream struct {
|
||||
id uint32
|
||||
st ServerTransport // nil for client side Stream
|
||||
ct *http2Client // nil for server side Stream
|
||||
ctx context.Context // the associated context of the stream
|
||||
cancel context.CancelFunc // always nil for client side Stream
|
||||
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
|
||||
@@ -251,6 +256,10 @@ type Stream struct {
|
||||
|
||||
headerChan chan struct{} // closed to indicate the end of header metadata.
|
||||
headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
|
||||
// headerValid indicates whether a valid header was received. Only
|
||||
// meaningful after headerChan is closed (always call waitOnHeader() before
|
||||
// reading its value). Not valid on server side.
|
||||
headerValid bool
|
||||
|
||||
// hdrMu protects header and trailer metadata on the server-side.
|
||||
hdrMu sync.Mutex
|
||||
@@ -303,34 +312,28 @@ func (s *Stream) getState() streamState {
|
||||
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
|
||||
}
|
||||
|
||||
func (s *Stream) waitOnHeader() error {
|
||||
func (s *Stream) waitOnHeader() {
|
||||
if s.headerChan == nil {
|
||||
// On the server headerChan is always nil since a stream originates
|
||||
// only after having received headers.
|
||||
return nil
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
// We prefer success over failure when reading messages because we delay
|
||||
// context error in stream.Read(). To keep behavior consistent, we also
|
||||
// prefer success here.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
return ContextErr(s.ctx.Err())
|
||||
// Close the stream to prevent headers/trailers from changing after
|
||||
// this function returns.
|
||||
s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
|
||||
// headerChan could possibly not be closed yet if closeStream raced
|
||||
// with operateHeaders; wait until it is closed explicitly here.
|
||||
<-s.headerChan
|
||||
case <-s.headerChan:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RecvCompress returns the compression algorithm applied to the inbound
|
||||
// message. It is empty string if there is no compression applied.
|
||||
func (s *Stream) RecvCompress() string {
|
||||
if err := s.waitOnHeader(); err != nil {
|
||||
return ""
|
||||
}
|
||||
s.waitOnHeader()
|
||||
return s.recvCompress
|
||||
}
|
||||
|
||||
@@ -351,36 +354,27 @@ func (s *Stream) Done() <-chan struct{} {
|
||||
// available. It blocks until i) the metadata is ready or ii) there is no header
|
||||
// metadata or iii) the stream is canceled/expired.
|
||||
//
|
||||
// On server side, it returns the out header after t.WriteHeader is called.
|
||||
// On server side, it returns the out header after t.WriteHeader is called. It
|
||||
// does not block and must not be called until after WriteHeader.
|
||||
func (s *Stream) Header() (metadata.MD, error) {
|
||||
if s.headerChan == nil && s.header != nil {
|
||||
if s.headerChan == nil {
|
||||
// On server side, return the header in stream. It will be the out
|
||||
// header after t.WriteHeader is called.
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
err := s.waitOnHeader()
|
||||
// Even if the stream is closed, header is returned if available.
|
||||
select {
|
||||
case <-s.headerChan:
|
||||
if s.header == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.header.Copy(), nil
|
||||
default:
|
||||
s.waitOnHeader()
|
||||
if !s.headerValid {
|
||||
return nil, s.status.Err()
|
||||
}
|
||||
return nil, err
|
||||
return s.header.Copy(), nil
|
||||
}
|
||||
|
||||
// TrailersOnly blocks until a header or trailers-only frame is received and
|
||||
// then returns true if the stream was trailers-only. If the stream ends
|
||||
// before headers are received, returns true, nil. If a context error happens
|
||||
// first, returns it as a status error. Client-side only.
|
||||
func (s *Stream) TrailersOnly() (bool, error) {
|
||||
err := s.waitOnHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return s.noHeaders, nil
|
||||
// before headers are received, returns true, nil. Client-side only.
|
||||
func (s *Stream) TrailersOnly() bool {
|
||||
s.waitOnHeader()
|
||||
return s.noHeaders
|
||||
}
|
||||
|
||||
// Trailer returns the cached trailer metedata. Note that if it is not called
|
||||
@@ -534,6 +528,7 @@ type ServerConfig struct {
|
||||
ReadBufferSize int
|
||||
ChannelzParentID int64
|
||||
MaxHeaderListSize *uint32
|
||||
HeaderTableSize *uint32
|
||||
}
|
||||
|
||||
// NewServerTransport creates a ServerTransport with conn or non-nil error
|
||||
@@ -576,17 +571,10 @@ type ConnectOptions struct {
|
||||
MaxHeaderListSize *uint32
|
||||
}
|
||||
|
||||
// TargetInfo contains the information of the target such as network address and metadata.
|
||||
type TargetInfo struct {
|
||||
Addr string
|
||||
Metadata interface{}
|
||||
Authority string
|
||||
}
|
||||
|
||||
// NewClientTransport establishes the transport with the required ConnectOptions
|
||||
// and returns it to the caller.
|
||||
func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
|
||||
func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
|
||||
return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose)
|
||||
}
|
||||
|
||||
// Options provides additional hints and information for message
|
||||
|