Mirror of https://github.com/cloudflare/cloudflared.git, synced 2025-07-28 11:09:58 +00:00
TUN-8407: Upgrade go to version 1.22.2
vendor/google.golang.org/grpc/server.go (generated, vendored), 180 lines changed
@@ -33,8 +33,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"golang.org/x/net/trace"
-
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
@@ -74,9 +72,6 @@ func init() {
 		return srv.isRegisteredMethod(method)
 	}
 	internal.ServerFromContext = serverFromContext
-	internal.DrainServerTransports = func(srv *Server, addr string) {
-		srv.drainServerTransports(addr)
-	}
 	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
 		globalServerOptions = append(globalServerOptions, opt...)
 	}
@@ -134,17 +129,18 @@ type Server struct {
 	drain    bool
 	cv       *sync.Cond              // signaled when connections close for GracefulStop
 	services map[string]*serviceInfo // service name -> service info
-	events   trace.EventLog
+	events   traceEventLog
 
 	quit               *grpcsync.Event
 	done               *grpcsync.Event
 	channelzRemoveOnce sync.Once
-	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+	handlersWG         sync.WaitGroup // counts active method handler goroutines
 
-	channelzID *channelz.Identifier
-	czData     *channelzData
+	channelz *channelz.Server
 
-	serverWorkerChannel chan func()
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
 }
 
 type serverOptions struct {
@@ -175,6 +171,7 @@ type serverOptions struct {
 	headerTableSize       *uint32
 	numServerWorkers      uint32
 	recvBufferPool        SharedBufferPool
+	waitForHandlers       bool
 }
 
 var defaultServerOptions = serverOptions{
@@ -251,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption {
 }
 
 // WriteBufferSize determines how much data can be batched before doing a write
-// on the wire. The corresponding memory allocation for this buffer will be
-// twice the size to keep syscalls low. The default value for this buffer is
-// 32KB. Zero or negative values will disable the write buffer such that each
-// write will be on underlying connection.
-// Note: A Send call may not directly translate to a write.
+// on the wire. The default value for this buffer is 32KB. Zero or negative
+// values will disable the write buffer such that each write will be on underlying
+// connection. Note: A Send call may not directly translate to a write.
 func WriteBufferSize(s int) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.writeBufferSize = s
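The rewritten comment drops the claim that the buffer allocates twice the requested size; the WriteBufferSize option itself is unchanged. A minimal sketch of how the option is passed to grpc.NewServer; the 64 KB value and the listen address are arbitrary illustrative choices, not part of this change:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", "localhost:50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }

        // Batch up to 64 KB of outgoing data before each write on the wire;
        // zero or a negative value disables the write buffer entirely.
        srv := grpc.NewServer(
            grpc.WriteBufferSize(64*1024),
            grpc.ReadBufferSize(64*1024),
        )

        if err := srv.Serve(lis); err != nil {
            log.Fatalf("serve: %v", err)
        }
    }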
@@ -572,6 +567,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption {
 	})
 }
 
+// WaitForHandlers cause Stop to wait until all outstanding method handlers have
+// exited before returning. If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.waitForHandlers = w
+	})
+}
+
 // RecvBufferPool returns a ServerOption that configures the server
 // to use the provided shared buffer pool for parsing incoming messages. Depending
 // on the application's workload, this could result in reduced memory allocation.
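This hunk vendors the new WaitForHandlers option (experimental in gRPC-Go). A sketch of how a caller could opt in, assuming services are registered elsewhere; the address and shutdown trigger are illustrative only:

    package main

    import (
        "log"
        "net"

        "google.golang.org/grpc"
    )

    func main() {
        lis, err := net.Listen("tcp", "localhost:50051")
        if err != nil {
            log.Fatalf("listen: %v", err)
        }

        // With WaitForHandlers(true), Stop blocks until every in-flight method
        // handler has returned, instead of returning as soon as connections close.
        srv := grpc.NewServer(grpc.WaitForHandlers(true))

        go func() {
            if err := srv.Serve(lis); err != nil {
                log.Printf("serve: %v", err)
            }
        }()

        // ... serve traffic until shutdown is requested ...

        srv.Stop() // also waits for outstanding handlers because of the option
    }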
@@ -623,15 +633,14 @@ func (s *Server) serverWorker() {
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
 	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
 }
 
-func (s *Server) stopServerWorkers() {
-	close(s.serverWorkerChannel)
-}
-
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
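stopServerWorkers disappears in favour of a close function wrapped in grpcsync.OnceFunc, so the worker channel can be closed from more than one shutdown path without a double-close panic. A standalone sketch of the same close-once pattern using the standard library's sync.OnceFunc (Go 1.21+), independent of the gRPC internals:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        work := make(chan func())
        closeWork := sync.OnceFunc(func() { close(work) })

        var wg sync.WaitGroup
        for i := 0; i < 4; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for f := range work { // exits once the channel is closed
                    f()
                }
            }()
        }

        for i := 0; i < 10; i++ {
            n := i
            work <- func() { fmt.Println("task", n) }
        }

        // Either shutdown path may run first; the channel is closed exactly once.
        closeWork()
        closeWork()
        wg.Wait()
    }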
@@ -649,22 +658,21 @@ func NewServer(opt ...ServerOption) *Server {
 		services: make(map[string]*serviceInfo),
 		quit:     grpcsync.NewEvent(),
 		done:     grpcsync.NewEvent(),
-		czData:   new(channelzData),
+		channelz: channelz.RegisterServer(""),
 	}
 	chainUnaryServerInterceptors(s)
 	chainStreamServerInterceptors(s)
 	s.cv = sync.NewCond(&s.mu)
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
-		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+		s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
 	}
 
 	if s.opts.numServerWorkers > 0 {
 		s.initServerWorkers()
 	}
 
-	s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
-	channelz.Info(logger, s.channelzID, "Server created")
+	channelz.Info(logger, s.channelz, "Server created")
 	return s
 }
 
@@ -790,20 +798,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped")
 
 type listenSocket struct {
 	net.Listener
-	channelzID *channelz.Identifier
-}
-
-func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
-	return &channelz.SocketInternalMetric{
-		SocketOptions: channelz.GetSocketOption(l.Listener),
-		LocalAddr:     l.Listener.Addr(),
-	}
+	channelz *channelz.Socket
 }
 
 func (l *listenSocket) Close() error {
 	err := l.Listener.Close()
-	channelz.RemoveEntry(l.channelzID)
-	channelz.Info(logger, l.channelzID, "ListenSocket deleted")
+	channelz.RemoveEntry(l.channelz.ID)
+	channelz.Info(logger, l.channelz, "ListenSocket deleted")
 	return err
 }
 
@@ -845,7 +846,16 @@ func (s *Server) Serve(lis net.Listener) error {
 		}
 	}()
 
-	ls := &listenSocket{Listener: lis}
+	ls := &listenSocket{
+		Listener: lis,
+		channelz: channelz.RegisterSocket(&channelz.Socket{
+			SocketType:    channelz.SocketTypeListen,
+			Parent:        s.channelz,
+			RefName:       lis.Addr().String(),
+			LocalAddr:     lis.Addr(),
+			SocketOptions: channelz.GetSocketOption(lis)},
+		),
+	}
 	s.lis[ls] = true
 
 	defer func() {
@@ -857,14 +867,8 @@ func (s *Server) Serve(lis net.Listener) error {
 		s.mu.Unlock()
 	}()
 
-	var err error
-	ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
-	if err != nil {
-		s.mu.Unlock()
-		return err
-	}
 	s.mu.Unlock()
-	channelz.Info(logger, ls.channelzID, "ListenSocket created")
+	channelz.Info(logger, ls.channelz, "ListenSocket created")
 
 	var tempDelay time.Duration // how long to sleep on accept failure
 	for {
@@ -932,6 +936,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
 		return
 	}
 
+	if cc, ok := rawConn.(interface {
+		PassServerTransport(transport.ServerTransport)
+	}); ok {
+		cc.PassServerTransport(st)
+	}
+
 	if !s.addConn(lisAddr, st) {
 		return
 	}
@@ -941,15 +951,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
 	}()
 }
 
-func (s *Server) drainServerTransports(addr string) {
-	s.mu.Lock()
-	conns := s.conns[addr]
-	for st := range conns {
-		st.Drain("")
-	}
-	s.mu.Unlock()
-}
-
 // newHTTP2Transport sets up a http/2 transport (using the
 // gRPC http2 server transport in transport/http2_server.go).
 func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
@@ -966,7 +967,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		WriteBufferSize:       s.opts.writeBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
 		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
-		ChannelzParentID:      s.channelzID,
+		ChannelzParent:        s.channelz,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 		HeaderTableSize:       s.opts.headerTableSize,
 	}
@@ -980,7 +981,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		if err != credentials.ErrConnDispatched {
 			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
 			if err != io.EOF {
-				channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err)
+				channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
 			}
 			c.Close()
 		}
@@ -1010,9 +1011,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
 
 	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
 	st.HandleStreams(ctx, func(stream *transport.Stream) {
+		s.handlersWG.Add(1)
 		streamQuota.acquire()
 		f := func() {
 			defer streamQuota.release()
+			defer s.handlersWG.Done()
 			s.handleStream(st, stream)
 		}
 
@@ -1110,37 +1113,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) {
 	}
 }
 
-func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
-	return &channelz.ServerInternalMetric{
-		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
-		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
-		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
-		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
-	}
-}
-
 func (s *Server) incrCallsStarted() {
-	atomic.AddInt64(&s.czData.callsStarted, 1)
-	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
+	s.channelz.ServerMetrics.CallsStarted.Add(1)
+	s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
 }
 
 func (s *Server) incrCallsSucceeded() {
-	atomic.AddInt64(&s.czData.callsSucceeded, 1)
+	s.channelz.ServerMetrics.CallsSucceeded.Add(1)
 }
 
 func (s *Server) incrCallsFailed() {
-	atomic.AddInt64(&s.czData.callsFailed, 1)
+	s.channelz.ServerMetrics.CallsFailed.Add(1)
 }
 
 func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
-		channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err)
+		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
 		return err
 	}
 	compData, err := compress(data, cp, comp)
 	if err != nil {
-		channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err)
+		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
 		return err
 	}
 	hdr, payload := msgHeader(data, compData)
@@ -1331,10 +1325,11 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport,
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 	}
-	d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+
+	d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
 	if err != nil {
 		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
-			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		return err
 	}
@@ -1342,6 +1337,8 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport,
 		t.IncrMsgRecv()
 	}
 	df := func(v any) error {
+		defer cancel()
+
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
@@ -1383,7 +1380,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport,
 			trInfo.tr.SetError()
 		}
 		if e := t.WriteStatus(stream, appStatus); e != nil {
-			channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		if len(binlogs) != 0 {
 			if h, _ := stream.Header(); h.Len() > 0 {
@@ -1423,7 +1420,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport,
 		}
 		if sts, ok := status.FromError(err); ok {
 			if e := t.WriteStatus(stream, sts); e != nil {
-				channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e)
+				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 			}
 		} else {
 			switch st := err.(type) {
@@ -1721,8 +1718,8 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 	ctx = contextWithServer(ctx, s)
 	var ti *traceInfo
 	if EnableTracing {
-		tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
-		ctx = trace.NewContext(ctx, tr)
+		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = newTraceContext(ctx, tr)
 		ti = &traceInfo{
 			tr: tr,
 			firstLine: firstLine{
@@ -1751,7 +1748,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			ti.tr.SetError()
 		}
-		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
 	}
 	if ti != nil {
 		ti.tr.Finish()
@@ -1808,7 +1805,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
 			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			ti.tr.SetError()
 		}
-		channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err)
+		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
 	}
 	if ti != nil {
 		ti.tr.Finish()
@@ -1880,8 +1877,7 @@ func (s *Server) stop(graceful bool) {
 	s.quit.Fire()
 	defer s.done.Fire()
 
-	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) })
-
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
 	s.mu.Lock()
 	s.closeListenersLocked()
 	// Wait for serving threads to be ready to exit. Only then can we be sure no
@@ -1898,15 +1894,23 @@ func (s *Server) stop(graceful bool) {
 		s.closeServerTransportsLocked()
 	}
 
-	if s.opts.numServerWorkers > 0 {
-		s.stopServerWorkers()
-	}
-
 	for len(s.conns) != 0 {
 		s.cv.Wait()
 	}
 	s.conns = nil
 
+	if s.opts.numServerWorkers > 0 {
+		// Closing the channel (only once, via grpcsync.OnceFunc) after all the
+		// connections have been closed above ensures that there are no
+		// goroutines executing the callback passed to st.HandleStreams (where
+		// the channel is written to).
+		s.serverWorkerChannelClose()
+	}
+
+	if graceful || s.opts.waitForHandlers {
+		s.handlersWG.Wait()
+	}
+
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
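The comment in this hunk captures the ordering argument: the worker channel may only be closed after every connection, and therefore every HandleStreams callback that writes to it, is gone. A small self-contained sketch of that writers-drain-before-close pattern, using stand-in goroutines rather than the real transport code:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        jobs := make(chan int)

        // Writers: stand-ins for the HandleStreams callbacks that send work.
        var writers sync.WaitGroup
        for w := 0; w < 3; w++ {
            writers.Add(1)
            go func(w int) {
                defer writers.Done()
                for i := 0; i < 5; i++ {
                    jobs <- w*10 + i
                }
            }(w)
        }

        // Reader: a stand-in for the worker goroutines draining the channel.
        done := make(chan struct{})
        go func() {
            defer close(done)
            for j := range jobs {
                fmt.Println("handled", j)
            }
        }()

        // Close only after every writer has returned, mirroring how stop()
        // waits for s.conns to drain before calling serverWorkerChannelClose.
        writers.Wait()
        close(jobs)
        <-done
    }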
@@ -2098,7 +2102,7 @@ func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
 		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
 	}
 
-	return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil
+	return stream.ClientAdvertisedCompressors(), nil
 }
 
 // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
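ClientAdvertisedCompressors now returns a []string directly, so the exported grpc.ClientSupportedCompressors keeps its ([]string, error) signature without splitting a comma-separated string itself. A sketch of how a handler might consult it together with grpc.SetSendCompressor (both experimental APIs); outside an active RPC the call simply returns an error, as the main function below shows:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
        _ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
    )

    // useGzipIfSupported switches the response compressor to gzip when the
    // calling client advertised support for it. It is meant to run inside a
    // handler, where ctx carries the server stream.
    func useGzipIfSupported(ctx context.Context) {
        compressors, err := grpc.ClientSupportedCompressors(ctx)
        if err != nil {
            log.Printf("not inside an RPC: %v", err)
            return
        }
        for _, c := range compressors {
            if c == "gzip" {
                if err := grpc.SetSendCompressor(ctx, "gzip"); err != nil {
                    log.Printf("set send compressor: %v", err)
                }
                return
            }
        }
    }

    func main() {
        // Called outside an RPC just to show the error path; in real use this
        // would be invoked from a unary or streaming handler.
        useGzipIfSupported(context.Background())
    }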
@@ -2128,17 +2132,9 @@ func Method(ctx context.Context) (string, bool) {
 	return s.Method(), true
 }
 
-type channelzServer struct {
-	s *Server
-}
-
-func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
-	return c.s.channelzMetric()
-}
-
 // validateSendCompressor returns an error when given compressor name cannot be
 // handled by the server or the client based on the advertised compressors.
-func validateSendCompressor(name, clientCompressors string) error {
+func validateSendCompressor(name string, clientCompressors []string) error {
 	if name == encoding.Identity {
 		return nil
 	}
@@ -2147,7 +2143,7 @@ func validateSendCompressor(name, clientCompressors string) error {
 		return fmt.Errorf("compressor not registered %q", name)
 	}
 
-	for _, c := range strings.Split(clientCompressors, ",") {
+	for _, c := range clientCompressors {
 		if c == name {
 			return nil // found match
 		}