Mirror of https://github.com/cloudflare/cloudflared.git (synced 2025-07-28 04:59:58 +00:00)

TUN-9467: bump coredns to solve CVE

Commit a408612f26 (parent f8d12c9d39), committed by João "Pisco" Fernandes
vendor/google.golang.org/grpc/server.go (generated, vendored): 229 changed lines
@@ -37,14 +37,17 @@ import (
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
+	estats "google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/internal/grpcutil"
+	istats "google.golang.org/grpc/internal/stats"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -80,18 +83,22 @@ func init() {
 	}
 	internal.BinaryLogger = binaryLogger
 	internal.JoinServerOptions = newJoinServerOption
-	internal.RecvBufferPool = recvBufferPool
+	internal.BufferPool = bufferPool
+	internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
+		return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
+	}
 }
 
 var statusOK = status.New(codes.OK, "")
 var logger = grpclog.Component("core")
 
-type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
+// MethodHandler is a function type that processes a unary RPC method call.
+type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
 
 // MethodDesc represents an RPC service's method specification.
 type MethodDesc struct {
 	MethodName string
-	Handler    methodHandler
+	Handler    MethodHandler
 }
 
 // ServiceDesc represents an RPC service's specification.
@@ -170,7 +177,7 @@ type serverOptions struct {
 	maxHeaderListSize *uint32
 	headerTableSize   *uint32
 	numServerWorkers  uint32
-	recvBufferPool    SharedBufferPool
+	bufferPool        mem.BufferPool
 	waitForHandlers   bool
 }
 
@@ -181,7 +188,7 @@ var defaultServerOptions = serverOptions{
 	connectionTimeout: 120 * time.Second,
 	writeBufferSize:   defaultWriteBufSize,
 	readBufferSize:    defaultReadBufSize,
-	recvBufferPool:    nopBufferPool{},
+	bufferPool:        mem.DefaultBufferPool(),
 }
 var globalServerOptions []ServerOption
 
@@ -313,7 +320,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
 // Will be supported throughout 1.x.
 func CustomCodec(codec Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.codec = codec
+		o.codec = newCodecV0Bridge(codec)
 	})
 }
 
@@ -342,7 +349,22 @@ func CustomCodec(codec Codec) ServerOption {
 // later release.
 func ForceServerCodec(codec encoding.Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.codec = codec
+		o.codec = newCodecV1Bridge(codec)
 	})
 }
 
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codecV2
+	})
+}
+
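As a usage sketch (not part of this diff): the new ForceServerCodecV2 option is consumed like any other ServerOption by applications embedding this vendored gRPC. myCodecV2 below is a hypothetical placeholder assumed to satisfy encoding.CodecV2.

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// myCodecV2 stands in for a real implementation of encoding.CodecV2
// (Marshal/Unmarshal over mem.BufferSlice plus Name).
var myCodecV2 encoding.CodecV2

// newServerWithCodecV2 overrides the registered codecs for every RPC
// served by the returned *grpc.Server.
func newServerWithCodecV2() *grpc.Server {
	return grpc.NewServer(grpc.ForceServerCodecV2(myCodecV2))
}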
@@ -527,12 +549,22 @@ func ConnectionTimeout(d time.Duration) ServerOption {
 	})
 }
 
+// MaxHeaderListSizeServerOption is a ServerOption that sets the max
+// (uncompressed) size of header list that the server is prepared to accept.
+type MaxHeaderListSizeServerOption struct {
+	MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
+	so.maxHeaderListSize = &o.MaxHeaderListSize
+}
+
 // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
 // of header list that the server is prepared to accept.
 func MaxHeaderListSize(s uint32) ServerOption {
-	return newFuncServerOption(func(o *serverOptions) {
-		o.maxHeaderListSize = &s
-	})
+	return MaxHeaderListSizeServerOption{
+		MaxHeaderListSize: s,
+	}
 }
 
 // HeaderTableSize returns a ServerOption that sets the size of dynamic
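A hedged usage sketch, not part of this diff: after the change above, the long-standing MaxHeaderListSize helper simply returns the newly exported struct form; both configure the same limit. The 1 MiB value is illustrative.

package example

import "google.golang.org/grpc"

// newServerWithHeaderCap caps incoming (uncompressed) header metadata at 1 MiB.
func newServerWithHeaderCap() *grpc.Server {
	return grpc.NewServer(grpc.MaxHeaderListSize(1 << 20))
}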
@@ -582,26 +614,9 @@ func WaitForHandlers(w bool) ServerOption {
 	})
 }
 
-// RecvBufferPool returns a ServerOption that configures the server
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: StatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
-//
-// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
-// v1.60.0 or later.
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
-	return recvBufferPool(bufferPool)
-}
-
-func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.recvBufferPool = bufferPool
+		o.bufferPool = bufferPool
 	})
 }
 
@@ -612,8 +627,8 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
 // workload (assuming a QPS of a few thousand requests/sec).
 const serverWorkerResetThreshold = 1 << 16
 
-// serverWorkers blocks on a *transport.Stream channel forever and waits for
-// data to be fed by serveStreams. This allows multiple requests to be
+// serverWorker blocks on a *transport.ServerStream channel forever and waits
+// for data to be fed by serveStreams. This allows multiple requests to be
 // processed by the same goroutine, removing the need for expensive stack
 // re-allocations (see the runtime.morestack problem [1]).
 //
@@ -633,7 +648,7 @@ func (s *Server) serverWorker() {
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
 	s.serverWorkerChannel = make(chan func())
-	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+	s.serverWorkerChannelClose = sync.OnceFunc(func() {
 		close(s.serverWorkerChannel)
 	})
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
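The comments above describe the goroutine-reuse trick: a fixed set of workers drain a channel of closures so short handlers run on already-warm stacks instead of paying runtime.morestack per request. A generic, hedged sketch of that pattern follows (not grpc's internal code; the reset-after-2^16-tasks detail is omitted):

package pool

import "sync"

// workerPool is an illustrative stand-in for the serverWorker machinery:
// n goroutines range over a shared channel of closures, so each task reuses
// a goroutine stack instead of spawning a fresh goroutine per request.
type workerPool struct {
	tasks chan func()
	stop  func()
}

func newWorkerPool(n int) *workerPool {
	p := &workerPool{tasks: make(chan func())}
	// sync.OnceFunc (Go 1.21+) mirrors the grpcsync.OnceFunc to sync.OnceFunc
	// swap in the hunk above: the channel is closed exactly once.
	p.stop = sync.OnceFunc(func() { close(p.tasks) })
	for i := 0; i < n; i++ {
		go func() {
			for f := range p.tasks {
				f()
			}
		}()
	}
	return p
}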
@@ -970,6 +985,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 		ChannelzParent:        s.channelz,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 		HeaderTableSize:       s.opts.headerTableSize,
+		BufferPool:            s.opts.bufferPool,
 	}
 	st, err := transport.NewServerTransport(c, config)
 	if err != nil {
@@ -1010,7 +1026,7 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport,
 	}()
 
 	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
-	st.HandleStreams(ctx, func(stream *transport.Stream) {
+	st.HandleStreams(ctx, func(stream *transport.ServerStream) {
 		s.handlersWG.Add(1)
 		streamQuota.acquire()
 		f := func() {
@@ -1062,7 +1078,7 @@ var _ http.Handler = (*Server)(nil)
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
 	if err != nil {
 		// Errors returned from transport.NewServerHandlerTransport have
 		// already been written to w.
@@ -1126,26 +1142,41 @@ func (s *Server) incrCallsFailed() {
 	s.channelz.ServerMetrics.CallsFailed.Add(1)
 }
 
-func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
 		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
 		return err
 	}
-	compData, err := compress(data, cp, comp)
+
+	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
 	if err != nil {
+		data.Free()
 		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
 		return err
 	}
-	hdr, payload := msgHeader(data, compData)
+
+	hdr, payload := msgHeader(data, compData, pf)
+
+	defer func() {
+		compData.Free()
+		data.Free()
+		// payload does not need to be freed here, it is either data or compData, both of
+		// which are already freed.
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > s.opts.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	if payloadLen > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
 	}
-	err = t.Write(stream, hdr, payload, opts)
+	err = stream.Write(hdr, payload, opts)
 	if err == nil {
-		for _, sh := range s.opts.statsHandlers {
-			sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
+		if len(s.opts.statsHandlers) != 0 {
+			for _, sh := range s.opts.statsHandlers {
+				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+			}
 		}
 	}
 	return err
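The payloadLen check above is enforced against the server's configured maximum send size. A hedged sketch of the corresponding knobs an embedding application can set (the 16 MiB figure is arbitrary and illustrative, not a recommendation from this change):

package example

import "google.golang.org/grpc"

// newTunedServer sets the limits that back s.opts.maxSendMessageSize and
// s.opts.maxReceiveMessageSize checked in the hunks above.
func newTunedServer() *grpc.Server {
	return grpc.NewServer(
		grpc.MaxSendMsgSize(16*1024*1024),
		grpc.MaxRecvMsgSize(16*1024*1024),
	)
}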
@@ -1187,7 +1218,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
 	}
 }
 
-func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
 	shs := s.opts.statsHandlers
 	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
 		if channelz.IsOn() {
@@ -1295,7 +1326,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 		decomp = encoding.GetCompressor(rc)
 		if decomp == nil {
 			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(stream, st)
+			stream.WriteStatus(st)
 			return st.Err()
 		}
 	}
@@ -1324,37 +1355,42 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 	var payInfo *payloadInfo
 	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
 		defer payInfo.free()
 	}
 
-	d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
 	if err != nil {
-		if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
+		if e := stream.WriteStatus(status.Convert(err)); e != nil {
 			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		return err
 	}
-	if channelz.IsOn() {
-		t.IncrMsgRecv()
+	freed := false
+	dataFree := func() {
+		if !freed {
+			d.Free()
+			freed = true
+		}
 	}
+	defer dataFree()
 	df := func(v any) error {
-		defer cancel()
+		defer dataFree()
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
+
 		for _, sh := range shs {
 			sh.HandleRPC(ctx, &stats.InPayload{
 				RecvTime:         time.Now(),
 				Payload:          v,
-				Length:           len(d),
+				Length:           d.Len(),
 				WireLength:       payInfo.compressedLength + headerLen,
 				CompressedLength: payInfo.compressedLength,
-				Data:             d,
 			})
 		}
 		if len(binlogs) != 0 {
 			cm := &binarylog.ClientMessage{
-				Message: d,
+				Message: d.Materialize(),
 			}
 			for _, binlog := range binlogs {
 				binlog.Log(ctx, cm)
@@ -1379,7 +1415,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
 			trInfo.tr.SetError()
 		}
-		if e := t.WriteStatus(stream, appStatus); e != nil {
+		if e := stream.WriteStatus(appStatus); e != nil {
 			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		if len(binlogs) != 0 {
@@ -1406,20 +1442,20 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 	if trInfo != nil {
 		trInfo.tr.LazyLog(stringer("OK"), false)
 	}
-	opts := &transport.Options{Last: true}
+	opts := &transport.WriteOptions{Last: true}
 
 	// Server handler could have set new compressor by calling SetSendCompressor.
 	// In case it is set, we need to use it for compressing outbound message.
 	if stream.SendCompress() != sendCompressorName {
 		comp = encoding.GetCompressor(stream.SendCompress())
 	}
-	if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil {
+	if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
 		}
 		if sts, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, sts); e != nil {
+			if e := stream.WriteStatus(sts); e != nil {
 				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 			}
 		} else {
@@ -1459,9 +1495,6 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 			binlog.Log(ctx, sm)
 		}
 	}
-	if channelz.IsOn() {
-		t.IncrMsgSent()
-	}
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
 	}
@@ -1477,7 +1510,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
 			binlog.Log(ctx, st)
 		}
 	}
-	return t.WriteStatus(stream, statusOK)
+	return stream.WriteStatus(statusOK)
 }
 
 // chainStreamServerInterceptors chains all stream server interceptors into one.
@@ -1516,7 +1549,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf
 	}
 }
 
-func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
+func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
 	}
@@ -1536,9 +1569,8 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 	ctx = NewContextWithServerTransportStream(ctx, stream)
 	ss := &serverStream{
 		ctx:                   ctx,
-		t:                     t,
 		s:                     stream,
-		p:                     &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
+		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
@@ -1618,12 +1650,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 	// If dc is set and matches the stream's compression, use it. Otherwise, try
 	// to find a matching registered compressor for decomp.
 	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
-		ss.dc = s.opts.dc
+		ss.decompressorV0 = s.opts.dc
 	} else if rc != "" && rc != encoding.Identity {
-		ss.decomp = encoding.GetCompressor(rc)
-		if ss.decomp == nil {
+		ss.decompressorV1 = encoding.GetCompressor(rc)
+		if ss.decompressorV1 == nil {
 			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			return st.Err()
 		}
 	}
@@ -1633,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 	//
 	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
 	if s.opts.cp != nil {
-		ss.cp = s.opts.cp
+		ss.compressorV0 = s.opts.cp
 		ss.sendCompressorName = s.opts.cp.Type()
 	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
 		// Legacy compressor not specified; attempt to respond with same encoding.
-		ss.comp = encoding.GetCompressor(rc)
-		if ss.comp != nil {
+		ss.compressorV1 = encoding.GetCompressor(rc)
+		if ss.compressorV1 != nil {
 			ss.sendCompressorName = rc
 		}
 	}
@@ -1649,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 		}
 	}
 
-	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp)
+	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
 
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
@@ -1692,7 +1724,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 				binlog.Log(ctx, st)
 			}
 		}
-		t.WriteStatus(ss.s, appStatus)
+		ss.s.WriteStatus(appStatus)
 		// TODO: Should we log an error from WriteStatus here and below?
 		return appErr
 	}
@@ -1710,10 +1742,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
 			binlog.Log(ctx, st)
 		}
 	}
-	return t.WriteStatus(ss.s, statusOK)
+	return ss.s.WriteStatus(statusOK)
 }
 
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
 	ctx := stream.Context()
 	ctx = contextWithServer(ctx, s)
 	var ti *traceInfo
@@ -1743,7 +1775,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 			ti.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
-		if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+		if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
 			if ti != nil {
 				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 				ti.tr.SetError()
@@ -1758,17 +1790,20 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	service := sm[:pos]
 	method := sm[pos+1:]
 
-	md, _ := metadata.FromIncomingContext(ctx)
-	for _, sh := range s.opts.statsHandlers {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
-		sh.HandleRPC(ctx, &stats.InHeader{
-			FullMethod:  stream.Method(),
-			RemoteAddr:  t.Peer().Addr,
-			LocalAddr:   t.Peer().LocalAddr,
-			Compression: stream.RecvCompress(),
-			WireLength:  stream.HeaderWireLength(),
-			Header:      md,
-		})
+	// FromIncomingContext is expensive: skip if there are no statsHandlers
+	if len(s.opts.statsHandlers) > 0 {
+		md, _ := metadata.FromIncomingContext(ctx)
+		for _, sh := range s.opts.statsHandlers {
+			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+			sh.HandleRPC(ctx, &stats.InHeader{
+				FullMethod:  stream.Method(),
+				RemoteAddr:  t.Peer().Addr,
+				LocalAddr:   t.Peer().LocalAddr,
+				Compression: stream.RecvCompress(),
+				WireLength:  stream.HeaderWireLength(),
+				Header:      md,
+			})
+		}
 	}
 	// To have calls in stream callouts work. Will delete once all stats handler
 	// calls come from the gRPC layer.
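The new guard means metadata.FromIncomingContext is only paid for when a stats handler is registered. For context, a hedged sketch of a minimal stats.Handler that would exercise this path (a toy logger, not something this repository installs):

package example

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

// logStats implements stats.Handler; registering it is what makes the
// InHeader/InPayload callbacks in the hunks above fire.
type logStats struct{}

func (logStats) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context   { return ctx }
func (logStats) HandleRPC(_ context.Context, s stats.RPCStats)                     { log.Printf("rpc stat: %T", s) }
func (logStats) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (logStats) HandleConn(context.Context, stats.ConnStats)                       {}

func newObservedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(logStats{}))
}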
@@ -1777,17 +1812,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 	srv, knownService := s.services[service]
 	if knownService {
 		if md, ok := srv.methods[method]; ok {
-			s.processUnaryRPC(ctx, t, stream, srv, md, ti)
+			s.processUnaryRPC(ctx, stream, srv, md, ti)
 			return
 		}
 		if sd, ok := srv.streams[method]; ok {
-			s.processStreamingRPC(ctx, t, stream, srv, sd, ti)
+			s.processStreamingRPC(ctx, stream, srv, sd, ti)
 			return
 		}
 	}
 	// Unknown service, or known server unknown method.
 	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-		s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti)
+		s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
 		return
 	}
 	var errDesc string
@@ -1800,7 +1835,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
 		ti.tr.LazyPrintf("%s", errDesc)
 		ti.tr.SetError()
 	}
-	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+	if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
 		if ti != nil {
 			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 			ti.tr.SetError()
@@ -1900,7 +1935,7 @@ func (s *Server) stop(graceful bool) {
 	s.conns = nil
 
 	if s.opts.numServerWorkers > 0 {
-		// Closing the channel (only once, via grpcsync.OnceFunc) after all the
+		// Closing the channel (only once, via sync.OnceFunc) after all the
 		// connections have been closed above ensures that there are no
 		// goroutines executing the callback passed to st.HandleStreams (where
 		// the channel is written to).
@@ -1953,12 +1988,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
 		return s.opts.codec
 	}
 	if contentSubtype == "" {
-		return encoding.GetCodec(proto.Name)
+		return getCodec(proto.Name)
 	}
-	codec := encoding.GetCodec(contentSubtype)
+	codec := getCodec(contentSubtype)
 	if codec == nil {
 		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
-		return encoding.GetCodec(proto.Name)
+		return getCodec(proto.Name)
 	}
 	return codec
 }
@@ -2075,7 +2110,7 @@ func SendHeader(ctx context.Context, md metadata.MD) error {
 // Notice: This function is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func SetSendCompressor(ctx context.Context, name string) error {
-	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
 	if !ok || stream == nil {
 		return fmt.Errorf("failed to fetch the stream from the given context")
 	}
@@ -2097,7 +2132,7 @@ func SetSendCompressor(ctx context.Context, name string) error {
 // Notice: This function is EXPERIMENTAL and may be changed or removed in a
 // later release.
 func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
-	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream)
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
 	if !ok || stream == nil {
 		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
 	}
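Both helpers above are experimental server-side APIs. A hedged sketch of how a handler might combine them; the gzip choice and helper name are illustrative, not part of this change:

package example

import (
	"context"
	"slices"

	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// maybeCompressReply opts the response into gzip only when the client
// advertised support for it via grpc-accept-encoding.
func maybeCompressReply(ctx context.Context) error {
	supported, err := grpc.ClientSupportedCompressors(ctx)
	if err != nil {
		return err
	}
	if slices.Contains(supported, "gzip") {
		return grpc.SetSendCompressor(ctx, "gzip")
	}
	return nil
}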