TUN-8052: Update go to 1.21.5

Also update golang.org/x/net and google.golang.org/grpc to fix vulnerabilities,
although cloudflared uses them in a way that is not exposed to those risks.
Chung-Ting
2023-12-11 11:58:19 +00:00
parent b901d73d9b
commit 12dd91ada1
974 changed files with 28780 additions and 25799 deletions
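The version bump itself lands in go.mod. A rough sketch of the relevant
directives is below; the toolchain line follows the commit title, while the
two dependency versions are placeholders rather than the exact ones pinned by
this commit:

go 1.21

toolchain go1.21.5

require (
    golang.org/x/net vX.Y.Z        // placeholder version
    google.golang.org/grpc vA.B.C  // placeholder version
)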

vendor/google.golang.org/grpc/stream.go

@@ -31,6 +31,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
@@ -54,7 +55,7 @@ import (
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
-type StreamHandler func(srv interface{}, stream ServerStream) error
+type StreamHandler func(srv any, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification. Used
// on the server when registering services and on the client when initiating
@@ -79,9 +80,9 @@ type Stream interface {
// Deprecated: See ClientStream and ServerStream documentation instead.
Context() context.Context
// Deprecated: See ClientStream and ServerStream documentation instead.
-SendMsg(m interface{}) error
+SendMsg(m any) error
// Deprecated: See ClientStream and ServerStream documentation instead.
-RecvMsg(m interface{}) error
+RecvMsg(m any) error
}
// ClientStream defines the client-side behavior of a streaming RPC.
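Note: the interface{} -> any rename recurs throughout this diff. Since Go
1.18, any is a predeclared alias for interface{}, so the change is purely
mechanical; a standalone sketch:

package main

import "fmt"

// describe accepts any (alias of interface{}); the two spellings are
// interchangeable at compile time and at runtime.
func describe(v any) string { return fmt.Sprintf("%T", v) }

func main() {
    var old interface{} = 42
    fmt.Println(describe(old)) // prints: int
}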
@@ -90,7 +91,9 @@ type Stream interface {
// status package.
type ClientStream interface {
// Header returns the header metadata received from the server if there
-// is any. It blocks if the metadata is not ready to read.
+// is any. It blocks if the metadata is not ready to read. If the metadata
+// is nil and the error is also nil, then the stream was terminated without
+// headers, and the status can be discovered by calling RecvMsg.
Header() (metadata.MD, error)
// Trailer returns the trailer metadata from the server, if there is any.
// It must only be called after stream.CloseAndRecv has returned, or
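Note: the new Header() contract above implies a client-side pattern like the
hedged sketch below; headerOrStatus and the reply parameter are illustrative
names, not part of the gRPC API.

package sketch

import (
    "io"

    "google.golang.org/grpc"
)

// headerOrStatus returns the stream's header metadata, falling back to
// RecvMsg when the stream ended without headers (nil metadata, nil error),
// which is where the final status now surfaces.
func headerOrStatus(cs grpc.ClientStream, reply any) error {
    md, err := cs.Header()
    if err != nil {
        return err
    }
    if md == nil {
        // No headers were ever sent; RecvMsg reports the final status.
        if err := cs.RecvMsg(reply); err != nil && err != io.EOF {
            return err
        }
    }
    return nil
}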
@@ -123,7 +126,10 @@ type ClientStream interface {
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines. It is also
// not safe to call CloseSend concurrently with SendMsg.
-SendMsg(m interface{}) error
+//
+// It is not safe to modify the message after calling SendMsg. Tracing
+// libraries and stats handlers may use the message lazily.
+SendMsg(m any) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the stream completes successfully. On
// any other error, the stream is aborted and the error contains the RPC
@@ -132,7 +138,7 @@ type ClientStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not
// safe to call RecvMsg on the same stream in different goroutines.
-RecvMsg(m interface{}) error
+RecvMsg(m any) error
}
// NewStream creates a new Stream for the client side. This is typically
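Note: the new "do not modify after SendMsg" caveat above suggests allocating
a fresh message per send, as in this hedged sketch (sendAll and wrap are
hypothetical helpers, not gRPC API):

package sketch

import "google.golang.org/grpc"

// sendAll sends one freshly allocated message per payload. Because tracing
// libraries and stats handlers may read a message lazily, no message is
// reused or mutated after SendMsg returns.
func sendAll(cs grpc.ClientStream, payloads [][]byte, wrap func([]byte) any) error {
    for _, p := range payloads {
        msg := wrap(p) // fresh message; never touched again after SendMsg
        if err := cs.SendMsg(msg); err != nil {
            return err
        }
    }
    return nil
}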
@@ -168,10 +174,29 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
-if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+// Start tracking the RPC for idleness purposes. This is where a stream is
+// created for both streaming and unary RPCs, and hence is a good place to
+// track active RPC count.
+if err := cc.idlenessMgr.OnCallBegin(); err != nil {
+return nil, err
+}
+// Add a calloption, to decrement the active call count, that gets executed
+// when the RPC completes.
+opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
+if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+// validate md
if err := imetadata.Validate(md); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
+// validate added
+for _, kvs := range added {
+for i := 0; i < len(kvs); i += 2 {
+if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
+return nil, status.Error(codes.Internal, err.Error())
+}
+}
+}
}
if channelz.IsOn() {
cc.incrCallsStarted()
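Note: the idleness tracking above piggybacks on the OnFinish call option,
which applications can use too (experimental in this gRPC release). A hedged
sketch; invoke stands in for any generated client method:

package sketch

import (
    "context"
    "log"

    "google.golang.org/grpc"
)

// traceCall attaches an OnFinish callback that runs when the RPC completes,
// mirroring how newClientStream decrements the active-call count above.
func traceCall(ctx context.Context, invoke func(context.Context, ...grpc.CallOption) error) error {
    return invoke(ctx, grpc.OnFinish(func(err error) {
        log.Printf("RPC finished: err=%v", err)
    }))
}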
@@ -352,7 +377,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
}
}
for _, binlog := range cs.binlogs {
-binlog.Log(logEntry)
+binlog.Log(cs.ctx, logEntry)
}
}
@@ -416,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx = trace.NewContext(ctx, trInfo.tr)
}
if cs.cc.parsedTarget.Scheme == "xds" {
if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
// Add extra metadata (metadata that will be added by transport) to context
// so the balancer can see them.
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
@@ -438,7 +463,7 @@ func (a *csAttempt) getTransport() error {
cs := a.cs
var err error
-a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
if err != nil {
if de, ok := err.(dropError); ok {
err = de.error
@@ -455,6 +480,25 @@ func (a *csAttempt) getTransport() error {
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries
+// Merge metadata stored in PickResult, if any, with existing call metadata.
+// It is safe to overwrite the csAttempt's context here, since all state
+// maintained in it are local to the attempt. When the attempt has to be
+// retried, a new instance of csAttempt will be created.
+if a.pickResult.Metadata != nil {
+// We currently do not have a function it the metadata package which
+// merges given metadata with existing metadata in a context. Existing
+// function `AppendToOutgoingContext()` takes a variadic argument of key
+// value pairs.
+//
+// TODO: Make it possible to retrieve key value pairs from metadata.MD
+// in a form passable to AppendToOutgoingContext(), or create a version
+// of AppendToOutgoingContext() that accepts a metadata.MD.
+md, _ := metadata.FromOutgoingContext(a.ctx)
+md = metadata.Join(md, a.pickResult.Metadata)
+a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+}
s, err := a.t.NewStream(a.ctx, cs.callHdr)
if err != nil {
nse, ok := err.(*transport.NewStreamError)
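Note: the merge performed above can be reproduced with public APIs; a
minimal sketch (mergeOutgoing is an illustrative name):

package sketch

import (
    "context"

    "google.golang.org/grpc/metadata"
)

// mergeOutgoing joins extra metadata with whatever outgoing metadata the
// context already carries, then writes the result back: the same
// FromOutgoingContext/Join/NewOutgoingContext dance used above.
func mergeOutgoing(ctx context.Context, extra metadata.MD) context.Context {
    md, _ := metadata.FromOutgoingContext(ctx)
    return metadata.NewOutgoingContext(ctx, metadata.Join(md, extra))
}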
@@ -471,7 +515,7 @@ func (a *csAttempt) newStream() error {
return toRPCErr(nse.Err)
}
a.s = s
a.p = &parser{r: s}
a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
return nil
}
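Note: the new recvBufferPool parser field is fed from a dial option; opting
in looks roughly like this (experimental API in this gRPC release, target
address illustrative):

package sketch

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

// dial enables the shared receive-buffer pool, which the parser above
// draws from when decoding incoming messages.
func dial(target string) (*grpc.ClientConn, error) {
    return grpc.Dial(target,
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()),
    )
}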
@@ -529,12 +573,12 @@ type clientStream struct {
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
-ctx context.Context
-cs *clientStream
-t transport.ClientTransport
-s *transport.Stream
-p *parser
-done func(balancer.DoneInfo)
+ctx context.Context
+cs *clientStream
+t transport.ClientTransport
+s *transport.Stream
+p *parser
+pickResult balancer.PickResult
finished bool
dc Decompressor
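Note: csAttempt now consumes a full balancer.PickResult, so a custom
balancer can hand back per-call metadata and a completion callback in one
value; a hedged sketch (onePicker and the header key are illustrative):

package sketch

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/metadata"
)

// onePicker always picks one SubConn and attaches both Metadata (merged
// into the attempt's context above) and a Done callback (invoked from
// csAttempt.finish below).
type onePicker struct{ sc balancer.SubConn }

func (p onePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    return balancer.PickResult{
        SubConn:  p.sc,
        Metadata: metadata.Pairs("x-picked-by", "one-picker"),
        Done:     func(di balancer.DoneInfo) { _ = di.Err },
    }, nil
}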
@@ -752,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
func (cs *clientStream) Header() (metadata.MD, error) {
var m metadata.MD
-noHeader := false
err := cs.withRetry(func(a *csAttempt) error {
var err error
m, err = a.s.Header()
-if err == transport.ErrNoHeaders {
-noHeader = true
-return nil
-}
return toRPCErr(err)
}, cs.commitAttemptLocked)
-if err != nil {
-cs.finish(err)
-return nil, err
+if m == nil && err == nil {
+// The stream ended with success. Finish the clientStream.
+err = io.EOF
}
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader {
if err != nil {
cs.finish(err)
// Do not return the error. The user should get it by calling Recv().
return nil, nil
}
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
// Only log if binary log is on and header has not been logged, and
// there is actually headers to log.
logEntry := &binarylog.ServerHeader{
@@ -781,9 +826,10 @@ func (cs *clientStream) Header() (metadata.MD, error) {
}
cs.serverHeaderBinlogged = true
for _, binlog := range cs.binlogs {
-binlog.Log(logEntry)
+binlog.Log(cs.ctx, logEntry)
}
}
return m, nil
}
@@ -824,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error
cs.buffer = append(cs.buffer, op)
}
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
+func (cs *clientStream) SendMsg(m any) (err error) {
defer func() {
if err != nil && err != io.EOF {
// Call finish on the client stream for errors generated by this SendMsg
@@ -862,13 +908,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
Message: data,
}
for _, binlog := range cs.binlogs {
-binlog.Log(cm)
+binlog.Log(cs.ctx, cm)
}
}
return err
}
-func (cs *clientStream) RecvMsg(m interface{}) error {
+func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
// Call Header() to binary log header if it's not already logged.
cs.Header()
@@ -886,30 +932,12 @@ func (cs *clientStream) RecvMsg(m interface{}) error {
Message: recvInfo.uncompressedBytes,
}
for _, binlog := range cs.binlogs {
-binlog.Log(sm)
+binlog.Log(cs.ctx, sm)
}
}
if err != nil || !cs.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
cs.finish(err)
-if len(cs.binlogs) != 0 {
-// finish will not log Trailer. Log Trailer here.
-logEntry := &binarylog.ServerTrailer{
-OnClientSide: true,
-Trailer: cs.Trailer(),
-Err: err,
-}
-if logEntry.Err == io.EOF {
-logEntry.Err = nil
-}
-if peer, ok := peer.FromContext(cs.Context()); ok {
-logEntry.PeerAddr = peer.Addr
-}
-for _, binlog := range cs.binlogs {
-binlog.Log(logEntry)
-}
-}
}
return err
}
@@ -934,7 +962,7 @@ func (cs *clientStream) CloseSend() error {
OnClientSide: true,
}
for _, binlog := range cs.binlogs {
-binlog.Log(chc)
+binlog.Log(cs.ctx, chc)
}
}
// We never returned an error here for reasons.
@@ -952,6 +980,9 @@ func (cs *clientStream) finish(err error) {
return
}
cs.finished = true
+for _, onFinish := range cs.callInfo.onFinish {
+onFinish(err)
+}
cs.commitAttemptLocked()
if cs.attempt != nil {
cs.attempt.finish(err)
@@ -962,18 +993,30 @@ func (cs *clientStream) finish(err error) {
}
}
}
cs.mu.Unlock()
-// For binary logging. only log cancel in finish (could be caused by RPC ctx
-// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
-//
-// Only one of cancel or trailer needs to be logged. In the cases where
-// users don't call RecvMsg, users must have already canceled the RPC.
-if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled {
-c := &binarylog.Cancel{
-OnClientSide: true,
-}
-for _, binlog := range cs.binlogs {
-binlog.Log(c)
+// Only one of cancel or trailer needs to be logged.
+if len(cs.binlogs) != 0 {
+switch err {
+case errContextCanceled, errContextDeadline, ErrClientConnClosing:
+c := &binarylog.Cancel{
+OnClientSide: true,
+}
+for _, binlog := range cs.binlogs {
+binlog.Log(cs.ctx, c)
+}
+default:
+logEntry := &binarylog.ServerTrailer{
+OnClientSide: true,
+Trailer: cs.Trailer(),
+Err: err,
+}
+if peer, ok := peer.FromContext(cs.Context()); ok {
+logEntry.PeerAddr = peer.Addr
+}
+for _, binlog := range cs.binlogs {
+binlog.Log(cs.ctx, logEntry)
+}
+}
}
if err == nil {
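Note: the cancel-vs-trailer branch above only fires when binary logging is
active. Method selection comes from the GRPC_BINARY_LOG_FILTER environment
variable; redirecting the log destination looks roughly like this:

package sketch

import "google.golang.org/grpc/binarylog"

// logToTempFile points the process-wide binary log at a temp-file sink,
// after which entries such as the ServerTrailer and Cancel above are
// recorded.
func logToTempFile() error {
    sink, err := binarylog.NewTempFileSink()
    if err != nil {
        return err
    }
    binarylog.SetSink(sink)
    return nil
}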
@@ -989,7 +1032,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -1016,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
return nil
}
-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
@@ -1062,9 +1105,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
-Data: payInfo.uncompressedBytes,
-WireLength: payInfo.wireLength + headerLen,
-Length: len(payInfo.uncompressedBytes),
+Data: payInfo.uncompressedBytes,
+WireLength: payInfo.compressedLength + headerLen,
+CompressedLength: payInfo.compressedLength,
+Length: len(payInfo.uncompressedBytes),
})
}
if channelz.IsOn() {
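Note: these reworked payload-size fields are visible to any stats.Handler; a
minimal sketch that reads them (register it with grpc.WithStatsHandler):

package sketch

import (
    "context"
    "log"

    "google.golang.org/grpc/stats"
)

// sizeLogger logs the three payload sizes now populated above: Length
// (uncompressed), CompressedLength, and WireLength (compressed bytes plus
// the framing header).
type sizeLogger struct{}

func (sizeLogger) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (sizeLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (sizeLogger) HandleConn(context.Context, stats.ConnStats) {}

func (sizeLogger) HandleRPC(_ context.Context, s stats.RPCStats) {
    if in, ok := s.(*stats.InPayload); ok {
        log.Printf("recv: %d uncompressed, %d compressed, %d on the wire",
            in.Length, in.CompressedLength, in.WireLength)
    }
}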
@@ -1103,12 +1147,12 @@ func (a *csAttempt) finish(err error) {
tr = a.s.Trailer()
}
-if a.done != nil {
+if a.pickResult.Done != nil {
br := false
if a.s != nil {
br = a.s.BytesReceived()
}
-a.done(balancer.DoneInfo{
+a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
BytesSent: a.s != nil,
@@ -1230,17 +1274,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
as.p = &parser{r: s}
as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
-// Listen on cc and stream contexts to cleanup when the user closes the
-// ClientConn or cancels the stream context. In all other cases, an error
-// should already be injected into the recv buffer by the transport, which
-// the client will eventually receive, and then we will cancel the stream's
-// context in clientStream.finish.
+// Listen on stream context to cleanup when the stream context is
+// canceled. Also listen for the addrConn's context in case the
+// addrConn is closed or reconnects to a different address. In all
+// other cases, an error should already be injected into the recv
+// buffer by the transport, which the client will eventually receive,
+// and then we will cancel the stream's context in
+// addrConnStream.finish.
go func() {
+ac.mu.Lock()
+acCtx := ac.ctx
+ac.mu.Unlock()
select {
case <-ac.ctx.Done():
case <-acCtx.Done():
as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
case <-ctx.Done():
as.finish(toRPCErr(ctx.Err()))
@@ -1303,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context {
return as.s.Context()
}
-func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+func (as *addrConnStream) SendMsg(m any) (err error) {
defer func() {
if err != nil && err != io.EOF {
// Call finish on the client stream for errors generated by this SendMsg
@@ -1348,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) {
return nil
}
-func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+func (as *addrConnStream) RecvMsg(m any) (err error) {
defer func() {
if err != nil || !as.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
@@ -1464,7 +1513,10 @@ type ServerStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines.
-SendMsg(m interface{}) error
+//
+// It is not safe to modify the message after calling SendMsg. Tracing
+// libraries and stats handlers may use the message lazily.
+SendMsg(m any) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the client has performed a CloseSend. On
// any non-EOF error, the stream is aborted and the error contains the
@@ -1473,7 +1525,7 @@ type ServerStream interface {
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not
// safe to call RecvMsg on the same stream in different goroutines.
-RecvMsg(m interface{}) error
+RecvMsg(m any) error
}
// serverStream implements a server side Stream.
@@ -1489,6 +1541,8 @@ type serverStream struct {
comp encoding.Compressor
decomp encoding.Compressor
+sendCompressorName string
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
@@ -1536,7 +1590,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error {
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
-binlog.Log(sh)
+binlog.Log(ss.ctx, sh)
}
}
return err
@@ -1552,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
ss.s.SetTrailer(md)
}
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
+func (ss *serverStream) SendMsg(m any) (err error) {
defer func() {
if ss.trInfo != nil {
ss.mu.Lock()
@@ -1560,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
if err == nil {
ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
} else {
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ss.trInfo.tr.SetError()
}
}
@@ -1581,6 +1635,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
}()
+// Server handler could have set new compressor by calling SetSendCompressor.
+// In case it is set, we need to use it for compressing outbound message.
+if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+ss.comp = encoding.GetCompressor(sendCompressorsName)
+ss.sendCompressorName = sendCompressorsName
+}
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
if err != nil {
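Note: the compressor refresh above exists so a handler can switch the
outbound compressor mid-stream via SetSendCompressor (experimental in this
gRPC release); a hedged sketch:

package sketch

import (
    "google.golang.org/grpc"
    _ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

// pickCompressor switches the stream's outbound compressor; the check in
// serverStream.SendMsg above picks the new name up on the next send.
func pickCompressor(ss grpc.ServerStream) error {
    return grpc.SetSendCompressor(ss.Context(), "gzip")
}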
@@ -1602,14 +1663,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
}
ss.serverHeaderBinlogged = true
for _, binlog := range ss.binlogs {
-binlog.Log(sh)
+binlog.Log(ss.ctx, sh)
}
}
sm := &binarylog.ServerMessage{
Message: data,
}
for _, binlog := range ss.binlogs {
-binlog.Log(sm)
+binlog.Log(ss.ctx, sm)
}
}
if len(ss.statsHandler) != 0 {
@@ -1620,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
return nil
}
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+func (ss *serverStream) RecvMsg(m any) (err error) {
defer func() {
if ss.trInfo != nil {
ss.mu.Lock()
@@ -1628,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if err == nil {
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
} else if err != io.EOF {
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ss.trInfo.tr.SetError()
}
}
@@ -1657,7 +1718,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
for _, binlog := range ss.binlogs {
-binlog.Log(chc)
+binlog.Log(ss.ctx, chc)
}
}
return err
@@ -1673,9 +1734,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
-Data: payInfo.uncompressedBytes,
-WireLength: payInfo.wireLength + headerLen,
-Length: len(payInfo.uncompressedBytes),
+Data: payInfo.uncompressedBytes,
+Length: len(payInfo.uncompressedBytes),
+WireLength: payInfo.compressedLength + headerLen,
+CompressedLength: payInfo.compressedLength,
})
}
}
@@ -1684,7 +1746,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
Message: payInfo.uncompressedBytes,
}
for _, binlog := range ss.binlogs {
-binlog.Log(cm)
+binlog.Log(ss.ctx, cm)
}
}
return nil
@@ -1699,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
// prepareMsg returns the hdr, payload and data
// using the compressors passed or using the
// passed preparedmsg
-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
}
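Note: the *PreparedMsg fast path above lets callers marshal once and send
the same bytes repeatedly; a hedged sketch (sendPrepared is an illustrative
name):

package sketch

import "google.golang.org/grpc"

// sendPrepared encodes msg once via PreparedMsg.Encode, so SendMsg takes
// the prepareMsg shortcut above instead of re-marshaling.
func sendPrepared(cs grpc.ClientStream, msg any) error {
    prepared := &grpc.PreparedMsg{}
    if err := prepared.Encode(cs, msg); err != nil {
        return err
    }
    return cs.SendMsg(prepared)
}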