Mirror of https://github.com/cloudflare/cloudflared.git, synced 2025-07-28 04:59:58 +00:00
TUN-9467: bump coredns to solve CVE

committed by João "Pisco" Fernandes

parent f8d12c9d39
commit a408612f26
vendor/google.golang.org/grpc/balancer_wrapper.go (generated, vendored): 229 lines changed
@@ -24,11 +24,25 @@ import (
 	"sync"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancer/gracefulswitch"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
 )
 
+var (
+	setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
+	// noOpRegisterHealthListenerFn is used when client side health checking is
+	// disabled. It sends a single READY update on the registered listener.
+	noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+		listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+		return func() {}
+	}
+)
+
 // ccBalancerWrapper sits between the ClientConn and the Balancer.
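For context on the noOpRegisterHealthListenerFn added above: when client-side health checking is disabled, registration degenerates to delivering a single READY update and returning a cleanup that does nothing. A minimal standalone sketch of that contract, with the variable redeclared locally (only the balancer and connectivity imports are real grpc-go packages; nothing here touches the vendored internals):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

// Local stand-in with the same shape as the vendored variable: deliver one
// synchronous READY update, then hand back a no-op unregister function.
var noOpRegisterHealthListener = func(_ context.Context, listener func(balancer.SubConnState)) func() {
	listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
	return func() {}
}

func main() {
	unregister := noOpRegisterHealthListener(context.Background(), func(s balancer.SubConnState) {
		fmt.Println("health update:", s.ConnectivityState) // prints: health update: READY
	})
	unregister() // safe: the cleanup does nothing
}
```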
@@ -46,6 +60,7 @@ import (
 // It uses the gracefulswitch.Balancer internally to ensure that balancer
 // switches happen in a graceful manner.
 type ccBalancerWrapper struct {
+	internal.EnforceClientConnEmbedding
 	// The following fields are initialized when the wrapper is created and are
 	// read-only afterwards, and therefore can be accessed without a mutex.
 	cc *ClientConn
@@ -87,12 +102,16 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
 	return ccb
 }
 
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+	return ccb.cc.metricsRecorderList
+}
+
 // updateClientConnState is invoked by grpc to push a ClientConnState update to
 // the underlying balancer. This is always executed from the serializer, so
 // it is safe to call into the balancer here.
 func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
 	errCh := make(chan error)
-	ok := ccb.serializer.Schedule(func(ctx context.Context) {
+	uccs := func(ctx context.Context) {
 		defer close(errCh)
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
@@ -107,17 +126,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
 			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
 		}
 		errCh <- err
-	})
-	if !ok {
-		return nil
-	}
+	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
 	return <-errCh
 }
 
 // resolverError is invoked by grpc to push a resolver error to the underlying
 // balancer. The call to the balancer is executed from the serializer.
 func (ccb *ccBalancerWrapper) resolverError(err error) {
-	ccb.serializer.Schedule(func(ctx context.Context) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
 		}
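The comment in the hunk above explains the pattern: the caller blocks on errCh, so when a racing Close has already shut the serializer down, the failed schedule must still close the channel to unblock the caller. A toy model of that ScheduleOr contract, assuming nothing about grpcsync.CallbackSerializer's real internals (toySerializer and update are invented names; the real serializer queues callbacks on a goroutine rather than running them inline, which is why the sketch uses a buffered channel):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// toySerializer models only the ScheduleOr shape: run the callback when open,
// run onFailure instead when the serializer has been closed.
type toySerializer struct {
	mu     sync.Mutex
	closed bool
}

func (s *toySerializer) ScheduleOr(f func(context.Context), onFailure func()) {
	s.mu.Lock()
	closed := s.closed
	s.mu.Unlock()
	if closed {
		onFailure()
		return
	}
	f(context.Background())
}

// update mirrors the blocking shape of updateClientConnState above.
func update(s *toySerializer) error {
	errCh := make(chan error, 1) // buffered because f runs inline in this toy
	work := func(ctx context.Context) {
		defer close(errCh)
		errCh <- errors.New("balancer result") // stand-in for the balancer's return value
	}
	// If Close won the race, onFailure closes errCh so the receive below can
	// never block forever; this is the whole reason ScheduleOr exists.
	s.ScheduleOr(work, func() { close(errCh) })
	return <-errCh
}

func main() {
	s := &toySerializer{}
	fmt.Println(update(s)) // balancer result

	s.mu.Lock()
	s.closed = true
	s.mu.Unlock()
	fmt.Println(update(s)) // <nil>: scheduling failed, errCh was closed empty
}
```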
@@ -133,7 +158,7 @@ func (ccb *ccBalancerWrapper) close() {
 	ccb.closed = true
 	ccb.mu.Unlock()
 	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
-	ccb.serializer.Schedule(func(context.Context) {
+	ccb.serializer.TrySchedule(func(context.Context) {
 		if ccb.balancer == nil {
 			return
 		}
@@ -145,7 +170,7 @@ func (ccb *ccBalancerWrapper) close() {
 
 // exitIdle invokes the balancer's exitIdle method in the serializer.
 func (ccb *ccBalancerWrapper) exitIdle() {
-	ccb.serializer.Schedule(func(ctx context.Context) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil || ccb.balancer == nil {
 			return
 		}
@@ -177,12 +202,13 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
 		ac:            ac,
 		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
 		stateListener: opts.StateListener,
+		healthData:    newHealthData(connectivity.Idle),
 	}
 	ac.acbw = acbw
 	return acbw, nil
 }
 
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
 	// The graceful switch balancer will never call this.
 	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
 }
@@ -198,6 +224,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
 func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
 	ccb.cc.mu.Lock()
 	defer ccb.cc.mu.Unlock()
+	if ccb.cc.conns == nil {
+		// The CC has been closed; ignore this update.
+		return
+	}
 
 	ccb.mu.Lock()
 	if ccb.closed {
@@ -238,25 +268,77 @@ func (ccb *ccBalancerWrapper) Target() string {
 // acBalancerWrapper is a wrapper on top of ac for balancers.
 // It implements balancer.SubConn interface.
 type acBalancerWrapper struct {
+	internal.EnforceSubConnEmbedding
 	ac            *addrConn          // read-only
 	ccb           *ccBalancerWrapper // read-only
 	stateListener func(balancer.SubConnState)
 
-	mu        sync.Mutex
-	producers map[balancer.ProducerBuilder]*refCountedProducer
+	producersMu sync.Mutex
+	producers   map[balancer.ProducerBuilder]*refCountedProducer
+
+	// Access to healthData is protected by healthMu.
+	healthMu sync.Mutex
+	// healthData is stored as a pointer to detect when the health listener is
+	// dropped or updated. This is required as closures can't be compared for
+	// equality.
+	healthData *healthData
 }
 
+// healthData holds data related to health state reporting.
+type healthData struct {
+	// connectivityState stores the most recent connectivity state delivered
+	// to the LB policy. This is stored to avoid sending updates when the
+	// SubConn has already exited connectivity state READY.
+	connectivityState connectivity.State
+	// closeHealthProducer stores function to close the ref counted health
+	// producer. The health producer is automatically closed when the SubConn
+	// state changes.
+	closeHealthProducer func()
+}
+
+func newHealthData(s connectivity.State) *healthData {
+	return &healthData{
+		connectivityState:   s,
+		closeHealthProducer: func() {},
+	}
+}
+
 // updateState is invoked by grpc to push a subConn state update to the
 // underlying balancer.
-func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
-	acbw.ccb.serializer.Schedule(func(ctx context.Context) {
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
 		if ctx.Err() != nil || acbw.ccb.balancer == nil {
 			return
 		}
+		// Invalidate all producers on any state change.
+		acbw.closeProducers()
+
 		// Even though it is optional for balancers, gracefulswitch ensures
 		// opts.StateListener is set, so this cannot ever be nil.
 		// TODO: delete this comment when UpdateSubConnState is removed.
-		acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
+		scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+		if s == connectivity.Ready {
+			setConnectedAddress(&scs, curAddr)
+		}
+		// Invalidate the health listener by updating the healthData.
+		acbw.healthMu.Lock()
+		// A race may occur if a health listener is registered soon after the
+		// connectivity state is set but before the stateListener is called.
+		// Two cases may arise:
+		// 1. The new state is not READY: RegisterHealthListener has checks to
+		//    ensure no updates are sent when the connectivity state is not
+		//    READY.
+		// 2. The new state is READY: This means that the old state wasn't Ready.
+		//    The RegisterHealthListener API mentions that a health listener
+		//    must not be registered when a SubConn is not ready to avoid such
+		//    races. When this happens, the LB policy would get health updates
+		//    on the old listener. When the LB policy registers a new listener
+		//    on receiving the connectivity update, the health updates will be
+		//    sent to the new health listener.
+		acbw.healthData = newHealthData(scs.ConnectivityState)
+		acbw.healthMu.Unlock()
+
+		acbw.stateListener(scs)
 	})
 }
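The healthData comment in the hunk above leans on pointer identity to invalidate listeners: closures can't be compared with ==, but the freshly allocated struct that carries them can. A small self-contained sketch of the same generation-pointer technique (gen, wrapper, and deliver are hypothetical names, not grpc-go types):

```go
package main

import (
	"fmt"
	"sync"
)

// gen is a stand-in for the diff's *healthData: every state change allocates
// a fresh pointer, and in-flight callbacks compare pointers to detect that
// they have gone stale.
type gen struct{ state string }

type wrapper struct {
	mu  sync.Mutex
	cur *gen
}

// deliver forwards an update only if g is still the current generation.
func (w *wrapper) deliver(g *gen, update string, listener func(string)) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.cur != g { // a newer listener/state replaced this one; drop the update
		return
	}
	listener(update)
}

func main() {
	w := &wrapper{cur: &gen{state: "READY"}}
	old := w.cur

	w.deliver(old, "serving", func(s string) { fmt.Println("got:", s) }) // delivered

	w.cur = &gen{state: "IDLE"} // state change: invalidate by swapping the pointer
	w.deliver(old, "late update", func(s string) { fmt.Println("got:", s) }) // dropped
}
```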
@@ -273,6 +355,7 @@ func (acbw *acBalancerWrapper) Connect() {
 }
 
 func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.closeProducers()
 	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
 }
 
@@ -280,9 +363,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
 // ready, blocks until it is or ctx expires. Returns an error when the context
 // expires or the addrConn is shut down.
 func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
-	transport, err := acbw.ac.getTransport(ctx)
-	if err != nil {
-		return nil, err
+	transport := acbw.ac.getReadyTransport()
+	if transport == nil {
+		return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+
 	}
 	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
 }
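Behaviorally, the hunk above makes NewStream fail fast with an UNAVAILABLE status when the SubConn has no ready transport, where the old getTransport call could block until the context expired. A caller can branch on that code; a minimal sketch (the error value is constructed by hand here purely for illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Stand-in for the error NewStream now returns when the SubConn isn't READY.
	err := status.Errorf(codes.Unavailable, "SubConn state is not Ready")
	if status.Code(err) == codes.Unavailable {
		fmt.Println("not ready yet, retry later:", err)
	}
}
```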
@@ -307,15 +391,15 @@ type refCountedProducer struct {
 }
 
 func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
-	acbw.mu.Lock()
-	defer acbw.mu.Unlock()
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
 
 	// Look up existing producer from this builder.
 	pData := acbw.producers[pb]
 	if pData == nil {
 		// Not found; create a new one and add it to the producers map.
-		p, close := pb.Build(acbw)
-		pData = &refCountedProducer{producer: p, close: close}
+		p, closeFn := pb.Build(acbw)
+		pData = &refCountedProducer{producer: p, close: closeFn}
 		acbw.producers[pb] = pData
 	}
 	// Account for this new reference.
@@ -325,13 +409,112 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
 	// and delete the refCountedProducer from the map if the total reference
 	// count goes to zero.
 	unref := func() {
-		acbw.mu.Lock()
+		acbw.producersMu.Lock()
+		// If closeProducers has already closed this producer instance, refs is
+		// set to 0, so the check after decrementing will never pass, and the
+		// producer will not be double-closed.
 		pData.refs--
 		if pData.refs == 0 {
 			defer pData.close() // Run outside the acbw mutex
 			delete(acbw.producers, pb)
 		}
-		acbw.mu.Unlock()
+		acbw.producersMu.Unlock()
 	}
-	return pData.producer, grpcsync.OnceFunc(unref)
+	return pData.producer, sync.OnceFunc(unref)
+}
+
+func (acbw *acBalancerWrapper) closeProducers() {
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
+	for pb, pData := range acbw.producers {
+		pData.refs = 0
+		pData.close()
+		delete(acbw.producers, pb)
+	}
+}
+
+// healthProducerRegisterFn is a type alias for the health producer's function
+// for registering listeners.
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
+
+// healthListenerRegFn returns a function to register a listener for health
+// updates. If client side health checks are disabled, the registered listener
+// will get a single READY (raw connectivity state) update.
+//
+// Client side health checking is enabled when all the following
+// conditions are satisfied:
+// 1. Health checking is not disabled using the dial option.
+// 2. The health package is imported.
+// 3. The health check config is present in the service config.
+func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
+	if acbw.ccb.cc.dopts.disableHealthCheck {
+		return noOpRegisterHealthListenerFn
+	}
+	regHealthLisFn := internal.RegisterClientHealthCheckListener
+	if regHealthLisFn == nil {
+		// The health package is not imported.
+		return noOpRegisterHealthListenerFn
+	}
+	cfg := acbw.ac.cc.healthCheckConfig()
+	if cfg == nil {
+		return noOpRegisterHealthListenerFn
+	}
+	return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
+		return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
+	}
+}
+
+// RegisterHealthListener accepts a health listener from the LB policy. It sends
+// updates to the health listener as long as the SubConn's connectivity state
+// doesn't change and a new health listener is not registered. To invalidate
+// the currently registered health listener, acbw updates the healthData. If a
+// nil listener is registered, the active health listener is dropped.
+func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
+	acbw.healthMu.Lock()
+	defer acbw.healthMu.Unlock()
+	acbw.healthData.closeHealthProducer()
+	// listeners should not be registered when the connectivity state
+	// isn't Ready. This may happen when the balancer registers a listener
+	// after the connectivityState is updated, but before it is notified
+	// of the update.
+	if acbw.healthData.connectivityState != connectivity.Ready {
+		return
+	}
+	// Replace the health data to stop sending updates to any previously
+	// registered health listeners.
+	hd := newHealthData(connectivity.Ready)
+	acbw.healthData = hd
+	if listener == nil {
+		return
+	}
+
+	registerFn := acbw.healthListenerRegFn()
+	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Don't send updates if a new listener is registered.
+		acbw.healthMu.Lock()
+		defer acbw.healthMu.Unlock()
+		if acbw.healthData != hd {
+			return
+		}
+		// Serialize the health updates from the health producer with
+		// other calls into the LB policy.
+		listenerWrapper := func(scs balancer.SubConnState) {
+			acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+				if ctx.Err() != nil || acbw.ccb.balancer == nil {
+					return
+				}
+				acbw.healthMu.Lock()
+				defer acbw.healthMu.Unlock()
+				if acbw.healthData != hd {
+					return
+				}
+				listener(scs)
+			})
+		}
+
+		hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
+	})
+}
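Beyond the health-listener plumbing, the final hunk changes producer cleanup in two ways: unref is now wrapped in the standard library's sync.OnceFunc (Go 1.21+) instead of grpcsync.OnceFunc, and the new closeProducers zeroes refs so that a late unref can never double-close a producer. A self-contained sketch of that bookkeeping (refCounted, acquire, and closeAll are hypothetical names standing in for the vendored types):

```go
package main

import (
	"fmt"
	"sync"
)

// refCounted mirrors the diff's refCountedProducer bookkeeping: each
// GetOrBuild-style call bumps refs, each returned cleanup decrements it, and
// a forced closeAll zeroes refs so late cleanups can't double-close.
type refCounted struct {
	mu    sync.Mutex
	refs  int
	close func()
}

func (r *refCounted) acquire() func() {
	r.mu.Lock()
	r.refs++
	r.mu.Unlock()
	// sync.OnceFunc makes the cleanup safe to call any number of times while
	// decrementing only once, matching the diff's use of sync.OnceFunc(unref).
	return sync.OnceFunc(func() {
		r.mu.Lock()
		defer r.mu.Unlock()
		r.refs--
		if r.refs == 0 {
			r.close()
		}
	})
}

// closeAll mirrors closeProducers: zero the count first, so a later cleanup's
// decrement goes negative and the refs == 0 check never re-triggers close.
func (r *refCounted) closeAll() {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.refs = 0
	r.close()
}

func main() {
	r := &refCounted{close: func() { fmt.Println("producer closed") }}
	release := r.acquire()
	r.closeAll() // e.g. SubConn state changed: prints "producer closed"
	release()    // refs goes to -1; close is not run a second time
	release()    // OnceFunc: no effect at all
}
```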