Mirror of https://github.com/cloudflare/cloudflared.git (synced 2025-07-27 15:59:57 +00:00)
Revert "TUN-7065: Remove classic tunnel creation"
This reverts commit c24f275981.
@@ -14,9 +14,11 @@ import (
	"github.com/cloudflare/cloudflared/connection"
	"github.com/cloudflare/cloudflared/edgediscovery"
	"github.com/cloudflare/cloudflared/h2mux"
	"github.com/cloudflare/cloudflared/orchestration"
	"github.com/cloudflare/cloudflared/retry"
	"github.com/cloudflare/cloudflared/signal"
	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
	"github.com/cloudflare/cloudflared/tunnelstate"
)
@@ -55,6 +57,7 @@ type Supervisor struct {
	logTransport *zerolog.Logger

	reconnectCredentialManager *reconnectCredentialManager
	useReconnectToken          bool

	reconnectCh       chan ReconnectSignal
	gracefulShutdownC <-chan struct{}
@@ -105,6 +108,11 @@ func NewSupervisor(config *TunnelConfig, orchestrator *orchestration.Orchestrato
		connAwareLogger: log,
	}

	useReconnectToken := false
	if config.ClassicTunnel != nil {
		useReconnectToken = config.ClassicTunnel.UseReconnectToken
	}

	return &Supervisor{
		cloudflaredUUID: cloudflaredUUID,
		config:          config,
@@ -117,6 +125,7 @@ func NewSupervisor(config *TunnelConfig, orchestrator *orchestration.Orchestrato
		log:                        log,
		logTransport:               config.LogTransport,
		reconnectCredentialManager: reconnectCredentialManager,
		useReconnectToken:          useReconnectToken,
		reconnectCh:                reconnectCh,
		gracefulShutdownC:          gracefulShutdownC,
	}, nil
@@ -150,6 +159,20 @@ func (s *Supervisor) Run(
	backoff := retry.BackoffHandler{MaxRetries: s.config.Retries, BaseTime: tunnelRetryDuration, RetryForever: true}
	var backoffTimer <-chan time.Time

	refreshAuthBackoff := &retry.BackoffHandler{MaxRetries: refreshAuthMaxBackoff, BaseTime: refreshAuthRetryDuration, RetryForever: true}
	var refreshAuthBackoffTimer <-chan time.Time

	if s.useReconnectToken {
		if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
			refreshAuthBackoffTimer = timer
		} else {
			s.log.Logger().Err(err).
				Dur("refreshAuthRetryDuration", refreshAuthRetryDuration).
				Msgf("supervisor: initial refreshAuth failed, retrying in %v", refreshAuthRetryDuration)
			refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
		}
	}

	shuttingDown := false
	for {
		select {
@@ -196,6 +219,16 @@ func (s *Supervisor) Run(
			}
			tunnelsActive += len(tunnelsWaiting)
			tunnelsWaiting = nil
		// Time to call Authenticate
		case <-refreshAuthBackoffTimer:
			newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
			if err != nil {
				s.log.Logger().Err(err).Msg("supervisor: Authentication failed")
				// Permanent failure. Leave the `select` without setting the
				// channel to be non-null, so we'll never hit this case of the `select` again.
				continue
			}
			refreshAuthBackoffTimer = newTimer
		// Tunnel successfully connected
		case <-s.nextConnectedSignal:
			if !s.waitForNextTunnel(s.nextConnectedIndex) && len(tunnelsWaiting) == 0 {
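The "never hit this case" behavior in the hunk above relies on a Go language guarantee rather than anything cloudflared-specific: a receive from a nil channel blocks forever, so a `select` case that reads from a nil channel is effectively disabled while the other cases keep working. A minimal standalone sketch of that pattern (illustrative only, not part of this commit):

package main

import (
	"fmt"
	"time"
)

func main() {
	// refreshTimer stands in for refreshAuthBackoffTimer: while it is nil,
	// its case below can never be chosen, because receiving from a nil
	// channel blocks forever.
	var refreshTimer <-chan time.Time

	select {
	case <-refreshTimer:
		fmt.Println("disabled while the channel is nil")
	case <-time.After(100 * time.Millisecond):
		fmt.Println("other cases still fire normally")
	}
}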
@@ -344,3 +377,44 @@ func (s *Supervisor) waitForNextTunnel(index int) bool {
func (s *Supervisor) unusedIPs() bool {
	return s.edgeIPs.AvailableAddrs() > s.config.HAConnections
}

func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int) (tunnelpogs.AuthOutcome, error) {
	arbitraryEdgeIP, err := s.edgeIPs.GetAddrForRPC()
	if err != nil {
		return nil, err
	}

	edgeConn, err := edgediscovery.DialEdge(ctx, dialTimeout, s.config.EdgeTLSConfigs[connection.H2mux], arbitraryEdgeIP.TCP)
	if err != nil {
		return nil, err
	}
	defer edgeConn.Close()

	handler := h2mux.MuxedStreamFunc(func(*h2mux.MuxedStream) error {
		// This callback is invoked by h2mux when the edge initiates a stream.
		return nil // noop
	})
	muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.logTransport)
	muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams)
	if err != nil {
		return nil, err
	}
	go muxer.Serve(ctx)
	defer func() {
		// If we don't wait for the muxer shutdown here, edgeConn.Close() runs before the muxer connections are done,
		// and the user sees log noise: "error writing data", "connection closed unexpectedly"
		<-muxer.Shutdown()
	}()

	stream, err := muxer.OpenRPCStream(ctx)
	if err != nil {
		return nil, err
	}
	rpcClient := connection.NewTunnelServerClient(ctx, stream, s.log.Logger())
	defer rpcClient.Close()

	const arbitraryConnectionID = uint8(0)
	registrationOptions := s.config.registrationOptions(arbitraryConnectionID, edgeConn.LocalAddr().String(), s.cloudflaredUUID)
	registrationOptions.NumPreviousAttempts = uint8(numPreviousAttempts)
	return rpcClient.Authenticate(ctx, s.config.ClassicTunnel, registrationOptions)
}
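The shutdown ordering described in the comment inside `authenticate` (wait for the muxer to drain before `edgeConn.Close()` runs) falls out of Go's defer semantics: deferred calls execute in last-in, first-out order, so the later `defer func() { <-muxer.Shutdown() }()` runs before the earlier `defer edgeConn.Close()`. A minimal standalone sketch of that ordering (illustrative only, not part of this commit):

package main

import "fmt"

func main() {
	// Registered first, runs last: stands in for defer edgeConn.Close().
	defer fmt.Println("3. close the edge connection")
	// Registered second, runs first: stands in for defer func() { <-muxer.Shutdown() }().
	defer fmt.Println("2. wait for the muxer to shut down")
	fmt.Println("1. authenticate body returns")
}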