TUN-3470: Replace in-house logger calls with zerolog

Author:    Areg Harutyunyan
Date:      2020-11-25 00:55:13 -06:00
Committer: Adam Chalmers
Parent:    06404bf3e8
Commit:    870f5fa907
151 changed files with 7120 additions and 3365 deletions

View File

@@ -13,10 +13,11 @@ import (
"github.com/cloudflare/cloudflared/buffer"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/ingress"
"github.com/cloudflare/cloudflared/logger"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/websocket"
"github.com/pkg/errors"
"github.com/rs/zerolog"
)
const (
@@ -26,15 +27,15 @@ const (
type client struct {
ingressRules ingress.Ingress
tags []tunnelpogs.Tag
logger logger.Service
log *zerolog.Logger
bufferPool *buffer.Pool
}
func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, logger logger.Service) connection.OriginClient {
func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, log *zerolog.Logger) connection.OriginClient {
return &client{
ingressRules: ingressRules,
tags: tags,
logger: logger,
log: log,
bufferPool: buffer.NewPool(512 * 1024),
}
}
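
For illustration, a minimal sketch of constructing the *zerolog.Logger that the new NewClient signature expects. The ConsoleWriter setup here is only an assumption; the diff does not show how cloudflared builds its logger.

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    // A console-friendly logger with timestamps; components share it by pointer.
    log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).
        With().
        Timestamp().
        Logger()

    log.Debug().Msg("logger ready")
    // client := NewClient(ingressRules, tags, &log) // matches the new signature above
}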
@@ -97,14 +98,14 @@ func (c *client) proxyHTTP(w connection.ResponseWriter, req *http.Request, rule
return nil, errors.Wrap(err, "Error writing response header")
}
if connection.IsServerSentEvent(resp.Header) {
c.logger.Debug("Detected Server-Side Events from Origin")
c.log.Debug().Msg("Detected Server-Side Events from Origin")
c.writeEventStream(w, resp.Body)
} else {
// Use CopyBuffer, because Copy only allocates a 32KiB buffer, and cross-stream
// compression generates dictionary on first write
buf := c.bufferPool.Get()
defer c.bufferPool.Put(buf)
io.CopyBuffer(w, resp.Body, buf)
_, _ = io.CopyBuffer(w, resp.Body, buf)
}
return resp, nil
}
@@ -129,7 +130,7 @@ func (c *client) proxyWebsocket(w connection.ResponseWriter, req *http.Request,
go func() {
// serveCtx is done if req is cancelled, or streamWebsocket returns
<-serveCtx.Done()
conn.Close()
_ = conn.Close()
close(connClosedChan)
}()
@@ -159,7 +160,7 @@ func (c *client) writeEventStream(w connection.ResponseWriter, respBody io.ReadC
if err != nil {
break
}
w.Write(line)
_, _ = w.Write(line)
}
}
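
Several hunks in this commit also discard previously unchecked return values with _ = and _, _ =. The commit does not state the motivation, but the pattern reads as an explicit, errcheck-friendly way of saying the result is intentionally ignored; a trivial illustration:

package main

import (
    "io"
    "os"
    "strings"
)

func main() {
    // The blank identifiers document that the byte count and error from this
    // best-effort copy are deliberately ignored.
    _, _ = io.Copy(os.Stdout, strings.NewReader("best-effort write\n"))
}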
@@ -171,46 +172,46 @@ func (c *client) appendTagHeaders(r *http.Request) {
func (c *client) logRequest(r *http.Request, cfRay string, lbProbe bool, ruleNum int) {
if cfRay != "" {
c.logger.Debugf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto)
c.log.Debug().Msgf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto)
} else if lbProbe {
c.logger.Debugf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto)
c.log.Debug().Msgf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto)
} else {
c.logger.Debugf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s ", r.Method, r.URL, r.Proto)
c.log.Debug().Msgf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s ", r.Method, r.URL, r.Proto)
}
c.logger.Debugf("CF-RAY: %s Request Headers %+v", cfRay, r.Header)
c.logger.Debugf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum)
c.log.Debug().Msgf("CF-RAY: %s Request Headers %+v", cfRay, r.Header)
c.log.Debug().Msgf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum)
if contentLen := r.ContentLength; contentLen == -1 {
c.logger.Debugf("CF-RAY: %s Request Content length unknown", cfRay)
c.log.Debug().Msgf("CF-RAY: %s Request Content length unknown", cfRay)
} else {
c.logger.Debugf("CF-RAY: %s Request content length %d", cfRay, contentLen)
c.log.Debug().Msgf("CF-RAY: %s Request content length %d", cfRay, contentLen)
}
}
func (c *client) logOriginResponse(r *http.Response, cfRay string, lbProbe bool, ruleNum int) {
responseByCode.WithLabelValues(strconv.Itoa(r.StatusCode)).Inc()
if cfRay != "" {
c.logger.Debugf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum)
c.log.Info().Msgf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum)
} else if lbProbe {
c.logger.Debugf("Response to Load Balancer health check %s", r.Status)
c.log.Debug().Msgf("Response to Load Balancer health check %s", r.Status)
} else {
c.logger.Debugf("Status: %s served by ingress %d", r.Status, ruleNum)
c.log.Debug().Msgf("Status: %s served by ingress %d", r.Status, ruleNum)
}
c.logger.Debugf("CF-RAY: %s Response Headers %+v", cfRay, r.Header)
c.log.Debug().Msgf("CF-RAY: %s Response Headers %+v", cfRay, r.Header)
if contentLen := r.ContentLength; contentLen == -1 {
c.logger.Debugf("CF-RAY: %s Response content length unknown", cfRay)
c.log.Debug().Msgf("CF-RAY: %s Response content length unknown", cfRay)
} else {
c.logger.Debugf("CF-RAY: %s Response content length %d", cfRay, contentLen)
c.log.Debug().Msgf("CF-RAY: %s Response content length %d", cfRay, contentLen)
}
}
func (c *client) logRequestError(err error, cfRay string, ruleNum int) {
requestErrors.Inc()
if cfRay != "" {
c.logger.Errorf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err)
c.log.Error().Msgf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err)
} else {
c.logger.Errorf("Proxying to ingress %d error: %v", ruleNum, err)
c.log.Error().Msgf("Proxying to ingress %d error: %v", ruleNum, err)
}
}
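
The logging changes above are mostly a mechanical mapping from the old interface to zerolog: Debugf becomes Debug().Msgf, Infof becomes Info().Msgf, Errorf becomes Error().Msgf, and message-only calls become Msg; the one exception is the CF-RAY status line in logOriginResponse, which is promoted from Debugf to Info(). For comparison, a sketch of the structured form zerolog is designed around, which this commit does not switch to (it keeps the printf-style messages):

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stderr)
    cfRay, ruleNum := "1234abcd", 0

    // printf-style, as in this commit:
    log.Debug().Msgf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum)

    // structured fields, queryable as JSON keys in the output:
    log.Debug().
        Str("cfRay", cfRay).
        Int("ingressRule", ruleNum).
        Msg("serving with ingress rule")
}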

View File

@@ -16,11 +16,11 @@ import (
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/hello"
"github.com/cloudflare/cloudflared/ingress"
"github.com/cloudflare/cloudflared/logger"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/urfave/cli/v2"
"github.com/gobwas/ws/wsutil"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -49,7 +49,7 @@ func (w *mockHTTPRespWriter) WriteRespHeaders(resp *http.Response) error {
func (w *mockHTTPRespWriter) WriteErrorResponse() {
w.WriteHeader(http.StatusBadGateway)
w.Write([]byte("http response error"))
_, _ = w.Write([]byte("http response error"))
}
func (w *mockHTTPRespWriter) Read(data []byte) (int, error) {
@@ -106,8 +106,7 @@ func (w *mockSSERespWriter) ReadBytes() []byte {
}
func TestProxySingleOrigin(t *testing.T) {
logger, err := logger.New()
require.NoError(t, err)
log := zerolog.Nop()
ctx, cancel := context.WithCancel(context.Background())
@@ -115,18 +114,18 @@ func TestProxySingleOrigin(t *testing.T) {
flagSet.Bool("hello-world", true, "")
cliCtx := cli.NewContext(cli.NewApp(), flagSet, nil)
err = cliCtx.Set("hello-world", "true")
err := cliCtx.Set("hello-world", "true")
require.NoError(t, err)
allowURLFromArgs := false
ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs, logger)
ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs)
require.NoError(t, err)
var wg sync.WaitGroup
errC := make(chan error)
ingressRule.StartOrigins(&wg, logger, ctx.Done(), errC)
ingressRule.StartOrigins(&wg, &log, ctx.Done(), errC)
client := NewClient(ingressRule, testTags, logger)
client := NewClient(ingressRule, testTags, &log)
t.Run("testProxyHTTP", testProxyHTTP(t, client))
t.Run("testProxyWebsocket", testProxyWebsocket(t, client))
t.Run("testProxySSE", testProxySSE(t, client))
@@ -191,7 +190,7 @@ func testProxySSE(t *testing.T, client connection.OriginClient) func(t *testing.
return func(t *testing.T) {
var (
pushCount = 50
pushFreq = time.Duration(time.Millisecond * 10)
pushFreq = time.Millisecond * 10
)
respWriter := newMockSSERespWriter()
ctx, cancel := context.WithCancel(context.Background())
@@ -252,15 +251,14 @@ func TestProxyMultipleOrigins(t *testing.T) {
})
require.NoError(t, err)
logger, err := logger.New()
require.NoError(t, err)
log := zerolog.Nop()
ctx, cancel := context.WithCancel(context.Background())
errC := make(chan error)
var wg sync.WaitGroup
ingress.StartOrigins(&wg, logger, ctx.Done(), errC)
ingress.StartOrigins(&wg, &log, ctx.Done(), errC)
client := NewClient(ingress, testTags, logger)
client := NewClient(ingress, testTags, &log)
tests := []struct {
url string
@@ -314,7 +312,7 @@ type mockAPI struct{}
func (ma mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusCreated)
w.Write([]byte("Created"))
_, _ = w.Write([]byte("Created"))
}
type errorOriginTransport struct{}
@@ -336,10 +334,9 @@ func TestProxyError(t *testing.T) {
},
}
logger, err := logger.New()
require.NoError(t, err)
log := zerolog.Nop()
client := NewClient(ingress, testTags, logger)
client := NewClient(ingress, testTags, &log)
respWriter := newMockHTTPRespWriter()
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1", nil)

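The tests replace the fallible logger.New() setup with zerolog.Nop(), which returns a logger that discards every event and needs no error check. A minimal sketch of that pattern with a hypothetical helper:

package example

import (
    "testing"

    "github.com/rs/zerolog"
)

// doWork stands in for any function that now takes a *zerolog.Logger.
func doWork(log *zerolog.Logger) int {
    log.Debug().Msg("doing work") // silently dropped by the Nop logger
    return 42
}

func TestDoWork(t *testing.T) {
    log := zerolog.Nop() // no error to check, unlike the old logger.New()
    if got := doWork(&log); got != 42 {
        t.Fatalf("got %d, want 42", got)
    }
}
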
View File

@@ -6,14 +6,14 @@ import (
"net"
"time"
"github.com/google/uuid"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/edgediscovery"
"github.com/cloudflare/cloudflared/h2mux"
"github.com/cloudflare/cloudflared/logger"
"github.com/cloudflare/cloudflared/signal"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/google/uuid"
"github.com/rs/zerolog"
)
const (
@@ -50,7 +50,7 @@ type Supervisor struct {
nextConnectedIndex int
nextConnectedSignal chan struct{}
logger logger.Service
log *zerolog.Logger
reconnectCredentialManager *reconnectCredentialManager
useReconnectToken bool
@@ -68,9 +68,9 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor
err error
)
if len(config.EdgeAddrs) > 0 {
edgeIPs, err = edgediscovery.StaticEdge(config.Logger, config.EdgeAddrs)
edgeIPs, err = edgediscovery.StaticEdge(config.Log, config.EdgeAddrs)
} else {
edgeIPs, err = edgediscovery.ResolveEdge(config.Logger)
edgeIPs, err = edgediscovery.ResolveEdge(config.Log)
}
if err != nil {
return nil, err
@@ -87,7 +87,7 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor
edgeIPs: edgeIPs,
tunnelErrors: make(chan tunnelError),
tunnelsConnecting: map[int]chan struct{}{},
logger: config.Logger,
log: config.Log,
reconnectCredentialManager: newReconnectCredentialManager(connection.MetricsNamespace, connection.TunnelSubsystem, config.HAConnections),
useReconnectToken: useReconnectToken,
}, nil
@@ -110,7 +110,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
refreshAuthBackoffTimer = timer
} else {
s.logger.Errorf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err)
s.log.Error().Msgf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err)
refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
}
}
@@ -129,7 +129,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
case tunnelError := <-s.tunnelErrors:
tunnelsActive--
if tunnelError.err != nil {
s.logger.Infof("supervisor: Tunnel disconnected due to error: %s", tunnelError.err)
s.log.Info().Msgf("supervisor: Tunnel disconnected due to error: %s", tunnelError.err)
tunnelsWaiting = append(tunnelsWaiting, tunnelError.index)
s.waitForNextTunnel(tunnelError.index)
@@ -152,7 +152,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
case <-refreshAuthBackoffTimer:
newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
if err != nil {
s.logger.Errorf("supervisor: Authentication failed: %s", err)
s.log.Error().Msgf("supervisor: Authentication failed: %s", err)
// Permanent failure. Leave the `select` without setting the
// channel to be non-null, so we'll never hit this case of the `select` again.
continue
@@ -172,7 +172,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
func (s *Supervisor) initialize(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error {
availableAddrs := int(s.edgeIPs.AvailableAddrs())
if s.config.HAConnections > availableAddrs {
s.logger.Infof("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs)
s.log.Info().Msgf("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs)
s.config.HAConnections = availableAddrs
}
@@ -295,7 +295,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int)
// This callback is invoked by h2mux when the edge initiates a stream.
return nil // noop
})
muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.logger)
muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.log)
muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams)
if err != nil {
return nil, err
@@ -311,7 +311,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int)
if err != nil {
return nil, err
}
rpcClient := connection.NewTunnelServerClient(ctx, stream, s.logger)
rpcClient := connection.NewTunnelServerClient(ctx, stream, s.log)
defer rpcClient.Close()
const arbitraryConnectionID = uint8(0)

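Supervisor now threads one *zerolog.Logger through its fields and RPC helpers. Not shown in this commit, but easy with zerolog, is deriving a child logger per HA connection so every event carries the connection index automatically; a sketch:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stderr)

    for connIndex := 0; connIndex < 4; connIndex++ {
        // Child logger: every event through connLog includes connIndex as a field.
        connLog := log.With().Int("connIndex", connIndex).Logger()
        connLog.Info().Msg("supervisor: starting tunnel connection")
    }
}
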
View File

@@ -13,13 +13,13 @@ import (
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/edgediscovery"
"github.com/cloudflare/cloudflared/h2mux"
"github.com/cloudflare/cloudflared/logger"
"github.com/cloudflare/cloudflared/signal"
"github.com/cloudflare/cloudflared/tunnelrpc"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
@@ -55,7 +55,7 @@ type TunnelConfig struct {
IsFreeTunnel bool
LBPool string
Tags []tunnelpogs.Tag
Logger logger.Service
Log *zerolog.Logger
Observer *connection.Observer
ReportedVersion string
Retries uint
@@ -235,7 +235,7 @@ func waitForBackoff(
}
config.Observer.SendReconnect(connIndex)
config.Logger.Infof("Retrying connection %d in %s seconds, error %v", connIndex, duration, err)
config.Log.Info().Msgf("Retrying connection %d in %s seconds, error %v", connIndex, duration, err)
protobackoff.Backoff(ctx)
if protobackoff.ReachedMaxRetries() {
@@ -247,13 +247,13 @@ func waitForBackoff(
if protobackoff.protocol == fallback {
return err
}
config.Logger.Infof("Fallback to use %s", fallback)
config.Log.Info().Msgf("Fallback to use %s", fallback)
protobackoff.fallback(fallback)
} else if !protobackoff.inFallback {
current := config.ProtocolSelector.Current()
if protobackoff.protocol != current {
protobackoff.protocol = current
config.Logger.Infof("Change protocol to %s", current)
config.Log.Info().Msgf("Change protocol to %s", current)
}
}
return nil
@@ -311,9 +311,16 @@ func ServeH2mux(
cloudflaredUUID uuid.UUID,
reconnectCh chan ReconnectSignal,
) (err error, recoverable bool) {
config.Logger.Debugf("Connecting via h2mux")
config.Log.Debug().Msgf("Connecting via h2mux")
// Returns error from parsing the origin URL or handshake errors
handler, err, recoverable := connection.NewH2muxConnection(ctx, config.ConnectionConfig, config.MuxerConfig, edgeConn, connectionIndex, config.Observer)
handler, err, recoverable := connection.NewH2muxConnection(
ctx,
config.ConnectionConfig,
config.MuxerConfig,
edgeConn,
connectionIndex,
config.Observer,
)
if err != nil {
return err, recoverable
}
@@ -338,29 +345,29 @@ func ServeH2mux(
// don't retry this connection anymore, let supervisor pick new a address
return err, false
case *serverRegisterTunnelError:
config.Logger.Errorf("Register tunnel error from server side: %s", err.cause)
config.Log.Error().Msgf("Register tunnel error from server side: %s", err.cause)
// Don't send registration error return from server to Sentry. They are
// logged on server side
if incidents := config.IncidentLookup.ActiveIncidents(); len(incidents) > 0 {
config.Logger.Error(activeIncidentsMsg(incidents))
config.Log.Error().Msg(activeIncidentsMsg(incidents))
}
return err.cause, !err.permanent
case *clientRegisterTunnelError:
config.Logger.Errorf("Register tunnel error on client side: %s", err.cause)
config.Log.Error().Msgf("Register tunnel error on client side: %s", err.cause)
return err, true
case *muxerShutdownError:
config.Logger.Info("Muxer shutdown")
config.Log.Info().Msg("Muxer shutdown")
return err, true
case *ReconnectSignal:
config.Logger.Infof("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay)
config.Log.Info().Msgf("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay)
err.DelayBeforeReconnect()
return err, true
default:
if err == context.Canceled {
config.Logger.Debugf("Serve tunnel error: %s", err)
config.Log.Debug().Msgf("Serve tunnel error: %s", err)
return err, false
}
config.Logger.Errorf("Serve tunnel error: %s", err)
config.Log.Error().Msgf("Serve tunnel error: %s", err)
return err, true
}
}
@@ -376,8 +383,16 @@ func ServeHTTP2(
connectedFuse connection.ConnectedFuse,
reconnectCh chan ReconnectSignal,
) (err error, recoverable bool) {
config.Logger.Debugf("Connecting via http2")
server := connection.NewHTTP2Connection(tlsServerConn, config.ConnectionConfig, config.NamedTunnel, connOptions, config.Observer, connIndex, connectedFuse)
config.Log.Debug().Msgf("Connecting via http2")
server := connection.NewHTTP2Connection(
tlsServerConn,
config.ConnectionConfig,
config.NamedTunnel,
connOptions,
config.Observer,
connIndex,
connectedFuse,
)
errGroup, serveCtx := errgroup.WithContext(ctx)
errGroup.Go(func() error {

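With TunnelConfig carrying Log *zerolog.Logger instead of logger.Service, verbosity is governed by zerolog levels. How cloudflared wires its log-level flag is outside this diff, but the two usual mechanisms look like this:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    // Per-logger minimum level: events below InfoLevel are dropped.
    log := zerolog.New(os.Stderr).Level(zerolog.InfoLevel)
    log.Debug().Msg("suppressed")
    log.Info().Msg("emitted")

    // Global ceiling, applied to every zerolog logger in the process.
    zerolog.SetGlobalLevel(zerolog.ErrorLevel)
    log.Info().Msg("now suppressed as well")
}
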
View File

@@ -7,7 +7,8 @@ import (
"time"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/logger"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
)
@@ -31,8 +32,7 @@ func TestWaitForBackoffFallback(t *testing.T) {
BaseTime: time.Millisecond * 10,
}
ctx := context.Background()
logger, err := logger.New()
assert.NoError(t, err)
log := zerolog.Nop()
resolveTTL := time.Duration(0)
namedTunnel := &connection.NamedTunnelConfig{
Credentials: connection.Credentials{
@@ -42,10 +42,16 @@ func TestWaitForBackoffFallback(t *testing.T) {
mockFetcher := dynamicMockFetcher{
percentage: 0,
}
protocolSelector, err := connection.NewProtocolSelector(connection.HTTP2.String(), namedTunnel, mockFetcher.fetch(), resolveTTL, logger)
protocolSelector, err := connection.NewProtocolSelector(
connection.HTTP2.String(),
namedTunnel,
mockFetcher.fetch(),
resolveTTL,
&log,
)
assert.NoError(t, err)
config := &TunnelConfig{
Logger: logger,
Log: &log,
ProtocolSelector: protocolSelector,
Observer: connection.NewObserver(nil, nil, false),
}
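
Where a test needs to assert on log output rather than silence it with zerolog.Nop(), zerolog can write its JSON events into a buffer. This is not something the tests in this commit do; a sketch under that assumption:

package example

import (
    "bytes"
    "strings"
    "testing"

    "github.com/rs/zerolog"
)

func TestLogCapture(t *testing.T) {
    var buf bytes.Buffer
    log := zerolog.New(&buf) // each event becomes one JSON line in buf

    log.Info().Str("protocol", "http2").Msg("Change protocol")

    if !strings.Contains(buf.String(), `"protocol":"http2"`) {
        t.Fatalf("expected protocol field in output, got %s", buf.String())
    }
}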