TUN-3470: Replace in-house logger calls with zerolog

Areg Harutyunyan
2020-11-25 00:55:13 -06:00
committed by Adam Chalmers
parent 06404bf3e8
commit 870f5fa907
151 changed files with 7120 additions and 3365 deletions

View File

@@ -29,7 +29,7 @@ type RollingConfig struct {
}
func createDefaultConfig() Config {
const minLevel = "fatal"
const minLevel = "info"
const RollingMaxSize = 1 // Mb
const RollingMaxBackups = 5 // files
@@ -57,7 +57,7 @@ func createDefaultConfig() Config {
func CreateConfig(
minLevel string,
disableTerminal bool,
rollingLogPath, nonRollingLogFilePath string,
rollingLogPath, rollingLogFilename, nonRollingLogFilePath string,
) *Config {
var console *ConsoleConfig
if !disableTerminal {
@@ -71,7 +71,7 @@ func CreateConfig(
var rolling *RollingConfig
if rollingLogPath != "" {
rolling = createRollingConfig(rollingLogPath)
rolling = createRollingConfig(rollingLogPath, rollingLogFilename)
}
if minLevel == "" {
@@ -103,14 +103,14 @@ func createFileConfig(filepath string) *FileConfig {
}
}
func createRollingConfig(directory string) *RollingConfig {
func createRollingConfig(directory, filename string) *RollingConfig {
if directory == "" {
directory = defaultConfig.RollingConfig.Directory
}
return &RollingConfig{
Directory: directory,
Filename: defaultConfig.RollingConfig.Filename,
Filename: filename,
maxSize: defaultConfig.RollingConfig.maxSize,
maxBackups: defaultConfig.RollingConfig.maxBackups,
maxAge: defaultConfig.RollingConfig.maxAge,
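
For illustration only (not part of this commit): a minimal sketch of calling the updated CreateConfig signature shown above. The import path and the concrete filename value are assumptions; the diff itself only passes defaultConfig.RollingConfig.Filename through.

package main

import "github.com/cloudflare/cloudflared/logger"

func main() {
	// Rolling file logging under a directory, with a caller-chosen filename
	// (the new parameter introduced by this commit).
	cfg := logger.CreateConfig(
		"info",                 // minLevel (default is now "info" rather than "fatal")
		false,                  // disableTerminal
		"/var/log/cloudflared", // rollingLogPath
		"cloudflared.log",      // rollingLogFilename (hypothetical value)
		"",                     // nonRollingLogFilePath
	)
	_ = cfg
}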

View File

@@ -1,13 +1,11 @@
package logger
import (
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/alecthomas/units"
"github.com/rs/zerolog"
fallbacklog "github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
)
@@ -24,162 +22,40 @@ const (
LogSSHLevelFlag = "log-level"
)
// Option encapsulates actions that will be called by Parse and run later to build an Options struct
type Option func(*Options) error
func newZerolog(loggerConfig *Config) *zerolog.Logger {
var writers []io.Writer
// Options is used to set logging configuration data
type Options struct {
logFileDirectory string
maxFileSize units.Base2Bytes
maxFileCount uint
terminalOutputDisabled bool
supportedFileLevels []Level
supportedTerminalLevels []Level
}
// DisableTerminal stops terminal output for the logger
func DisableTerminal(disable bool) Option {
return func(c *Options) error {
c.terminalOutputDisabled = disable
return nil
if loggerConfig.ConsoleConfig != nil {
writers = append(writers, zerolog.ConsoleWriter{
Out: os.Stderr,
NoColor: loggerConfig.ConsoleConfig.noColor,
})
}
}
// File sets a custom file to log events
func File(path string, size units.Base2Bytes, count uint) Option {
return func(c *Options) error {
c.logFileDirectory = path
c.maxFileSize = size
c.maxFileCount = count
return nil
}
}
// TODO TUN-3472: Support file writer and log rotation
// DefaultFile configures the log options with the defaults
func DefaultFile(directoryPath string) Option {
return func(c *Options) error {
size, err := units.ParseBase2Bytes("1MB")
if err != nil {
return err
}
c.logFileDirectory = directoryPath
c.maxFileSize = size
c.maxFileCount = 5
return nil
}
}
// SupportedFileLevels sets the supported logging levels for the log file
func SupportedFileLevels(supported []Level) Option {
return func(c *Options) error {
c.supportedFileLevels = supported
return nil
}
}
// SupportedTerminalevels sets the supported logging levels for the terminal output
func SupportedTerminalevels(supported []Level) Option {
return func(c *Options) error {
c.supportedTerminalLevels = supported
return nil
}
}
// LogLevelString sets the supported logging levels from a command line flag
func LogLevelString(level string) Option {
return func(c *Options) error {
supported, err := ParseLevelString(level)
if err != nil {
return err
}
c.supportedFileLevels = supported
c.supportedTerminalLevels = supported
return nil
}
}
// Parse builds the Options struct so the caller knows what actions should be run
func Parse(opts ...Option) (*Options, error) {
options := &Options{}
for _, opt := range opts {
if err := opt(options); err != nil {
return nil, err
}
}
return options, nil
}
// New sets up a new logger based on the options.
// The default behavior is to write to standard out
func New(opts ...Option) (*OutputWriter, error) {
options, err := Parse(opts...)
multi := zerolog.MultiLevelWriter(writers...)
level, err := zerolog.ParseLevel(loggerConfig.MinLevel)
if err != nil {
return nil, err
failLog := fallbacklog.With().Logger()
fallbacklog.Error().Msgf("Falling back to a default logger due to logger setup failure: %s", err)
return &failLog
}
log := zerolog.New(multi).With().Timestamp().Logger().Level(level)
l := NewOutputWriter(SharedWriteManager)
if options.logFileDirectory != "" {
l.Add(NewFileRollingWriter(SanitizeLogPath(options.logFileDirectory),
"cloudflared",
int64(options.maxFileSize),
options.maxFileCount),
NewDefaultFormatter(time.RFC3339Nano), options.supportedFileLevels...)
}
if !options.terminalOutputDisabled {
terminalFormatter := NewTerminalFormatter(time.RFC3339)
if len(options.supportedTerminalLevels) == 0 {
l.Add(os.Stderr, terminalFormatter, InfoLevel, ErrorLevel, FatalLevel)
} else {
l.Add(os.Stderr, terminalFormatter, options.supportedTerminalLevels...)
}
}
return l, nil
return &log
}
func NewInHouse(loggerConfig *Config) (*OutputWriter, error) {
var loggerOpts []Option
var logPath string
if loggerConfig.FileConfig != nil {
logPath = loggerConfig.FileConfig.Filepath
}
if logPath == "" && loggerConfig.RollingConfig != nil {
logPath = loggerConfig.RollingConfig.Directory
}
if logPath != "" {
loggerOpts = append(loggerOpts, DefaultFile(logPath))
}
loggerOpts = append(loggerOpts, LogLevelString(loggerConfig.MinLevel))
if loggerConfig.ConsoleConfig == nil {
disableOption := DisableTerminal(true)
loggerOpts = append(loggerOpts, disableOption)
}
l, err := New(loggerOpts...)
if err != nil {
return nil, err
}
return l, nil
}
func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal)
}
func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal)
}
func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal)
}
@@ -188,37 +64,26 @@ func createFromContext(
logLevelFlagName,
logDirectoryFlagName string,
disableTerminal bool,
) (*OutputWriter, error) {
) *zerolog.Logger {
logLevel := c.String(logLevelFlagName)
logFile := c.String(LogFileFlag)
logDirectory := c.String(logDirectoryFlagName)
loggerConfig := CreateConfig(logLevel, disableTerminal, logDirectory, logFile)
loggerConfig := CreateConfig(
logLevel,
disableTerminal,
logDirectory,
defaultConfig.RollingConfig.Filename,
logFile,
)
return NewInHouse(loggerConfig)
return newZerolog(loggerConfig)
}
// ParseLevelString returns the expected log levels based on the cmd flag
func ParseLevelString(lvl string) ([]Level, error) {
switch strings.ToLower(lvl) {
case "fatal":
return []Level{FatalLevel}, nil
case "error":
return []Level{FatalLevel, ErrorLevel}, nil
case "info", "warn":
return []Level{FatalLevel, ErrorLevel, InfoLevel}, nil
case "debug":
return []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, nil
func Create(loggerConfig *Config) *zerolog.Logger {
if loggerConfig == nil {
loggerConfig = &defaultConfig
}
return []Level{}, fmt.Errorf("not a valid log level: %q", lvl)
}
// SanitizeLogPath trims the log path and ensures it has a .log extension unless it refers to a directory
func SanitizeLogPath(path string) string {
newPath := strings.TrimSpace(path)
// make sure it has a log file extension and is not a directory
if filepath.Ext(newPath) != ".log" && !(isDirectory(newPath) || strings.HasSuffix(newPath, "/")) {
newPath = newPath + ".log"
}
return newPath
return newZerolog(loggerConfig)
}
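
For illustration only (not part of this commit): a minimal sketch of the new call pattern, assuming this package is imported as github.com/cloudflare/cloudflared/logger. Callers now receive a *zerolog.Logger with no error; if the configured MinLevel cannot be parsed, newZerolog reports the problem through zerolog's global fallback logger and returns that logger instead of failing.

package main

import "github.com/cloudflare/cloudflared/logger"

func main() {
	// nil selects defaultConfig: console writer on stderr, "info" level.
	log := logger.Create(nil)

	// Standard zerolog usage replaces the old Info/Infof-style helpers.
	log.Info().Msg("tunnel connected")
	log.Error().Msgf("dial failed: %s", "connection refused")
}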

View File

@@ -1,46 +0,0 @@
package logger
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestLogLevelParse(t *testing.T) {
lvls, err := ParseLevelString("fatal")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel}, lvls)
lvls, err = ParseLevelString("error")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel, ErrorLevel}, lvls)
lvls, err = ParseLevelString("info")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
lvls, err = ParseLevelString("info")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
lvls, err = ParseLevelString("warn")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
lvls, err = ParseLevelString("debug")
assert.NoError(t, err)
assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, lvls)
_, err = ParseLevelString("blah")
assert.Error(t, err)
_, err = ParseLevelString("")
assert.Error(t, err)
}
func TestPathSanitizer(t *testing.T) {
assert.Equal(t, "somebad/path/log.bat.log", SanitizeLogPath("\t somebad/path/log.bat\n\n"))
assert.Equal(t, "proper/path/cloudflared.log", SanitizeLogPath("proper/path/cloudflared.log"))
assert.Equal(t, "proper/path/", SanitizeLogPath("proper/path/"))
assert.Equal(t, "proper/path/cloudflared.log", SanitizeLogPath("\tproper/path/cloudflared\n\n"))
}

View File

@@ -1,125 +0,0 @@
package logger
import (
"fmt"
"os"
"path/filepath"
)
// FileRollingWriter maintains a set of log files numbered in order
// to keep a subset of log data and ensure it doesn't grow past defined limits
type FileRollingWriter struct {
baseFileName string
directory string
maxFileSize int64
maxFileCount uint
fileHandle *os.File
}
// NewFileRollingWriter creates a new rolling file writer.
// directory is the working directory for the files
// baseFileName is the log file name. This writer appends .log to it to form the file name
// maxFileSize is the maximum size in bytes each file can grow to. Not a hard limit; it is checked after each write
// maxFileCount is the number of rolled files to keep.
func NewFileRollingWriter(directory, baseFileName string, maxFileSize int64, maxFileCount uint) *FileRollingWriter {
return &FileRollingWriter{
directory: directory,
baseFileName: baseFileName,
maxFileSize: maxFileSize,
maxFileCount: maxFileCount,
}
}
// Write is an implementation of io.Writer that rolls the file once it reaches its max size
// It is expected that callers of Write do so in a thread-safe manner (as WriteManager does).
func (w *FileRollingWriter) Write(p []byte) (n int, err error) {
logFile, isSingleFile := buildPath(w.directory, w.baseFileName)
if w.fileHandle == nil {
h, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
if err != nil {
return 0, err
}
w.fileHandle = h
}
// get size for rolling check
info, err := w.fileHandle.Stat()
if err != nil {
// failed to stat the file. Close the file handle and attempt to open a new handle on the next write
w.Close()
w.fileHandle = nil
return 0, err
}
// write to the file
written, err := w.fileHandle.Write(p)
// check if the file needs to be rolled
if err == nil && info.Size()+int64(written) > w.maxFileSize && !isSingleFile {
// close the file handle, then do the renaming. A new one will be opened on the next write
w.Close()
w.rename(logFile, 1)
}
return written, err
}
// Close closes the file handle if it is open
func (w *FileRollingWriter) Close() {
if w.fileHandle != nil {
w.fileHandle.Close()
w.fileHandle = nil
}
}
// rename is how the files are rolled. It works recursively to move the base log file to the rolled ones
// e.g. cloudflared.log -> cloudflared-1.log,
// but if cloudflared-1.log already exists, it is renamed to cloudflared-2.log,
// then the other files move into their positions
func (w *FileRollingWriter) rename(sourcePath string, index uint) {
destinationPath, isSingleFile := buildPath(w.directory, fmt.Sprintf("%s-%d", w.baseFileName, index))
if isSingleFile {
return //don't need to rename anything, it is a single file
}
// rolled to the max amount of files allowed on disk
if index >= w.maxFileCount {
os.Remove(destinationPath)
}
// if the rolled path already exists, rename it to cloudflared-2.log, then do this one.
// recursive call since the oldest one needs to be renamed, before the newer ones can be moved
if exists(destinationPath) {
w.rename(destinationPath, index+1)
}
os.Rename(sourcePath, destinationPath)
}
// return the path to the log file and whether it is a single file or not.
// true means a single file. false means a rolled file
func buildPath(directory, fileName string) (string, bool) {
if !isDirectory(directory) { // not a directory, so try to treat it as a single file for backwards compatibility's sake
return directory, true
}
return filepath.Join(directory, fileName+".log"), false
}
func exists(filePath string) bool {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return false
}
return true
}
func isDirectory(path string) bool {
if path == "" {
return true
}
fileInfo, err := os.Stat(path)
if err != nil {
return false
}
return fileInfo.IsDir()
}
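
For illustration only: this commit removes FileRollingWriter and defers rotation to TUN-3472 (see the TODO above). One possible way rotation could later be wired into zerolog is to hand it a rotating io.Writer such as lumberjack; this is a hedged sketch of that idea, not what this commit (or necessarily TUN-3472) does.

package main

import (
	"github.com/rs/zerolog"
	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	// lumberjack.Logger is an io.Writer that rolls the file by size and
	// prunes old backups, roughly mirroring what FileRollingWriter did.
	rolling := &lumberjack.Logger{
		Filename:   "/var/log/cloudflared/cloudflared.log",
		MaxSize:    1, // megabytes, like RollingMaxSize above
		MaxBackups: 5, // files, like RollingMaxBackups above
	}
	log := zerolog.New(rolling).With().Timestamp().Logger()
	log.Info().Msg("rotated file logging")
}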

View File

@@ -1,108 +0,0 @@
package logger
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
func TestFileWrite(t *testing.T) {
fileName := "test_file"
fileLog := fileName + ".log"
testData := []byte(string("hello Dalton, how are you doing?"))
defer func() {
os.Remove(fileLog)
}()
w := NewFileRollingWriter("", fileName, 1000, 2)
defer w.Close()
l, err := w.Write(testData)
assert.NoError(t, err)
assert.Equal(t, l, len(testData), "expected write length and data length to match")
d, err := ioutil.ReadFile(fileLog)
assert.FileExists(t, fileLog, "file doesn't exist at expected path")
assert.Equal(t, d, testData, "expected data in file to match test data")
}
func TestRolling(t *testing.T) {
dirName := "testdir"
err := os.Mkdir(dirName, 0755)
assert.NoError(t, err)
fileName := "test_file"
firstFile := filepath.Join(dirName, fileName+".log")
secondFile := filepath.Join(dirName, fileName+"-1.log")
thirdFile := filepath.Join(dirName, fileName+"-2.log")
defer func() {
os.RemoveAll(dirName)
os.Remove(firstFile)
os.Remove(secondFile)
os.Remove(thirdFile)
}()
w := NewFileRollingWriter(dirName, fileName, 1000, 2)
defer w.Close()
for i := 99; i >= 1; i-- {
testData := []byte(fmt.Sprintf("%d bottles of beer on the wall...", i))
w.Write(testData)
}
assert.FileExists(t, firstFile, "first file doesn't exist as expected")
assert.FileExists(t, secondFile, "second file doesn't exist as expected")
assert.FileExists(t, thirdFile, "third file doesn't exist as expected")
assert.False(t, exists(filepath.Join(dirName, fileName+"-3.log")), "limited to two files and there is more")
}
func TestSingleFile(t *testing.T) {
fileName := "test_file"
testData := []byte(string("hello Dalton, how are you doing?"))
defer func() {
os.Remove(fileName)
}()
w := NewFileRollingWriter(fileName, fileName, 1000, 2)
defer w.Close()
l, err := w.Write(testData)
assert.NoError(t, err)
assert.Equal(t, l, len(testData), "expected write length and data length to match")
d, err := ioutil.ReadFile(fileName)
assert.FileExists(t, fileName, "file doesn't exist at expected path")
assert.Equal(t, d, testData, "expected data in file to match test data")
}
func TestSingleFileInDirectory(t *testing.T) {
dirName := "testdir"
err := os.Mkdir(dirName, 0755)
assert.NoError(t, err)
fileName := "test_file"
fullPath := filepath.Join(dirName, fileName+".log")
testData := []byte(string("hello Dalton, how are you doing?"))
defer func() {
os.Remove(fullPath)
os.RemoveAll(dirName)
}()
w := NewFileRollingWriter(fullPath, fileName, 1000, 2)
defer w.Close()
l, err := w.Write(testData)
assert.NoError(t, err)
assert.Equal(t, l, len(testData), "expected write length and data length to match")
d, err := ioutil.ReadFile(fullPath)
assert.FileExists(t, fullPath, "file doesn't exist at expected path")
assert.Equal(t, d, testData, "expected data in file to match test data")
}

View File

@@ -1,138 +0,0 @@
package logger
import (
"fmt"
"runtime"
"time"
"github.com/acmacalister/skittles"
)
// Level of logging, lower number means more verbose logging, higher more terse
type Level int
const (
// DebugLevel is for messages that are intended for debugging purposes only
DebugLevel Level = iota
// InfoLevel is for standard log messages
InfoLevel
// ErrorLevel is for error messages that indicate something has gone wrong
ErrorLevel
// FatalLevel is for error messages that are logged before killing the program with an os.Exit(1)
FatalLevel
)
// Formatter is the base interface for formatting logging messages before writing them out
type Formatter interface {
Timestamp(Level, time.Time) string // format the timestamp string
Content(Level, string) string // format content string (color for terminal, etc)
}
// DefaultFormatter writes a simple structure of timestamp and message per log line
type DefaultFormatter struct {
format string
}
// NewDefaultFormatter creates the standard log formatter
// format is the time format to use for timestamp formatting
func NewDefaultFormatter(format string) Formatter {
return &DefaultFormatter{
format: format,
}
}
// Timestamp formats a log line timestamp with brackets around it
func (f *DefaultFormatter) Timestamp(l Level, d time.Time) string {
if f.format == "" {
return ""
}
return fmt.Sprintf("[%s]: ", d.Format(f.format))
}
// Content just writes the log line straight to the sources
func (f *DefaultFormatter) Content(l Level, c string) string {
return c
}
// TerminalFormatter is set up for colored output
type TerminalFormatter struct {
format string
supportsColor bool
}
// UIFormatter is used for streaming logs to UI
type UIFormatter struct {
format string
supportsColor bool
}
// NewTerminalFormatter creates a Terminal formatter for colored output
// format is the time format to use for timestamp formatting
func NewTerminalFormatter(format string) Formatter {
supportsColor := (runtime.GOOS != "windows")
return &TerminalFormatter{
format: format,
supportsColor: supportsColor,
}
}
func NewUIFormatter(format string) Formatter {
supportsColor := (runtime.GOOS != "windows")
return &UIFormatter{
format: format,
supportsColor: supportsColor,
}
}
// Timestamp uses formatting that is tview-specific for UI
func (f *UIFormatter) Timestamp(l Level, d time.Time) string {
t := ""
dateStr := "[" + d.Format(f.format) + "] "
switch l {
case InfoLevel:
t = "[#00ffff]INFO[white]"
case ErrorLevel:
t = "[red]ERROR[white]"
case DebugLevel:
t = "[yellow]DEBUG[white]"
case FatalLevel:
t = "[red]FATAL[white]"
}
return t + dateStr
}
func (f *UIFormatter) Content(l Level, c string) string {
return c
}
// Timestamp returns the log level with a matching color to the log type
func (f *TerminalFormatter) Timestamp(l Level, d time.Time) string {
t := ""
dateStr := "[" + d.Format(f.format) + "] "
switch l {
case InfoLevel:
t = f.output("INFO", skittles.Cyan)
case ErrorLevel:
t = f.output("ERROR", skittles.Red)
case DebugLevel:
t = f.output("DEBUG", skittles.Yellow)
case FatalLevel:
t = f.output("FATAL", skittles.Red)
}
return t + dateStr
}
// Content just writes the log line straight to the sources
func (f *TerminalFormatter) Content(l Level, c string) string {
return c
}
func (f *TerminalFormatter) output(msg string, colorFunc func(interface{}) string) string {
if f.supportsColor {
return colorFunc(msg)
}
return msg
}

View File

@@ -1,59 +0,0 @@
package logger
import "sync"
// SharedWriteManager is a package-level variable that allows multiple loggers to use the same write manager.
// This is useful when multiple loggers will write to the same file to ensure they don't clobber each other.
var SharedWriteManager = NewWriteManager()
type writeData struct {
target LogOutput
data []byte
}
// WriteManager is a logging service that handles managing multiple writing streams
type WriteManager struct {
shutdown chan struct{}
writeChan chan writeData
writers map[string]Service
wg sync.WaitGroup
}
// NewWriteManager creates a write manager that implements OutputManager
func NewWriteManager() OutputManager {
m := &WriteManager{
shutdown: make(chan struct{}),
writeChan: make(chan writeData, 1000),
}
go m.run()
return m
}
// Append adds a message to the writer runloop
func (m *WriteManager) Append(data []byte, target LogOutput) {
m.wg.Add(1)
m.writeChan <- writeData{data: data, target: target}
}
// Shutdown stops the sync manager service
func (m *WriteManager) Shutdown() {
m.wg.Wait()
close(m.shutdown)
close(m.writeChan)
}
// run is the main runloop that schedules log messages
func (m *WriteManager) run() {
for {
select {
case event, ok := <-m.writeChan:
if ok {
event.target.WriteLogLine(event.data)
m.wg.Done()
}
case <-m.shutdown:
return
}
}
}
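
For illustration only: WriteManager existed to serialize writes from several loggers into one destination. With zerolog each event is emitted as a single Write call, and a shared writer can be wrapped with zerolog.SyncWriter when mutual exclusion is needed; a minimal sketch under that assumption, not cloudflared code.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	f, err := os.OpenFile("shared.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// SyncWriter guards the underlying writer with a mutex so that
	// multiple loggers (or goroutines) can share it safely.
	shared := zerolog.SyncWriter(f)
	logA := zerolog.New(shared).With().Str("component", "tunnel").Logger()
	logB := zerolog.New(shared).With().Str("component", "dns").Logger()

	logA.Info().Msg("from A")
	logB.Info().Msg("from B")
}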

View File

@@ -1,24 +0,0 @@
package logger
import (
"testing"
"github.com/stretchr/testify/assert"
)
type outputFunc func(b []byte)
func (f outputFunc) WriteLogLine(data []byte) {
f(data)
}
func TestWriteManger(t *testing.T) {
testData := []byte(string("hello Austin, how are you doing?"))
waitChan := make(chan []byte)
m := NewWriteManager()
m.Append(testData, outputFunc(func(b []byte) {
waitChan <- b
}))
resp := <-waitChan
assert.Equal(t, testData, resp)
}

View File

@@ -1,18 +0,0 @@
package logger
// MockWriteManager does nothing and is provided for testing purposes
type MockWriteManager struct {
}
// NewMockWriteManager creates an OutputManager that does nothing for testing purposes
func NewMockWriteManager() OutputManager {
return &MockWriteManager{}
}
// Append is a mock stub
func (m *MockWriteManager) Append(data []byte, target LogOutput) {
}
// Shutdown is a mock stub
func (m *MockWriteManager) Shutdown() {
}

View File

@@ -1,157 +0,0 @@
package logger
import (
"fmt"
"io"
"os"
"time"
)
// provided for testing
var osExit = os.Exit
type LogOutput interface {
WriteLogLine([]byte)
}
// OutputManager is used to sync data of Output
type OutputManager interface {
Append([]byte, LogOutput)
Shutdown()
}
// Service is the logging service that is either a group or single log writer
type Service interface {
Error(message string)
Info(message string)
Debug(message string)
Fatal(message string)
Errorf(format string, args ...interface{})
Infof(format string, args ...interface{})
Debugf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Add(writer io.Writer, formatter Formatter, levels ...Level)
}
type sourceGroup struct {
writer io.Writer
formatter Formatter
levelsSupported []Level
}
func (s *sourceGroup) WriteLogLine(data []byte) {
_, _ = s.writer.Write(data)
}
func (s *sourceGroup) supportsLevel(l Level) bool {
for _, level := range s.levelsSupported {
if l == level {
return true
}
}
return false
}
// OutputWriter is the standard logging implementation
type OutputWriter struct {
groups []*sourceGroup
syncWriter OutputManager
minLevel Level
}
// NewOutputWriter creates a new logger
func NewOutputWriter(syncWriter OutputManager) *OutputWriter {
return &OutputWriter{
syncWriter: syncWriter,
groups: nil,
minLevel: FatalLevel,
}
}
// Add a writer and formatter to output to
func (s *OutputWriter) Add(writer io.Writer, formatter Formatter, levels ...Level) {
s.groups = append(s.groups, &sourceGroup{writer: writer, formatter: formatter, levelsSupported: levels})
// track most verbose (lowest) level we need to output
for _, level := range levels {
if level < s.minLevel {
s.minLevel = level
}
}
}
// Error writes an error to the logging sources
func (s *OutputWriter) Error(message string) {
if s.minLevel <= ErrorLevel {
s.output(ErrorLevel, message)
}
}
// Info writes an info string to the logging sources
func (s *OutputWriter) Info(message string) {
if s.minLevel <= InfoLevel {
s.output(InfoLevel, message)
}
}
// Debug writes a debug string to the logging sources
func (s *OutputWriter) Debug(message string) {
if s.minLevel <= DebugLevel {
s.output(DebugLevel, message)
}
}
// Fatal writes an error string to the logging sources and then calls os.Exit(1)
func (s *OutputWriter) Fatal(message string) {
s.output(FatalLevel, message)
s.syncWriter.Shutdown() // waits for the pending logging to finish
osExit(1)
}
// Errorf writes a formatted error to the logging sources
func (s *OutputWriter) Errorf(format string, args ...interface{}) {
if s.minLevel <= ErrorLevel {
s.output(ErrorLevel, fmt.Sprintf(format, args...))
}
}
// Infof writes a formatted info statement to the logging sources
func (s *OutputWriter) Infof(format string, args ...interface{}) {
if s.minLevel <= InfoLevel {
s.output(InfoLevel, fmt.Sprintf(format, args...))
}
}
// Debugf writes a formatted debug statement to the logging sources
func (s *OutputWriter) Debugf(format string, args ...interface{}) {
if s.minLevel <= DebugLevel {
s.output(DebugLevel, fmt.Sprintf(format, args...))
}
}
// Fatalf writes a formatted error statement and then calls os.Exit(1)
func (s *OutputWriter) Fatalf(format string, args ...interface{}) {
s.output(FatalLevel, fmt.Sprintf(format, args...))
s.syncWriter.Shutdown() // waits for the pending logging to finish
osExit(1)
}
// output does the actual write to the sync manager
func (s *OutputWriter) output(l Level, content string) {
now := time.Now()
for _, group := range s.groups {
if group.supportsLevel(l) {
logLine := fmt.Sprintf("%s%s\n", group.formatter.Timestamp(l, now),
group.formatter.Content(l, content))
s.syncWriter.Append([]byte(logLine), group)
}
}
}
// Write implements io.Writer to support SetOutput of the log package
func (s *OutputWriter) Write(p []byte) (n int, err error) {
s.Info(string(p))
return len(p), nil
}
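
For illustration only: a rough mapping from the Service/OutputWriter methods above to their zerolog equivalents, which is what the call-site changes throughout this commit amount to. The logger construction here is a plain zerolog example, not cloudflared code.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Old in-house calls              ->  zerolog equivalents
	// logger.Info("ready")            ->  log.Info().Msg("ready")
	// logger.Errorf("dial: %s", err)  ->  log.Error().Msgf("dial: %s", err)
	// logger.Debug("details")         ->  log.Debug().Msg("details")
	// logger.Fatal("bye")             ->  log.Fatal().Msg("bye") // also calls os.Exit(1)
	log.Info().Msg("ready")
	log.Debug().Msgf("attempt %d", 1)
}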

View File

@@ -1,106 +0,0 @@
package logger
import (
"bufio"
"bytes"
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestLogLevel(t *testing.T) {
timeFormat := "2006-01-02"
f := NewDefaultFormatter(timeFormat)
m := NewWriteManager()
var testBuffer bytes.Buffer
logger := NewOutputWriter(m)
logger.Add(&testBuffer, f, InfoLevel, DebugLevel)
testTime := f.Timestamp(InfoLevel, time.Now())
testInfo := "hello Dalton, how are you doing?"
logger.Info(testInfo)
tesErr := "hello Austin, how did it break today?"
logger.Error(tesErr)
testDebug := "hello Bill, who are you?"
logger.Debug(testDebug)
m.Shutdown()
lines := strings.Split(testBuffer.String(), "\n")
assert.Len(t, lines, 3, "expected two log lines plus a trailing empty string in the buffer")
infoLine := lines[0]
debugLine := lines[1]
compareInfo := fmt.Sprintf("%s%s", testTime, testInfo)
assert.Equal(t, compareInfo, infoLine, "expect the strings to match")
compareDebug := fmt.Sprintf("%s%s", testTime, testDebug)
assert.Equal(t, compareDebug, debugLine, "expect the strings to match")
}
func TestOutputWrite(t *testing.T) {
timeFormat := "2006-01-02"
f := NewDefaultFormatter(timeFormat)
m := NewWriteManager()
var testBuffer bytes.Buffer
logger := NewOutputWriter(m)
logger.Add(&testBuffer, f, InfoLevel)
logger.Debugf("debug message not logged here")
testData := "hello Bob Bork, how are you doing?"
logger.Info(testData)
testTime := f.Timestamp(InfoLevel, time.Now())
m.Shutdown()
scanner := bufio.NewScanner(&testBuffer)
scanner.Scan()
line := scanner.Text()
assert.NoError(t, scanner.Err())
compareLine := fmt.Sprintf("%s%s", testTime, testData)
assert.Equal(t, compareLine, line, "expect the strings to match")
}
func TestFatalWrite(t *testing.T) {
timeFormat := "2006-01-02"
f := NewDefaultFormatter(timeFormat)
m := NewWriteManager()
var testBuffer bytes.Buffer
logger := NewOutputWriter(m)
logger.Add(&testBuffer, f, FatalLevel)
oldOsExit := osExit
defer func() { osExit = oldOsExit }()
var got int
myExit := func(code int) {
got = code
}
osExit = myExit
testData := "so long y'all"
logger.Fatal(testData)
testTime := f.Timestamp(FatalLevel, time.Now())
scanner := bufio.NewScanner(&testBuffer)
scanner.Scan()
line := scanner.Text()
assert.NoError(t, scanner.Err())
compareLine := fmt.Sprintf("%s%s", testTime, testData)
assert.Equal(t, compareLine, line, "expect the strings to match")
assert.Equal(t, got, 1, "exit code should be one for a fatal log")
}