TUN-7066: Bump coredns to v1.10.0

closes #857
Devin Carr
2023-01-10 16:12:59 -08:00
parent 513855df5c
commit 207f4e2c8d
288 changed files with 10199 additions and 10941 deletions

View File

@@ -49,12 +49,23 @@ func newOverlapZone() *zoneOverlap {
// registerAndCheck adds a new zoneAddr for validation, it returns information about existing or overlapping with already registered
// we consider that an unbound address is overlapping all bound addresses for same zone, same port
func (zo *zoneOverlap) registerAndCheck(z zoneAddr) (existingZone *zoneAddr, overlappingZone *zoneAddr) {
existingZone, overlappingZone = zo.check(z)
if existingZone != nil || overlappingZone != nil {
return existingZone, overlappingZone
}
// there is no overlap, keep the current zoneAddr for future checks
zo.registeredAddr[z] = z
zo.unboundOverlap[z.unbound()] = z
return nil, nil
}
// check validates a zoneAddr for overlap without registering it
func (zo *zoneOverlap) check(z zoneAddr) (existingZone *zoneAddr, overlappingZone *zoneAddr) {
if exist, ok := zo.registeredAddr[z]; ok {
// exact same zone already registered
return &exist, nil
}
uz := zoneAddr{Zone: z.Zone, Address: "", Port: z.Port, Transport: z.Transport}
uz := z.unbound()
if already, ok := zo.unboundOverlap[uz]; ok {
if z.Address == "" {
// current is not bound to an address, but there is already another zone with a bind address registered
@@ -65,8 +76,11 @@ func (zo *zoneOverlap) registerAndCheck(z zoneAddr) (existingZone *zoneAddr, ove
return nil, &uz
}
}
// there is no overlap, keep the current zoneAddr for future checks
zo.registeredAddr[z] = z
zo.unboundOverlap[uz] = z
// there is no overlap
return nil, nil
}
// unbound returns an unbound version of the zoneAddr
func (z zoneAddr) unbound() zoneAddr {
return zoneAddr{Zone: z.Zone, Address: "", Port: z.Port, Transport: z.Transport}
}
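
The split between check and registerAndCheck above is what later lets view-filtered server blocks share a zone and address: registerAndCheck remembers a listener so later blocks conflict with it, while check only reports conflicts. A minimal sketch (package-internal, with illustrative zone and port values, not part of the upstream diff):

~~~ go
func exampleOverlap() {
	zo := newOverlapZone()
	bound := zoneAddr{Zone: "example.org.", Address: "127.0.0.1", Port: "53", Transport: "dns"}
	free := zoneAddr{Zone: "example.org.", Address: "", Port: "53", Transport: "dns"}

	// First listener: no conflict, and it is remembered for later checks.
	if e, o := zo.registerAndCheck(bound); e == nil && o == nil {
		// registered
	}

	// An unbound listener on the same zone and port overlaps the bound one above.
	if _, o := zo.registerAndCheck(free); o != nil {
		// o points at the already-registered bound listener
	}

	// check only inspects and never registers, so a view-filtered config can be
	// validated without blocking other server blocks on the same zone.
	_, _ = zo.check(free)
}
~~~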

View File

@@ -1,12 +1,14 @@
package dnsserver
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"github.com/coredns/caddy"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/request"
)
// Config configuration for a single server.
@@ -28,6 +30,9 @@ type Config struct {
// Debug controls the panic/recover mechanism that is enabled by default.
Debug bool
// Stacktrace controls including stacktrace as part of log from recover mechanism, it is disabled by default.
Stacktrace bool
// The transport we implement, normally just "dns" over TCP/UDP, but could be
// DNS-over-TLS or DNS-over-gRPC.
Transport string
@@ -37,9 +42,20 @@ type Config struct {
// may depend on it.
HTTPRequestValidateFunc func(*http.Request) bool
// FilterFuncs is used to further filter access
// to this handler. E.g. to limit access to a reverse zone
// on a non-octet boundary, i.e. /17
FilterFuncs []FilterFunc
// ViewName is the name of the Viewer Plugin defined in the Config
ViewName string
// TLSConfig when listening for encrypted connections (gRPC, DNS-over-TLS).
TLSConfig *tls.Config
// TSIG secrets, [name]key.
TsigSecret map[string]string
// Plugin stack.
Plugin []plugin.Plugin
@@ -54,8 +70,14 @@ type Config struct {
// firstConfigInBlock is used to reference the first config in a server block, for the
// purpose of sharing single instance of each plugin among all zones in a server block.
firstConfigInBlock *Config
// metaCollector references the first MetadataCollector plugin, if one exists
metaCollector MetadataCollector
}
// FilterFunc is a function that filters requests from the Config
type FilterFunc func(context.Context, *request.Request) bool
// keyForConfig builds a key for identifying the configs during setup time
func keyForConfig(blocIndex int, blocKeyIndex int) string {
return fmt.Sprintf("%d:%d", blocIndex, blocKeyIndex)

View File

@@ -2,17 +2,31 @@ package dnsserver
import (
"fmt"
"regexp"
"sort"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
)
// checkZoneSyntax() checks whether the given string match 1035 Preferred Syntax or not.
// The root zone, and all reverse zones always return true even though they technically don't meet 1035 Preferred Syntax
func checkZoneSyntax(zone string) bool {
if zone == "." || dnsutil.IsReverse(zone) != 0 {
return true
}
regex1035PreferredSyntax, _ := regexp.MatchString(`^(([A-Za-z]([A-Za-z0-9-]*[A-Za-z0-9])?)\.)+$`, zone)
return regex1035PreferredSyntax
}
// startUpZones creates the text that we show when starting up:
// grpc://example.com.:1055
// example.com.:1053 on 127.0.0.1
func startUpZones(protocol, addr string, zones map[string]*Config) string {
func startUpZones(protocol, addr string, zones map[string][]*Config) string {
s := ""
keys := make([]string, len(zones))
i := 0
for k := range zones {
keys[i] = k
i++
@@ -20,6 +34,9 @@ func startUpZones(protocol, addr string, zones map[string]*Config) string {
sort.Strings(keys)
for _, zone := range keys {
if !checkZoneSyntax(zone) {
s += fmt.Sprintf("Warning: Domain %q does not follow RFC1035 preferred syntax\n", zone)
}
// split addr into protocol, IP and Port
_, ip, port, err := SplitProtocolHostPort(addr)
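
To make the checkZoneSyntax behaviour above concrete, a hedged sketch with illustrative zone names (the root zone and reverse zones are accepted unconditionally, everything else goes through the 1035 regex):

~~~ go
func exampleZoneSyntax() {
	fmt.Println(checkZoneSyntax("."))                      // true: the root zone is always accepted
	fmt.Println(checkZoneSyntax("10.in-addr.arpa."))       // true: reverse zones are always accepted
	fmt.Println(checkZoneSyntax("example.org."))           // true: labels start with a letter
	fmt.Println(checkZoneSyntax("123.example.org."))       // false: a label may not start with a digit
	fmt.Println(checkZoneSyntax("_sip._tcp.example.org.")) // false: underscores fall outside the preferred syntax
}
~~~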

View File

@@ -138,14 +138,6 @@ func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddy
// MakeServers uses the newly-created siteConfigs to create and return a list of server instances.
func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
// Now that all Keys and Directives are parsed and initialized
// lets verify that there is no overlap on the zones and addresses to listen for
errValid := h.validateZonesAndListeningAddresses()
if errValid != nil {
return nil, errValid
}
// Copy the Plugin, ListenHosts and Debug from first config in the block
// to all other config in the same block . Doing this results in zones
// sharing the same plugin instances and settings as other zones in
@@ -154,7 +146,9 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
c.Plugin = c.firstConfigInBlock.Plugin
c.ListenHosts = c.firstConfigInBlock.ListenHosts
c.Debug = c.firstConfigInBlock.Debug
c.Stacktrace = c.firstConfigInBlock.Stacktrace
c.TLSConfig = c.firstConfigInBlock.TLSConfig
c.TsigSecret = c.firstConfigInBlock.TsigSecret
}
// we must map (group) each config to a bind address
@@ -195,7 +189,27 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
}
servers = append(servers, s)
}
}
// For each server config, check for View Filter plugins
for _, c := range h.configs {
// Add filters in the plugin.cfg order for consistent filter func evaluation order.
for _, d := range Directives {
if vf, ok := c.registry[d].(Viewer); ok {
if c.ViewName != "" {
return nil, fmt.Errorf("multiple views defined in server block")
}
c.ViewName = vf.ViewName()
c.FilterFuncs = append(c.FilterFuncs, vf.Filter)
}
}
}
// Verify that there is no overlap on the zones and listen addresses
// for unfiltered server configs
errValid := h.validateZonesAndListeningAddresses()
if errValid != nil {
return nil, errValid
}
return servers, nil
@@ -253,18 +267,24 @@ func (h *dnsContext) validateZonesAndListeningAddresses() error {
for _, h := range conf.ListenHosts {
// Validate the overlapping of ZoneAddr
akey := zoneAddr{Transport: conf.Transport, Zone: conf.Zone, Address: h, Port: conf.Port}
existZone, overlapZone := checker.registerAndCheck(akey)
var existZone, overlapZone *zoneAddr
if len(conf.FilterFuncs) > 0 {
// This config has filters. Check for overlap with other (unfiltered) configs.
existZone, overlapZone = checker.check(akey)
} else {
// This config has no filters. Check for overlap with other (unfiltered) configs,
// and register the zone to prevent subsequent zones from overlapping with it.
existZone, overlapZone = checker.registerAndCheck(akey)
}
if existZone != nil {
return fmt.Errorf("cannot serve %s - it is already defined", akey.String())
}
if overlapZone != nil {
return fmt.Errorf("cannot serve %s - zone overlap listener capacity with %v", akey.String(), overlapZone.String())
}
}
}
return nil
}
// groupSiteConfigsByListenAddr groups site configs by their listen

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"net"
"runtime"
"runtime/debug"
"strings"
"sync"
"time"
@@ -36,22 +37,30 @@ type Server struct {
server [2]*dns.Server // 0 is a net.Listener, 1 is a net.PacketConn (a *UDPConn) in our case.
m sync.Mutex // protects the servers
zones map[string]*Config // zones keyed by their address
dnsWg sync.WaitGroup // used to wait on outstanding connections
graceTimeout time.Duration // the maximum duration of a graceful shutdown
trace trace.Trace // the trace plugin for the server
debug bool // disable recover()
classChaos bool // allow non-INET class queries
zones map[string][]*Config // zones keyed by their address
dnsWg sync.WaitGroup // used to wait on outstanding connections
graceTimeout time.Duration // the maximum duration of a graceful shutdown
trace trace.Trace // the trace plugin for the server
debug bool // disable recover()
stacktrace bool // enable stacktrace in recover error log
classChaos bool // allow non-INET class queries
tsigSecret map[string]string
}
// MetadataCollector is a plugin that can retrieve metadata functions from all metadata providing plugins
type MetadataCollector interface {
Collect(context.Context, request.Request) context.Context
}
// NewServer returns a new CoreDNS server and compiles all plugins in to it. By default CH class
// queries are blocked unless queries from enableChaos are loaded.
func NewServer(addr string, group []*Config) (*Server, error) {
s := &Server{
Addr: addr,
zones: make(map[string]*Config),
zones: make(map[string][]*Config),
graceTimeout: 5 * time.Second,
tsigSecret: make(map[string]string),
}
// We have to bound our wg with one increment
@@ -67,8 +76,15 @@ func NewServer(addr string, group []*Config) (*Server, error) {
s.debug = true
log.D.Set()
}
// set the config per zone
s.zones[site.Zone] = site
s.stacktrace = site.Stacktrace
// append the config to the zone's configs
s.zones[site.Zone] = append(s.zones[site.Zone], site)
// copy tsig secrets
for key, secret := range site.TsigSecret {
s.tsigSecret[key] = secret
}
// compile custom plugin for everything
var stack plugin.Handler
@@ -78,6 +94,12 @@ func NewServer(addr string, group []*Config) (*Server, error) {
// register the *handler* also
site.registerHandler(stack)
// If the current plugin is a MetadataCollector, bookmark it for later use. This loop traverses the plugin
// list backwards, so the first MetadataCollector plugin wins.
if mdc, ok := stack.(MetadataCollector); ok {
site.metaCollector = mdc
}
if s.trace == nil && stack.Name() == "trace" {
// we have to stash away the plugin, not the
// Tracer object, because the Tracer won't be initialized yet
@@ -112,7 +134,7 @@ func (s *Server) Serve(l net.Listener) error {
ctx := context.WithValue(context.Background(), Key{}, s)
ctx = context.WithValue(ctx, LoopKey{}, 0)
s.ServeDNS(ctx, w, r)
})}
}), TsigSecret: s.tsigSecret}
s.m.Unlock()
return s.server[tcp].ActivateAndServe()
@@ -126,7 +148,7 @@ func (s *Server) ServePacket(p net.PacketConn) error {
ctx := context.WithValue(context.Background(), Key{}, s)
ctx = context.WithValue(ctx, LoopKey{}, 0)
s.ServeDNS(ctx, w, r)
})}
}), TsigSecret: s.tsigSecret}
s.m.Unlock()
return s.server[udp].ActivateAndServe()
@@ -163,7 +185,6 @@ func (s *Server) ListenPacket() (net.PacketConn, error) {
// immediately.
// This implements Caddy.Stopper interface.
func (s *Server) Stop() (err error) {
if runtime.GOOS != "windows" {
// force connections to close after timeout
done := make(chan struct{})
@@ -213,7 +234,11 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// In case the user doesn't enable error plugin, we still
// need to make sure that we stay alive up here
if rec := recover(); rec != nil {
log.Errorf("Recovered from panic in server: %q %v", s.Addr, rec)
if s.stacktrace {
log.Errorf("Recovered from panic in server: %q %v\n%s", s.Addr, rec, string(debug.Stack()))
} else {
log.Errorf("Recovered from panic in server: %q %v", s.Addr, rec)
}
vars.Panic.Inc()
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)
}
@@ -241,24 +266,39 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
)
for {
if h, ok := s.zones[q[off:]]; ok {
if h.pluginChain == nil { // zone defined, but has not got any plugins
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
return
}
if r.Question[0].Qtype != dns.TypeDS {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
errorFunc(s.Addr, w, r, rcode)
if z, ok := s.zones[q[off:]]; ok {
for _, h := range z {
if h.pluginChain == nil { // zone defined, but has not got any plugins
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
return
}
if h.metaCollector != nil {
// Collect metadata now, so it can be used before we send a request down the plugin chain.
ctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})
}
// If all filter funcs pass, use this config.
if passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {
if h.ViewName != "" {
// if there was a view defined for this Config, set the view name in the context
ctx = context.WithValue(ctx, ViewKey{}, h.ViewName)
}
if r.Question[0].Qtype != dns.TypeDS {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
errorFunc(s.Addr, w, r, rcode)
}
return
}
// The type is DS, keep the handler, but keep on searching as maybe we are serving
// the parent as well and the DS should be routed to it - this will probably *misroute* DS
// queries to a possibly grand parent, but there is no way for us to know at this point
// if there is an actual delegation from grandparent -> parent -> zone.
// In all fairness: direct DS queries should not be needed.
dshandler = h
}
return
}
// The type is DS, keep the handler, but keep on searching as maybe we are serving
// the parent as well and the DS should be routed to it - this will probably *misroute* DS
// queries to a possibly grand parent, but there is no way for us to know at this point
// if there is an actual delegation from grandparent -> parent -> zone.
// In all fairness: direct DS queries should not be needed.
dshandler = h
}
off, end = dns.NextLabel(q, off)
if end {
@@ -276,18 +316,46 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := s.zones["."]; ok && h.pluginChain != nil {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
errorFunc(s.Addr, w, r, rcode)
if z, ok := s.zones["."]; ok {
for _, h := range z {
if h.pluginChain == nil {
continue
}
if h.metaCollector != nil {
// Collect metadata now, so it can be used before we send a request down the plugin chain.
ctx = h.metaCollector.Collect(ctx, request.Request{Req: r, W: w})
}
// If all filter funcs pass, use this config.
if passAllFilterFuncs(ctx, h.FilterFuncs, &request.Request{Req: r, W: w}) {
if h.ViewName != "" {
// if there was a view defined for this Config, set the view name in the context
ctx = context.WithValue(ctx, ViewKey{}, h.ViewName)
}
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
errorFunc(s.Addr, w, r, rcode)
}
return
}
}
return
}
// Still here? Error out with REFUSED.
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
}
// passAllFilterFuncs returns true if all filter funcs evaluate to true for the given request
func passAllFilterFuncs(ctx context.Context, filterFuncs []FilterFunc, req *request.Request) bool {
for _, ff := range filterFuncs {
if !ff(ctx, req) {
return false
}
}
return true
}
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming Quiet is false.
func (s *Server) OnStartupComplete() {
@@ -328,7 +396,7 @@ func errorAndMetricsFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int
answer.SetRcode(r, rc)
state.SizeAndDo(answer)
vars.Report(server, state, vars.Dropped, rcode.ToString(rc), "" /* plugin */, answer.Len(), time.Now())
vars.Report(server, state, vars.Dropped, "", rcode.ToString(rc), "" /* plugin */, answer.Len(), time.Now())
w.WriteMsg(answer)
}
@@ -344,6 +412,9 @@ type (
// LoopKey is the context key to detect server wide loops.
LoopKey struct{}
// ViewKey is the context key for the current view, if defined
ViewKey struct{}
)
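
The new ViewKey added above is how downstream plugins can learn which view (if any) routed the request. A hedged sketch of a hypothetical plugin helper reading it:

~~~ go
package exampleplugin // hypothetical

import (
	"context"

	"github.com/coredns/coredns/core/dnsserver"
)

// viewOf returns the view name stored under ViewKey, or "" when the request
// was not routed through a view-filtered server block.
func viewOf(ctx context.Context) string {
	if name, ok := ctx.Value(dnsserver.ViewKey{}).(string); ok {
		return name
	}
	return ""
}
~~~
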
// EnableChaos is a map with plugin names for which we should open CH class queries as we block these by default.

View File

@@ -22,6 +22,7 @@ import (
// ServergRPC represents an instance of a DNS-over-gRPC server.
type ServergRPC struct {
*Server
*pb.UnimplementedDnsServiceServer
grpcServer *grpc.Server
listenAddr net.Addr
tlsConfig *tls.Config
@@ -36,9 +37,11 @@ func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) {
// The *tls* plugin must make sure that multiple conflicting
// TLS configuration returns an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
for _, z := range s.zones {
for _, conf := range z {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
}
// http/2 is required when using gRPC. We need to specify it in next protos
// or the upgrade won't happen.
@@ -81,7 +84,6 @@ func (s *ServergRPC) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServergRPC) Listen() (net.Listener, error) {
l, err := reuseport.Listen("tcp", s.Addr[len(transport.GRPC+"://"):])
if err != nil {
return nil, err

View File

@@ -4,14 +4,17 @@ import (
"context"
"crypto/tls"
"fmt"
stdlog "log"
"net"
"net/http"
"strconv"
"time"
"github.com/coredns/caddy"
"github.com/coredns/coredns/plugin/metrics/vars"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/coredns/coredns/plugin/pkg/doh"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/plugin/pkg/response"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/transport"
@@ -26,6 +29,18 @@ type ServerHTTPS struct {
validRequest func(*http.Request) bool
}
// loggerAdapter is a simple adapter around CoreDNS logger made to implement io.Writer in order to log errors from HTTP server
type loggerAdapter struct {
}
func (l *loggerAdapter) Write(p []byte) (n int, err error) {
clog.Debug(string(p))
return len(p), nil
}
// HTTPRequestKey is the context key for the current processed HTTP request (if current processed request was done over DOH)
type HTTPRequestKey struct{}
// NewServerHTTPS returns a new CoreDNS HTTPS server and compiles all plugins in to it.
func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
s, err := NewServer(addr, group)
@@ -35,9 +50,11 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
// The *tls* plugin must make sure that multiple conflicting
// TLS configuration returns an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
for _, z := range s.zones {
for _, conf := range z {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
}
// http/2 is recommended when using DoH. We need to specify it in next protos
@@ -48,8 +65,10 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
// Use a custom request validation func or use the standard DoH path check.
var validator func(*http.Request) bool
for _, conf := range s.zones {
validator = conf.HTTPRequestValidateFunc
for _, z := range s.zones {
for _, conf := range z {
validator = conf.HTTPRequestValidateFunc
}
}
if validator == nil {
validator = func(r *http.Request) bool { return r.URL.Path == doh.Path }
@@ -59,6 +78,7 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
IdleTimeout: 120 * time.Second,
ErrorLog: stdlog.New(&loggerAdapter{}, "", 0),
}
sh := &ServerHTTPS{
Server: s, tlsConfig: tlsConfig, httpsServer: srv, validRequest: validator,
@@ -88,7 +108,6 @@ func (s *ServerHTTPS) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServerHTTPS) Listen() (net.Listener, error) {
l, err := reuseport.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):])
if err != nil {
return nil, err
@@ -125,15 +144,16 @@ func (s *ServerHTTPS) Stop() error {
// ServeHTTP is the handler that gets the HTTP request and converts to the dns format, calls the plugin
// chain, converts it back and write it to the client.
func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if !s.validRequest(r) {
http.Error(w, "", http.StatusNotFound)
s.countResponse(http.StatusNotFound)
return
}
msg, err := doh.RequestToMsg(r)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
s.countResponse(http.StatusBadRequest)
return
}
@@ -150,6 +170,7 @@ func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// We should expect a packet to be returned that we can send to the client.
ctx := context.WithValue(context.Background(), Key{}, s.Server)
ctx = context.WithValue(ctx, LoopKey{}, 0)
ctx = context.WithValue(ctx, HTTPRequestKey{}, r)
s.ServeDNS(ctx, dw, msg)
// See section 4.2.1 of RFC 8484.
@@ -157,6 +178,7 @@ func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// handler has not provided any response message.
if dw.Msg == nil {
http.Error(w, "No response", http.StatusInternalServerError)
s.countResponse(http.StatusInternalServerError)
return
}
@@ -169,10 +191,15 @@ func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", fmt.Sprintf("max-age=%f", age.Seconds()))
w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
w.WriteHeader(http.StatusOK)
s.countResponse(http.StatusOK)
w.Write(buf)
}
func (s *ServerHTTPS) countResponse(status int) {
vars.HTTPSResponsesCount.WithLabelValues(s.Addr, strconv.Itoa(status)).Inc()
}
// Shutdown stops the server (non gracefully).
func (s *ServerHTTPS) Shutdown() error {
if s.httpsServer != nil {

View File

@@ -28,9 +28,11 @@ func NewServerTLS(addr string, group []*Config) (*ServerTLS, error) {
// The *tls* plugin must make sure that multiple conflicting
// TLS configuration returns an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
for _, z := range s.zones {
for _, conf := range z {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
}
return &ServerTLS{Server: s, tlsConfig: tlsConfig}, nil

View File

@@ -0,0 +1,20 @@
package dnsserver
import (
"context"
"github.com/coredns/coredns/request"
)
// Viewer - If Viewer is implemented by a plugin in a server block, its Filter()
// is added to the server block's filter functions when starting the server. When a running server
// serves a DNS request, it will route the request to the first Config (server block) that passes
// all its filter functions.
type Viewer interface {
// Filter returns true if the server should use the server block in which the implementing plugin resides, and the
// name of the view for metrics logging.
Filter(ctx context.Context, req *request.Request) bool
// ViewName returns the name of the view
ViewName() string
}
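
For illustration, a minimal hedged sketch of a plugin satisfying Viewer; the plugin and its matching rule are invented (the actual `view` directive added in this release has its own configuration syntax):

~~~ go
package exampleview // hypothetical plugin package

import (
	"context"

	"github.com/coredns/coredns/request"
)

// byQType restricts a server block to queries of a single type, e.g. only TXT.
type byQType struct {
	name  string
	qtype uint16
}

// Filter reports whether the server block containing this plugin should serve the request.
func (v byQType) Filter(ctx context.Context, req *request.Request) bool {
	return req.QType() == v.qtype
}

// ViewName is surfaced in metrics labels and stored under ViewKey in the request context.
func (v byQType) ViewName() string { return v.name }
~~~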

View File

@@ -34,6 +34,7 @@ var Directives = []string{
"any",
"chaos",
"loadbalance",
"tsig",
"cache",
"rewrite",
"header",
@@ -59,4 +60,5 @@ var Directives = []string{
"whoami",
"on",
"sign",
"view",
}

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"log"
"os"
"path/filepath"
"runtime"
"strings"
@@ -95,7 +96,7 @@ func confLoader(serverType string) (caddy.Input, error) {
return caddy.CaddyfileFromPipe(os.Stdin, serverType)
}
contents, err := os.ReadFile(conf)
contents, err := os.ReadFile(filepath.Clean(conf))
if err != nil {
return nil, err
}

View File

@@ -2,7 +2,7 @@ package coremain
// Various CoreDNS constants.
const (
CoreVersion = "1.8.7"
CoreVersion = "1.10.0"
coreName = "CoreDNS"
serverType = "dns"
)

View File

@@ -2,11 +2,18 @@
# from: https://github.com/golang/protobuf to make this work.
# The generate dns.pb.go is checked into git, so for normal builds we don't need
# to run this generation step.
# Note: The following has been used when regenerate pb:
# curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.19.4/protoc-3.19.4-linux-x86_64.zip
# go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.27.1
# go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0
# export PATH="$PATH:$(go env GOPATH)/bin"
# rm pb/dns.pb.go pb/dns_grpc.pb.go
# make pb
all: dns.pb.go
dns.pb.go: dns.proto
protoc --go_out=plugins=grpc:. dns.proto
protoc --go_out=. --go-grpc_out=. dns.proto
.PHONY: clean
clean:

View File

@@ -1,156 +1,147 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.19.4
// source: dns.proto
package pb
import (
context "context"
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
/* Miek: disabled this manually, because I don't know what the heck */
/*
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
*/
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
type DnsPacket struct {
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
}
func (m *DnsPacket) Reset() { *m = DnsPacket{} }
func (m *DnsPacket) String() string { return proto.CompactTextString(m) }
func (*DnsPacket) ProtoMessage() {}
func (x *DnsPacket) Reset() {
*x = DnsPacket{}
if protoimpl.UnsafeEnabled {
mi := &file_dns_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *DnsPacket) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*DnsPacket) ProtoMessage() {}
func (x *DnsPacket) ProtoReflect() protoreflect.Message {
mi := &file_dns_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use DnsPacket.ProtoReflect.Descriptor instead.
func (*DnsPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_638ff8d8aaf3d8ae, []int{0}
return file_dns_proto_rawDescGZIP(), []int{0}
}
func (m *DnsPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DnsPacket.Unmarshal(m, b)
}
func (m *DnsPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DnsPacket.Marshal(b, m, deterministic)
}
func (m *DnsPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_DnsPacket.Merge(m, src)
}
func (m *DnsPacket) XXX_Size() int {
return xxx_messageInfo_DnsPacket.Size(m)
}
func (m *DnsPacket) XXX_DiscardUnknown() {
xxx_messageInfo_DnsPacket.DiscardUnknown(m)
}
var xxx_messageInfo_DnsPacket proto.InternalMessageInfo
func (m *DnsPacket) GetMsg() []byte {
if m != nil {
return m.Msg
func (x *DnsPacket) GetMsg() []byte {
if x != nil {
return x.Msg
}
return nil
}
func init() {
proto.RegisterType((*DnsPacket)(nil), "coredns.dns.DnsPacket")
var File_dns_proto protoreflect.FileDescriptor
var file_dns_proto_rawDesc = []byte{
0x0a, 0x09, 0x64, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x63, 0x6f, 0x72,
0x65, 0x64, 0x6e, 0x73, 0x2e, 0x64, 0x6e, 0x73, 0x22, 0x1d, 0x0a, 0x09, 0x44, 0x6e, 0x73, 0x50,
0x61, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x32, 0x45, 0x0a, 0x0a, 0x44, 0x6e, 0x73, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x16,
0x2e, 0x63, 0x6f, 0x72, 0x65, 0x64, 0x6e, 0x73, 0x2e, 0x64, 0x6e, 0x73, 0x2e, 0x44, 0x6e, 0x73,
0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x16, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x64, 0x6e, 0x73,
0x2e, 0x64, 0x6e, 0x73, 0x2e, 0x44, 0x6e, 0x73, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x06,
0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
func init() { proto.RegisterFile("dns.proto", fileDescriptor_638ff8d8aaf3d8ae) }
var (
file_dns_proto_rawDescOnce sync.Once
file_dns_proto_rawDescData = file_dns_proto_rawDesc
)
var fileDescriptor_638ff8d8aaf3d8ae = []byte{
// 120 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xc9, 0x2b, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xce, 0x2f, 0x4a, 0x05, 0x71, 0x53, 0xf2, 0x8a,
0x95, 0x64, 0xb9, 0x38, 0x5d, 0xf2, 0x8a, 0x03, 0x12, 0x93, 0xb3, 0x53, 0x4b, 0x84, 0x04, 0xb8,
0x98, 0x73, 0x8b, 0xd3, 0x25, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x23, 0x57, 0x2e,
0x2e, 0x97, 0xbc, 0xe2, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x73, 0x2e, 0xd6, 0xc0,
0xd2, 0xd4, 0xa2, 0x4a, 0x21, 0x31, 0x3d, 0x24, 0x33, 0xf4, 0xe0, 0x06, 0x48, 0xe1, 0x10, 0x77,
0x62, 0x89, 0x62, 0x2a, 0x48, 0x4a, 0x62, 0x03, 0xdb, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
0xf5, 0xd1, 0x3f, 0x26, 0x8c, 0x00, 0x00, 0x00,
func file_dns_proto_rawDescGZIP() []byte {
file_dns_proto_rawDescOnce.Do(func() {
file_dns_proto_rawDescData = protoimpl.X.CompressGZIP(file_dns_proto_rawDescData)
})
return file_dns_proto_rawDescData
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// DnsServiceClient is the client API for DnsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DnsServiceClient interface {
Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error)
var file_dns_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
var file_dns_proto_goTypes = []interface{}{
(*DnsPacket)(nil), // 0: coredns.dns.DnsPacket
}
var file_dns_proto_depIdxs = []int32{
0, // 0: coredns.dns.DnsService.Query:input_type -> coredns.dns.DnsPacket
0, // 1: coredns.dns.DnsService.Query:output_type -> coredns.dns.DnsPacket
1, // [1:2] is the sub-list for method output_type
0, // [0:1] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
type dnsServiceClient struct {
cc *grpc.ClientConn
}
func NewDnsServiceClient(cc *grpc.ClientConn) DnsServiceClient {
return &dnsServiceClient{cc}
}
func (c *dnsServiceClient) Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error) {
out := new(DnsPacket)
err := c.cc.Invoke(ctx, "/coredns.dns.DnsService/Query", in, out, opts...)
if err != nil {
return nil, err
func init() { file_dns_proto_init() }
func file_dns_proto_init() {
if File_dns_proto != nil {
return
}
return out, nil
}
// DnsServiceServer is the server API for DnsService service.
type DnsServiceServer interface {
Query(context.Context, *DnsPacket) (*DnsPacket, error)
}
func RegisterDnsServiceServer(s *grpc.Server, srv DnsServiceServer) {
s.RegisterService(&_DnsService_serviceDesc, srv)
}
func _DnsService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DnsPacket)
if err := dec(in); err != nil {
return nil, err
if !protoimpl.UnsafeEnabled {
file_dns_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*DnsPacket); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
if interceptor == nil {
return srv.(DnsServiceServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/coredns.dns.DnsService/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DnsServiceServer).Query(ctx, req.(*DnsPacket))
}
return interceptor(ctx, in, info, handler)
}
var _DnsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "coredns.dns.DnsService",
HandlerType: (*DnsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _DnsService_Query_Handler,
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_dns_proto_rawDesc,
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
NumServices: 1,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "dns.proto",
GoTypes: file_dns_proto_goTypes,
DependencyIndexes: file_dns_proto_depIdxs,
MessageInfos: file_dns_proto_msgTypes,
}.Build()
File_dns_proto = out.File
file_dns_proto_rawDesc = nil
file_dns_proto_goTypes = nil
file_dns_proto_depIdxs = nil
}

View File

@@ -1,7 +1,7 @@
syntax = "proto3";
package coredns.dns;
option go_package = "pb";
option go_package = ".;pb";
message DnsPacket {
bytes msg = 1;

vendor/github.com/coredns/coredns/pb/dns_grpc.pb.go generated vendored Normal file
View File

@@ -0,0 +1,105 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.4
// source: dns.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// DnsServiceClient is the client API for DnsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type DnsServiceClient interface {
Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error)
}
type dnsServiceClient struct {
cc grpc.ClientConnInterface
}
func NewDnsServiceClient(cc grpc.ClientConnInterface) DnsServiceClient {
return &dnsServiceClient{cc}
}
func (c *dnsServiceClient) Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error) {
out := new(DnsPacket)
err := c.cc.Invoke(ctx, "/coredns.dns.DnsService/Query", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// DnsServiceServer is the server API for DnsService service.
// All implementations must embed UnimplementedDnsServiceServer
// for forward compatibility
type DnsServiceServer interface {
Query(context.Context, *DnsPacket) (*DnsPacket, error)
mustEmbedUnimplementedDnsServiceServer()
}
// UnimplementedDnsServiceServer must be embedded to have forward compatible implementations.
type UnimplementedDnsServiceServer struct {
}
func (UnimplementedDnsServiceServer) Query(context.Context, *DnsPacket) (*DnsPacket, error) {
return nil, status.Errorf(codes.Unimplemented, "method Query not implemented")
}
func (UnimplementedDnsServiceServer) mustEmbedUnimplementedDnsServiceServer() {}
// UnsafeDnsServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to DnsServiceServer will
// result in compilation errors.
type UnsafeDnsServiceServer interface {
mustEmbedUnimplementedDnsServiceServer()
}
func RegisterDnsServiceServer(s grpc.ServiceRegistrar, srv DnsServiceServer) {
s.RegisterService(&DnsService_ServiceDesc, srv)
}
func _DnsService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DnsPacket)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DnsServiceServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/coredns.dns.DnsService/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DnsServiceServer).Query(ctx, req.(*DnsPacket))
}
return interceptor(ctx, in, info, handler)
}
// DnsService_ServiceDesc is the grpc.ServiceDesc for DnsService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var DnsService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "coredns.dns.DnsService",
HandlerType: (*DnsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _DnsService_Query_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "dns.proto",
}

View File

@@ -14,22 +14,22 @@ import (
)
// A returns A records from Backend or an error.
func A(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
func A(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, truncated bool, err error) {
services, err := checkForApex(ctx, b, zone, state, opt)
if err != nil {
return nil, err
return nil, false, err
}
dup := make(map[string]struct{})
for _, serv := range services {
what, ip := serv.HostType()
switch what {
case dns.TypeCNAME:
if Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
// in etcd/skydns w.x CNAME x is also direct loop due to the "recursive" nature of search results
continue
}
@@ -44,7 +44,7 @@ func A(ctx context.Context, b ServiceBackend, zone string, state request.Request
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := A(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
nextRecords, tc, err := A(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
@@ -53,6 +53,9 @@ func A(ctx context.Context, b ServiceBackend, zone string, state request.Request
records = append(records, nextRecords...)
}
}
if tc {
truncated = true
}
continue
}
// This means we can not complete the CNAME, try to look else where.
@@ -62,6 +65,9 @@ func A(ctx context.Context, b ServiceBackend, zone string, state request.Request
if e1 != nil {
continue
}
if m1.Truncated {
truncated = true
}
// Len(m1.Answer) > 0 here is well?
records = append(records, newRecord)
records = append(records, m1.Answer...)
@@ -77,20 +83,19 @@ func A(ctx context.Context, b ServiceBackend, zone string, state request.Request
// nada
}
}
return records, nil
return records, truncated, nil
}
// AAAA returns AAAA records from Backend or an error.
func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, truncated bool, err error) {
services, err := checkForApex(ctx, b, zone, state, opt)
if err != nil {
return nil, err
return nil, false, err
}
dup := make(map[string]struct{})
for _, serv := range services {
what, ip := serv.HostType()
switch what {
@@ -98,6 +103,7 @@ func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Requ
// Try to resolve as CNAME if it's not an IP, but only if we don't create loops.
if Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
// in etcd/skydns w.x CNAME x is also direct loop due to the "recursive" nature of search results
continue
}
@@ -112,7 +118,7 @@ func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Requ
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := AAAA(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
nextRecords, tc, err := AAAA(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
@@ -121,6 +127,9 @@ func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Requ
records = append(records, nextRecords...)
}
}
if tc {
truncated = true
}
continue
}
// This means we can not complete the CNAME, try to look else where.
@@ -129,6 +138,9 @@ func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Requ
if e1 != nil {
continue
}
if m1.Truncated {
truncated = true
}
// Len(m1.Answer) > 0 here is well?
records = append(records, newRecord)
records = append(records, m1.Answer...)
@@ -145,7 +157,7 @@ func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Requ
}
}
}
return records, nil
return records, truncated, nil
}
// SRV returns SRV records from the Backend.
@@ -223,7 +235,7 @@ func SRV(ctx context.Context, b ServiceBackend, zone string, state request.Reque
// Internal name, we should have some info on them, either v4 or v6
// Clients expect a complete answer, because we are a recursor in their view.
state1 := state.NewWithQuestion(srv.Target, dns.TypeA)
addr, e1 := A(ctx, b, zone, state1, nil, opt)
addr, _, e1 := A(ctx, b, zone, state1, nil, opt)
if e1 == nil {
extra = append(extra, addr...)
}
@@ -289,7 +301,7 @@ func MX(ctx context.Context, b ServiceBackend, zone string, state request.Reques
}
// Internal name
state1 := state.NewWithQuestion(mx.Mx, dns.TypeA)
addr, e1 := A(ctx, b, zone, state1, nil, opt)
addr, _, e1 := A(ctx, b, zone, state1, nil, opt)
if e1 == nil {
extra = append(extra, addr...)
}
@@ -329,23 +341,22 @@ func CNAME(ctx context.Context, b ServiceBackend, zone string, state request.Req
}
// TXT returns TXT records from Backend or an error.
func TXT(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := b.Services(ctx, state, true, opt)
func TXT(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, truncated bool, err error) {
services, err := b.Services(ctx, state, false, opt)
if err != nil {
return nil, err
return nil, false, err
}
dup := make(map[string]struct{})
for _, serv := range services {
what, _ := serv.HostType()
switch what {
case dns.TypeCNAME:
if Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
// in etcd/skydns w.x CNAME x is also direct loop due to the "recursive" nature of search results
continue
}
@@ -360,8 +371,10 @@ func TXT(ctx context.Context, b ServiceBackend, zone string, state request.Reque
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := TXT(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
nextRecords, tc, err := TXT(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
if tc {
truncated = true
}
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
@@ -384,15 +397,14 @@ func TXT(ctx context.Context, b ServiceBackend, zone string, state request.Reque
continue
case dns.TypeTXT:
if _, ok := dup[serv.Host]; !ok {
dup[serv.Host] = struct{}{}
return append(records, serv.NewTXT(state.QName())), nil
if _, ok := dup[serv.Text]; !ok {
dup[serv.Text] = struct{}{}
records = append(records, serv.NewTXT(state.QName()))
}
}
}
return records, nil
return records, truncated, nil
}
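
Since A, AAAA and TXT now also return a truncated flag, callers are expected to surface it on the reply. A hedged sketch of a hypothetical backend-style caller (standard CoreDNS imports assumed; variable names are illustrative):

~~~ go
func serveA(ctx context.Context, w dns.ResponseWriter, state request.Request,
	backend plugin.ServiceBackend, zone string, opt plugin.Options) (int, error) {

	records, truncated, err := plugin.A(ctx, backend, zone, state, nil, opt)
	if err != nil {
		return dns.RcodeServerFailure, err
	}

	m := new(dns.Msg)
	m.SetReply(state.Req)
	m.Authoritative = true
	m.Truncated = truncated // surface upstream truncation so clients can retry over TCP
	m.Answer = records

	w.WriteMsg(m)
	return dns.RcodeSuccess, nil
}
~~~
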
// PTR returns the PTR records from the backend, only services that have a domain name as host are included.
@@ -490,7 +502,6 @@ func BackendError(ctx context.Context, b ServiceBackend, zone string, rcode int,
}
func newAddress(s msg.Service, name string, ip net.IP, what uint16) dns.RR {
hdr := dns.RR_Header{Name: name, Rrtype: what, Class: dns.ClassINET, Ttl: s.TTL}
if what == dns.TypeA {

View File

@@ -37,7 +37,9 @@ cache [TTL] [ZONES...] {
success CAPACITY [TTL] [MINTTL]
denial CAPACITY [TTL] [MINTTL]
prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
serve_stale [DURATION]
serve_stale [DURATION] [REFRESH_MODE]
servfail DURATION
disable success|denial [ZONES...]
}
~~~
@@ -54,10 +56,20 @@ cache [TTL] [ZONES...] {
**DURATION** defaults to 1m. Prefetching will happen when the TTL drops below **PERCENTAGE**,
which defaults to `10%`, or latest 1 second before TTL expiration. Values should be in the range `[10%, 90%]`.
Note the percent sign is mandatory. **PERCENTAGE** is treated as an `int`.
* `serve_stale`, when serve\_stale is set, cache always will serve an expired entry to a client if there is one
available. When this happens, cache will attempt to refresh the cache entry after sending the expired cache
entry to the client. The responses have a TTL of 0. **DURATION** is how far back to consider
stale responses as fresh. The default duration is 1h.
* `serve_stale`, when serve\_stale is set, cache will always serve an expired entry to a client if there is one
available as long as it has not been expired for longer than **DURATION** (default 1 hour). By default, the _cache_ plugin will
attempt to refresh the cache entry after sending the expired cache entry to the client. The
responses have a TTL of 0. **REFRESH_MODE** controls the timing of the expired cache entry refresh.
`verify` will first verify that an entry is still unavailable from the source before sending the expired entry to the client.
`immediate` will immediately send the expired entry to the client before
checking to see if the entry is available from the source. **REFRESH_MODE** defaults to `immediate`. Setting this
value to `verify` can lead to increased latency when serving stale responses, but will prevent stale entries
from ever being served if an updated response can be retrieved from the source.
* `servfail` cache SERVFAIL responses for **DURATION**. Setting **DURATION** to 0 will disable caching of SERVFAIL
responses. If this option is not set, SERVFAIL responses will be cached for 5 seconds. **DURATION** may not be
greater than 5 minutes.
* `disable` disable the success or denial cache for the listed **ZONES**. If no **ZONES** are given, the specified
cache will be disabled for all zones.
## Capacity and Eviction
@@ -73,14 +85,14 @@ Entries with 0 TTL will remain in the cache until randomly evicted when the shar
If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported:
* `coredns_cache_entries{server, type}` - Total elements in the cache by cache type.
* `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type.
* `coredns_cache_misses_total{server}` - Counter of cache misses. - Deprecated, derive misses from cache hits/requests counters.
* `coredns_cache_requests_total{server}` - Counter of cache requests.
* `coredns_cache_prefetch_total{server}` - Counter of times the cache has prefetched a cached item.
* `coredns_cache_drops_total{server}` - Counter of responses excluded from the cache due to request/response question name mismatch.
* `coredns_cache_served_stale_total{server}` - Counter of requests served from stale cache entries.
* `coredns_cache_evictions_total{server, type}` - Counter of cache evictions.
* `coredns_cache_entries{server, type, zones, view}` - Total elements in the cache by cache type.
* `coredns_cache_hits_total{server, type, zones, view}` - Counter of cache hits by cache type.
* `coredns_cache_misses_total{server, zones, view}` - Counter of cache misses. - Deprecated, derive misses from cache hits/requests counters.
* `coredns_cache_requests_total{server, zones, view}` - Counter of cache requests.
* `coredns_cache_prefetch_total{server, zones, view}` - Counter of times the cache has prefetched a cached item.
* `coredns_cache_drops_total{server, zones, view}` - Counter of responses excluded from the cache due to request/response question name mismatch.
* `coredns_cache_served_stale_total{server, zones, view}` - Counter of requests served from stale cache entries.
* `coredns_cache_evictions_total{server, type, zones, view}` - Counter of cache evictions.
Cache types are either "denial" or "success". `Server` is the server handling the request, see the
prometheus plugin for documentation.
@@ -115,3 +127,13 @@ example.org {
}
}
~~~
Enable caching for `example.org`, but do not cache denials in `sub.example.org`:
~~~ corefile
example.org {
cache {
disable denial sub.example.org
}
}
~~~
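
A hedged example combining the new options (the upstream address is illustrative): serve entries that expired up to 30 minutes ago, but verify the source is still unavailable before doing so, and do not cache SERVFAIL responses:

~~~ corefile
example.org {
    cache {
        serve_stale 30m verify
        servfail 0
    }
    forward . 8.8.8.8
}
~~~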

View File

@@ -21,6 +21,9 @@ type Cache struct {
Next plugin.Handler
Zones []string
zonesMetricLabel string
viewMetricLabel string
ncache *cache.Cache
ncap int
nttl time.Duration
@@ -30,13 +33,20 @@ type Cache struct {
pcap int
pttl time.Duration
minpttl time.Duration
failttl time.Duration // TTL for caching SERVFAIL responses
// Prefetch.
prefetch int
duration time.Duration
percentage int
staleUpTo time.Duration
// Stale serve
staleUpTo time.Duration
verifyStale bool
// Positive/negative zone exceptions
pexcept []string
nexcept []string
// Testing.
now func() time.Time
@@ -55,6 +65,7 @@ func New() *Cache {
ncache: cache.New(defaultCap),
nttl: maxNTTL,
minnttl: minNTTL,
failttl: minNTTL,
prefetch: 0,
duration: 1 * time.Minute,
percentage: 10,
@@ -105,8 +116,14 @@ type ResponseWriter struct {
server string // Server handling the request.
do bool // When true the original request had the DO bit set.
ad bool // When true the original request had the AD bit set.
prefetch bool // When true write nothing back to the client.
remoteAddr net.Addr
wildcardFunc func() string // function to retrieve wildcard name that synthesized the result.
pexcept []string // positive zone exceptions
nexcept []string // negative zone exceptions
}
// newPrefetchResponseWriter returns a Cache ResponseWriter to be used in
@@ -153,8 +170,7 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
if mt == response.NameError || mt == response.NoData {
duration = computeTTL(msgTTL, w.minnttl, w.nttl)
} else if mt == response.ServerError {
// use default ttl which is 5s
duration = minTTL
duration = w.failttl
} else {
duration = computeTTL(msgTTL, w.minpttl, w.pttl)
}
@@ -162,11 +178,11 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
if hasKey && duration > 0 {
if w.state.Match(res) {
w.set(res, key, mt, duration)
cacheSize.WithLabelValues(w.server, Success).Set(float64(w.pcache.Len()))
cacheSize.WithLabelValues(w.server, Denial).Set(float64(w.ncache.Len()))
cacheSize.WithLabelValues(w.server, Success, w.zonesMetricLabel, w.viewMetricLabel).Set(float64(w.pcache.Len()))
cacheSize.WithLabelValues(w.server, Denial, w.zonesMetricLabel, w.viewMetricLabel).Set(float64(w.ncache.Len()))
} else {
// Don't log it, but increment counter
cacheDrops.WithLabelValues(w.server).Inc()
cacheDrops.WithLabelValues(w.server, w.zonesMetricLabel, w.viewMetricLabel).Inc()
}
}
@@ -181,8 +197,10 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
res.Ns = filterRRSlice(res.Ns, ttl, w.do, false)
res.Extra = filterRRSlice(res.Extra, ttl, w.do, false)
if !w.do {
res.AuthenticatedData = false // unset AD bit if client is not OK with DNSSEC
if !w.do && !w.ad {
// unset AD bit if requester is not OK with DNSSEC
// But retain AD bit if requester set the AD bit in the request, per RFC6840 5.7-5.8
res.AuthenticatedData = false
}
return w.ResponseWriter.WriteMsg(res)
@@ -193,9 +211,16 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
// and key is valid
switch mt {
case response.NoError, response.Delegation:
if plugin.Zones(w.pexcept).Matches(m.Question[0].Name) != "" {
// zone is in exception list, do not cache
return
}
i := newItem(m, w.now(), duration)
if w.wildcardFunc != nil {
i.wildcard = w.wildcardFunc()
}
if w.pcache.Add(key, i) {
evictions.WithLabelValues(w.server, Success).Inc()
evictions.WithLabelValues(w.server, Success, w.zonesMetricLabel, w.viewMetricLabel).Inc()
}
// when pre-fetching, remove the negative cache entry if it exists
if w.prefetch {
@@ -203,9 +228,16 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
}
case response.NameError, response.NoData, response.ServerError:
if plugin.Zones(w.nexcept).Matches(m.Question[0].Name) != "" {
// zone is in exception list, do not cache
return
}
i := newItem(m, w.now(), duration)
if w.wildcardFunc != nil {
i.wildcard = w.wildcardFunc()
}
if w.ncache.Add(key, i) {
evictions.WithLabelValues(w.server, Denial).Inc()
evictions.WithLabelValues(w.server, Denial, w.zonesMetricLabel, w.viewMetricLabel).Inc()
}
case response.OtherError:
@@ -225,6 +257,33 @@ func (w *ResponseWriter) Write(buf []byte) (int, error) {
return n, err
}
// verifyStaleResponseWriter is a response writer that only writes messages if they should replace a
// stale cache entry, and otherwise discards them.
type verifyStaleResponseWriter struct {
*ResponseWriter
refreshed bool // set to true if the last WriteMsg wrote to ResponseWriter, false otherwise.
}
// newVerifyStaleResponseWriter returns a ResponseWriter to be used when verifying stale cache
// entries. It only forward writes if an entry was successfully refreshed according to RFC8767,
// section 4 (response is NoError or NXDomain), and ignores any other response.
func newVerifyStaleResponseWriter(w *ResponseWriter) *verifyStaleResponseWriter {
return &verifyStaleResponseWriter{
w,
false,
}
}
// WriteMsg implements the dns.ResponseWriter interface.
func (w *verifyStaleResponseWriter) WriteMsg(res *dns.Msg) error {
w.refreshed = false
if res.Rcode == dns.RcodeSuccess || res.Rcode == dns.RcodeNameError {
w.refreshed = true
return w.ResponseWriter.WriteMsg(res) // stores to the cache and send to client
}
return nil // else discard
}
const (
maxTTL = dnsutil.MaximumDefaulTTL
minTTL = dnsutil.MinimalDefaultTTL

View File

@@ -1,4 +1,4 @@
// +build gofuzz
//go:build gofuzz
package cache

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metadata"
"github.com/coredns/coredns/plugin/metrics"
"github.com/coredns/coredns/request"
@@ -17,6 +18,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
rc := r.Copy() // We potentially modify r, to prevent other plugins from seeing this (r is a pointer), copy r into rc.
state := request.Request{W: w, Req: rc}
do := state.Do()
ad := r.AuthenticatedData
zone := plugin.Zones(c.Zones).Matches(state.Name())
if zone == "" {
@@ -35,31 +37,59 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
ttl := 0
i := c.getIgnoreTTL(now, state, server)
if i != nil {
ttl = i.ttl(now)
}
if i == nil {
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do}
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad,
nexcept: c.nexcept, pexcept: c.pexcept, wildcardFunc: wildcardFunc(ctx)}
return c.doRefresh(ctx, state, crr)
}
ttl = i.ttl(now)
if ttl < 0 {
servedStale.WithLabelValues(server).Inc()
// serve stale behavior
if c.verifyStale {
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do}
cw := newVerifyStaleResponseWriter(crr)
ret, err := c.doRefresh(ctx, state, cw)
if cw.refreshed {
return ret, err
}
}
// Adjust the time to get a 0 TTL in the reply built from a stale item.
now = now.Add(time.Duration(ttl) * time.Second)
cw := newPrefetchResponseWriter(server, state, c)
go c.doPrefetch(ctx, state, cw, i, now)
if !c.verifyStale {
cw := newPrefetchResponseWriter(server, state, c)
go c.doPrefetch(ctx, state, cw, i, now)
}
servedStale.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
} else if c.shouldPrefetch(i, now) {
cw := newPrefetchResponseWriter(server, state, c)
go c.doPrefetch(ctx, state, cw, i, now)
}
resp := i.toMsg(r, now, do)
w.WriteMsg(resp)
if i.wildcard != "" {
// Set wildcard source record name to metadata
metadata.SetValueFunc(ctx, "zone/wildcard", func() string {
return i.wildcard
})
}
resp := i.toMsg(r, now, do, ad)
w.WriteMsg(resp)
return dns.RcodeSuccess, nil
}
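// wildcardFunc returns a closure that reads the wildcard source record name, if any, from the request metadata.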
func wildcardFunc(ctx context.Context) func() string {
return func() string {
// Get wildcard source record name from metadata
if f := metadata.ValueFunc(ctx, "zone/wildcard"); f != nil {
return f()
}
return ""
}
}
func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *ResponseWriter, i *item, now time.Time) {
cachePrefetches.WithLabelValues(cw.server).Inc()
cachePrefetches.WithLabelValues(cw.server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
c.doRefresh(ctx, state, cw)
// When prefetching we lose the item i, and with it the frequency
@@ -70,7 +100,7 @@ func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *Respo
}
}
func (c *Cache) doRefresh(ctx context.Context, state request.Request, cw *ResponseWriter) (int, error) {
func (c *Cache) doRefresh(ctx context.Context, state request.Request, cw dns.ResponseWriter) (int, error) {
if !state.Do() {
setDo(state.Req)
}
@@ -89,43 +119,28 @@ func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
// Name implements the Handler interface.
func (c *Cache) Name() string { return "cache" }
func (c *Cache) get(now time.Time, state request.Request, server string) (*item, bool) {
k := hash(state.Name(), state.QType())
cacheRequests.WithLabelValues(server).Inc()
if i, ok := c.ncache.Get(k); ok && i.(*item).ttl(now) > 0 {
cacheHits.WithLabelValues(server, Denial).Inc()
return i.(*item), true
}
if i, ok := c.pcache.Get(k); ok && i.(*item).ttl(now) > 0 {
cacheHits.WithLabelValues(server, Success).Inc()
return i.(*item), true
}
cacheMisses.WithLabelValues(server).Inc()
return nil, false
}
// getIgnoreTTL unconditionally returns an item if it exists in the cache.
func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
k := hash(state.Name(), state.QType())
cacheRequests.WithLabelValues(server).Inc()
cacheRequests.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
if i, ok := c.ncache.Get(k); ok {
ttl := i.(*item).ttl(now)
if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
cacheHits.WithLabelValues(server, Denial).Inc()
itm := i.(*item)
ttl := itm.ttl(now)
if itm.matches(state) && (ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds()))) {
cacheHits.WithLabelValues(server, Denial, c.zonesMetricLabel, c.viewMetricLabel).Inc()
return i.(*item)
}
}
if i, ok := c.pcache.Get(k); ok {
ttl := i.(*item).ttl(now)
if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
cacheHits.WithLabelValues(server, Success).Inc()
itm := i.(*item)
ttl := itm.ttl(now)
if itm.matches(state) && (ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds()))) {
cacheHits.WithLabelValues(server, Success, c.zonesMetricLabel, c.viewMetricLabel).Inc()
return i.(*item)
}
}
cacheMisses.WithLabelValues(server).Inc()
cacheMisses.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
return nil
}

View File

@@ -1,20 +1,25 @@
package cache
import (
"strings"
"time"
"github.com/coredns/coredns/plugin/cache/freq"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
type item struct {
Name string
QType uint16
Rcode int
AuthenticatedData bool
RecursionAvailable bool
Answer []dns.RR
Ns []dns.RR
Extra []dns.RR
wildcard string
origTTL uint32
stored time.Time
@@ -24,6 +29,10 @@ type item struct {
func newItem(m *dns.Msg, now time.Time, d time.Duration) *item {
i := new(item)
if len(m.Question) != 0 {
i.Name = m.Question[0].Name
i.QType = m.Question[0].Qtype
}
i.Rcode = m.Rcode
i.AuthenticatedData = m.AuthenticatedData
i.RecursionAvailable = m.RecursionAvailable
@@ -56,7 +65,7 @@ func newItem(m *dns.Msg, now time.Time, d time.Duration) *item {
// So we're forced to always set this to 1; regardless of whether the answer came from the cache or not.
// On newer systems (e.g. ubuntu 16.04 with glibc version 2.23), this issue is resolved.
// So we may set this bit back to 0 in the future?
func (i *item) toMsg(m *dns.Msg, now time.Time, do bool) *dns.Msg {
func (i *item) toMsg(m *dns.Msg, now time.Time, do bool, ad bool) *dns.Msg {
m1 := new(dns.Msg)
m1.SetReply(m)
@@ -65,8 +74,10 @@ func (i *item) toMsg(m *dns.Msg, now time.Time, do bool) *dns.Msg {
// just set it to true.
m1.Authoritative = true
m1.AuthenticatedData = i.AuthenticatedData
if !do {
m1.AuthenticatedData = false // when DNSSEC was not wanted, it can't be authenticated data.
if !do && !ad {
// When DNSSEC was not wanted, it can't be authenticated data.
// However, retain the AD bit if the requester set the AD bit, per RFC6840 5.7-5.8
m1.AuthenticatedData = false
}
m1.RecursionAvailable = i.RecursionAvailable
m1.Rcode = i.Rcode
@@ -87,3 +98,10 @@ func (i *item) ttl(now time.Time) int {
ttl := int(i.origTTL) - int(now.UTC().Sub(i.stored).Seconds())
return ttl
}
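// matches reports whether the cached item actually answers the request; it guards against hash collisions by comparing the stored owner name and qtype with the query.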
func (i *item) matches(state request.Request) bool {
if state.QType() == i.QType && strings.EqualFold(state.QName(), i.Name) {
return true
}
return false
}

View File

@@ -14,54 +14,54 @@ var (
Subsystem: "cache",
Name: "entries",
Help: "The number of elements in the cache.",
}, []string{"server", "type"})
}, []string{"server", "type", "zones", "view"})
// cacheRequests is a counter of all requests through the cache.
cacheRequests = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "requests_total",
Help: "The count of cache requests.",
}, []string{"server"})
}, []string{"server", "zones", "view"})
// cacheHits is counter of cache hits by cache type.
cacheHits = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "hits_total",
Help: "The count of cache hits.",
}, []string{"server", "type"})
}, []string{"server", "type", "zones", "view"})
// cacheMisses is the counter of cache misses. - Deprecated
cacheMisses = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "misses_total",
Help: "The count of cache misses. Deprecated, derive misses from cache hits/requests counters.",
}, []string{"server"})
}, []string{"server", "zones", "view"})
// cachePrefetches is the number of time the cache has prefetched a cached item.
cachePrefetches = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "prefetch_total",
Help: "The number of times the cache has prefetched a cached item.",
}, []string{"server"})
}, []string{"server", "zones", "view"})
// cacheDrops is the number of responses that are not cached because the reply is malformed.
cacheDrops = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "drops_total",
Help: "The number of responses that are not cached because the reply is malformed.",
}, []string{"server"})
}, []string{"server", "zones", "view"})
// servedStale is the number of requests served from stale cache entries.
servedStale = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "served_stale_total",
Help: "The number of requests served from stale cache entries.",
}, []string{"server"})
}, []string{"server", "zones", "view"})
// evictions is the counter of cache evictions.
evictions = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "evictions_total",
Help: "The count of cache evictions.",
}, []string{"server", "type"})
}, []string{"server", "type", "zones", "view"})
)

View File

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/coredns/caddy"
@@ -22,6 +23,12 @@ func setup(c *caddy.Controller) error {
if err != nil {
return plugin.Error("cache", err)
}
c.OnStartup(func() error {
ca.viewMetricLabel = dnsserver.GetConfig(c).ViewName
return nil
})
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
ca.Next = next
return ca
@@ -165,11 +172,11 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
case "serve_stale":
args := c.RemainingArgs()
if len(args) > 1 {
if len(args) > 2 {
return nil, c.ArgErr()
}
ca.staleUpTo = 1 * time.Hour
if len(args) == 1 {
if len(args) > 0 {
d, err := time.ParseDuration(args[0])
if err != nil {
return nil, err
@@ -179,12 +186,67 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
}
ca.staleUpTo = d
}
ca.verifyStale = false
if len(args) > 1 {
mode := strings.ToLower(args[1])
if mode != "immediate" && mode != "verify" {
return nil, fmt.Errorf("invalid value for serve_stale refresh mode: %s", mode)
}
ca.verifyStale = mode == "verify"
}
case "servfail":
args := c.RemainingArgs()
if len(args) != 1 {
return nil, c.ArgErr()
}
d, err := time.ParseDuration(args[0])
if err != nil {
return nil, err
}
if d < 0 {
return nil, errors.New("invalid negative ttl for servfail")
}
if d > 5*time.Minute {
// RFC 2308 prohibits caching SERVFAIL longer than 5 minutes
return nil, errors.New("caching SERVFAIL responses over 5 minutes is not permitted")
}
ca.failttl = d
case "disable":
// disable [success|denial] [zones]...
args := c.RemainingArgs()
if len(args) < 1 {
return nil, c.ArgErr()
}
var zones []string
if len(args) > 1 {
for _, z := range args[1:] { // args[1:] define the list of zones to disable
nz := plugin.Name(z).Normalize()
if nz == "" {
return nil, fmt.Errorf("invalid disabled zone: %s", z)
}
zones = append(zones, nz)
}
} else {
// if no zones specified, default to root
zones = []string{"."}
}
switch args[0] { // args[0] defines which cache to disable
case Denial:
ca.nexcept = zones
case Success:
ca.pexcept = zones
default:
return nil, fmt.Errorf("cache type for disable must be %q or %q", Success, Denial)
}
default:
return nil, c.ArgErr()
}
}
ca.Zones = origins
ca.zonesMetricLabel = strings.Join(origins, ",")
ca.pcache = cache.New(ca.pcap)
ca.ncache = cache.New(ca.ncap)
}
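For context, the directives parsed above could appear in a Corefile roughly like the following sketch; the zone, durations, disabled subzone, and upstream address are illustrative assumptions, not taken from this change.
~~~
example.org {
    cache 3600 {
        serve_stale 1h verify
        servfail 10s
        disable denial sub.example.org
    }
    forward . 10.0.0.1
}
~~~
Here `serve_stale 1h verify` keeps expired entries for up to an hour and only serves them after a refresh attempt fails, `servfail 10s` caches SERVFAIL answers briefly, and `disable denial sub.example.org` turns off negative caching for that subzone.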

View File

@@ -22,6 +22,9 @@ func Path(s, prefix string) string {
// Domain is the opposite of Path.
func Domain(s string) string {
l := strings.Split(s, "/")
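// Strip a trailing empty element produced by a trailing slash in the path.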
if l[len(l)-1] == "" {
l = l[:len(l)-1]
}
// start with 1, to strip /skydns
for i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]

View File

@@ -154,7 +154,6 @@ func split255(s string) []string {
} else {
sx = append(sx, s[p:])
break
}
p, i = p+255, i+255
}

View File

@@ -15,11 +15,9 @@ import (
//
// Note that a service can double/triple as a TXT record or MX record.
func (s *Service) HostType() (what uint16, normalized net.IP) {
ip := net.ParseIP(s.Host)
switch {
case ip == nil:
if len(s.Text) == 0 {
return dns.TypeCNAME, nil

View File

@@ -0,0 +1,49 @@
# metadata
## Name
*metadata* - enables a metadata collector.
## Description
By enabling *metadata*, any plugin that implements the [metadata.Provider
interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) will be called for
each DNS query, at the beginning of the process for that query, in order to add its own metadata to
the context.
The metadata collected will be available to all plugins, via the Context parameter provided in the
ServeDNS function. The package (code) documentation has examples of how to inspect and retrieve
metadata a plugin might be interested in.
The metadata is added by setting a label with a value in the context. These labels should be named
`plugin/NAME`, where **NAME** is something descriptive. The only hard requirement the *metadata*
plugin enforces is that the labels contain a slash. See the documentation for
`metadata.SetValueFunc`.
The value stored is a string. The empty string signals "no metadata". See the documentation for
`metadata.ValueFunc` on how to retrieve this.
## Syntax
~~~
metadata [ZONES... ]
~~~
* **ZONES** specifies the zones for which metadata is collected.
## Plugins
The `metadata.Provider` interface needs to be implemented by each plugin willing to provide metadata
for other plugins. The *metadata* plugin calls it to gather the information from all plugins in the
context.
Note: this method should work quickly, because it is called for every request.
## Examples
The *rewrite* plugin uses metadata to rewrite requests.
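As an illustration only (the plugin name `example` and the label `example/client` are hypothetical, not part of CoreDNS), a minimal sketch of providing and consuming a metadata value could look like this:
~~~
package example

import (
	"context"

	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/plugin/metadata"
	"github.com/coredns/coredns/request"

	"github.com/miekg/dns"
)

// Example is a hypothetical plugin that both publishes and consumes metadata.
type Example struct{ Next plugin.Handler }

// Metadata implements metadata.Provider; it stores the client IP under "example/client".
func (e Example) Metadata(ctx context.Context, state request.Request) context.Context {
	metadata.SetValueFunc(ctx, "example/client", func() string { return state.IP() })
	return ctx
}

// ServeDNS shows the consuming side: any plugin in the chain can look the label up,
// because the metadata plugin collects all Provider values before the chain runs.
func (e Example) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	if f := metadata.ValueFunc(ctx, "example/client"); f != nil {
		_ = f() // use the value, e.g. for logging or policy decisions
	}
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}

// Name implements plugin.Handler.
func (e Example) Name() string { return "example" }
~~~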
## See Also
The [Provider interface](https://godoc.org/github.com/coredns/coredns/plugin/metadata#Provider) and
the [package level](https://godoc.org/github.com/coredns/coredns/plugin/metadata) documentation.

View File

@@ -0,0 +1,44 @@
package metadata
import (
"context"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Metadata implements collecting metadata information from all plugins that
// implement the Provider interface.
type Metadata struct {
Zones []string
Providers []Provider
Next plugin.Handler
}
// Name implements the Handler interface.
func (m *Metadata) Name() string { return "metadata" }
// ContextWithMetadata is exported for use by provider tests
func ContextWithMetadata(ctx context.Context) context.Context {
return context.WithValue(ctx, key{}, md{})
}
// ServeDNS implements the plugin.Handler interface.
func (m *Metadata) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
rcode, err := plugin.NextOrFailure(m.Name(), m.Next, ctx, w, r)
return rcode, err
}
// Collect will retrieve metadata functions from each metadata provider and update the context
func (m *Metadata) Collect(ctx context.Context, state request.Request) context.Context {
ctx = ContextWithMetadata(ctx)
if plugin.Zones(m.Zones).Matches(state.Name()) != "" {
// Go through all Providers and collect metadata.
for _, p := range m.Providers {
ctx = p.Metadata(ctx, state)
}
}
return ctx
}

View File

@@ -0,0 +1,127 @@
// Package metadata provides an API that allows plugins to add metadata to the context.
// Each metadata is stored under a label that has the form <plugin>/<name>. Each metadata
// is returned as a Func. When Func is called the metadata is returned. If Func is expensive to
// execute it is its responsibility to provide some form of caching. During the handling of a
// query it is expected the metadata stays constant.
//
// Basic example:
//
// Implement the Provider interface for a plugin p:
//
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" })
// return ctx
// }
//
// Basic example with caching:
//
// func (p P) Metadata(ctx context.Context, state request.Request) context.Context {
// cached := ""
// f := func() string {
// if cached != "" {
// return cached
// }
// cached = expensiveFunc()
// return cached
// }
// metadata.SetValueFunc(ctx, "test/something", f)
// return ctx
// }
//
// If you need access to this metadata from another plugin:
//
// // ...
// valueFunc := metadata.ValueFunc(ctx, "test/something")
// value := valueFunc()
// // use 'value'
//
package metadata
import (
"context"
"strings"
"github.com/coredns/coredns/request"
)
// Provider interface needs to be implemented by each plugin willing to provide
// metadata information for other plugins.
type Provider interface {
// Metadata adds metadata to the context and returns a (potentially) new context.
// Note: this method should work quickly, because it is called for every request
// from the metadata plugin.
Metadata(ctx context.Context, state request.Request) context.Context
}
// Func is the type of function stored in the metadata; when called it returns the value of the label.
type Func func() string
// IsLabel checks that the provided name is a valid label name, i.e. a non-empty namespace and name separated by a slash.
func IsLabel(label string) bool {
p := strings.Index(label, "/")
if p <= 0 || p >= len(label)-1 {
// reject an empty namespace or an empty name
return false
}
return true
}
// Labels returns all metadata keys stored in the context. These labels should take the form
// plugin/NAME, where NAME is something descriptive.
func Labels(ctx context.Context) []string {
if metadata := ctx.Value(key{}); metadata != nil {
if m, ok := metadata.(md); ok {
return keys(m)
}
}
return nil
}
// ValueFuncs returns the map[string]Func from the context, or nil if it does not exist.
func ValueFuncs(ctx context.Context) map[string]Func {
if metadata := ctx.Value(key{}); metadata != nil {
if m, ok := metadata.(md); ok {
return m
}
}
return nil
}
// ValueFunc returns the value function of label. If none can be found nil is returned. Calling the
// function returns the value of the label.
func ValueFunc(ctx context.Context, label string) Func {
if metadata := ctx.Value(key{}); metadata != nil {
if m, ok := metadata.(md); ok {
return m[label]
}
}
return nil
}
// SetValueFunc sets the metadata label to the value function. If no metadata store can be found in the
// context, this is a no-op and false is returned. Any existing value is overwritten.
func SetValueFunc(ctx context.Context, label string, f Func) bool {
if metadata := ctx.Value(key{}); metadata != nil {
if m, ok := metadata.(md); ok {
m[label] = f
return true
}
}
return false
}
// md is metadata information storage.
type md map[string]Func
// key defines the type of key that is used to save metadata into the context.
type key struct{}
func keys(m map[string]Func) []string {
s := make([]string, len(m))
i := 0
for k := range m {
s[i] = k
i++
}
return s
}

View File

@@ -0,0 +1,44 @@
package metadata
import (
"github.com/coredns/caddy"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
)
func init() { plugin.Register("metadata", setup) }
func setup(c *caddy.Controller) error {
m, err := metadataParse(c)
if err != nil {
return err
}
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
m.Next = next
return m
})
c.OnStartup(func() error {
plugins := dnsserver.GetConfig(c).Handlers()
for _, p := range plugins {
if met, ok := p.(Provider); ok {
m.Providers = append(m.Providers, met)
}
}
return nil
})
return nil
}
func metadataParse(c *caddy.Controller) (*Metadata, error) {
m := &Metadata{}
c.Next()
m.Zones = plugin.OriginsFromArgsOrServerBlock(c.RemainingArgs(), c.ServerBlockKeys)
if c.NextBlock() || c.Next() {
return nil, plugin.Error("metadata", c.ArgErr())
}
return m, nil
}

View File

@@ -8,19 +8,22 @@
With *prometheus* you export metrics from CoreDNS and any plugin that has them.
The default location for the metrics is `localhost:9153`. The metrics path is fixed to `/metrics`.
The following metrics are exported:
In addition to the default Go metrics exported by the [Prometheus Go client](https://prometheus.io/docs/guides/go-application/),
the following metrics are exported:
* `coredns_build_info{version, revision, goversion}` - info about CoreDNS itself.
* `coredns_panics_total{}` - total number of panics.
* `coredns_dns_requests_total{server, zone, proto, family, type}` - total query count.
* `coredns_dns_request_duration_seconds{server, zone, type}` - duration to process each query.
* `coredns_dns_request_size_bytes{server, zone, proto}` - size of the request in bytes.
* `coredns_dns_do_requests_total{server, zone}` - queries that have the DO bit set
* `coredns_dns_response_size_bytes{server, zone, proto}` - response size in bytes.
* `coredns_dns_responses_total{server, zone, rcode, plugin}` - response per zone, rcode and plugin.
* `coredns_plugin_enabled{server, zone, name}` - indicates whether a plugin is enabled on per server and zone basis.
* `coredns_dns_requests_total{server, zone, view, proto, family, type}` - total query count.
* `coredns_dns_request_duration_seconds{server, zone, view, type}` - duration to process each query.
* `coredns_dns_request_size_bytes{server, zone, view, proto}` - size of the request in bytes.
* `coredns_dns_do_requests_total{server, zone, view}` - queries that have the DO bit set.
* `coredns_dns_response_size_bytes{server, zone, view, proto}` - response size in bytes.
* `coredns_dns_responses_total{server, zone, view, rcode, plugin}` - responses per zone, rcode and plugin.
* `coredns_dns_https_responses_total{server, status}` - responses per server and HTTP status code.
* `coredns_plugin_enabled{server, zone, view, name}` - indicates whether a plugin is enabled on a per server, zone and view basis.
Each counter has a label `zone` which is the zonename used for the request/response.
Almost every counter has a label `zone`, which is the zone name used for the request/response.
Extra labels used are:
@@ -32,12 +35,20 @@ Extra labels used are:
* `type` which holds the query type. It holds most common types (A, AAAA, MX, SOA, CNAME, PTR, TXT,
NS, SRV, DS, DNSKEY, RRSIG, NSEC, NSEC3, HTTPS, IXFR, AXFR and ANY) and "other" which lumps together all
other types.
* `status` which holds the https status code. Possible values are:
* 200 - request is processed,
* 404 - request has been rejected during validation,
* 400 - request to DNS message conversion failed,
* 500 - processing ended up with no response.
* the `plugin` label holds the name of the plugin that made the write to the client. If the server
did the write (on error for instance), the value is empty.
If monitoring is enabled, queries that do not enter the plugin chain are exported under the fake
name "dropped" (without a closing dot - this is never a valid domain name).
Other plugins may export additional stats when the _prometheus_ plugin is enabled. Those stats are documented in each
plugin's README.
This plugin can only be used once per Server Block.
## Syntax

View File

@@ -22,3 +22,16 @@ func WithServer(ctx context.Context) string {
}
return srv.(*dnsserver.Server).Addr
}
// WithView returns the name of the view currently handling the request, if a view is defined.
//
// Basic usage with a metric:
//
// <metric>.WithLabelValues(metrics.WithView(ctx), labels..).Add(1)
func WithView(ctx context.Context) string {
v := ctx.Value(dnsserver.ViewKey{})
if v == nil {
return ""
}
return v.(string)
}
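A hedged sketch of how a plugin-side metric could carry the new view label; the plugin name `example`, the metric `requests_total`, and its help text are illustrative assumptions:
~~~
package example

import (
	"context"

	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/plugin/metrics"

	"github.com/miekg/dns"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// requestCount is a hypothetical per-plugin counter labelled by server and view.
var requestCount = promauto.NewCounterVec(prometheus.CounterOpts{
	Namespace: plugin.Namespace,
	Subsystem: "example",
	Name:      "requests_total",
	Help:      "Counter of requests seen by the example plugin.",
}, []string{"server", "view"})

// Example is a hypothetical plugin that records one sample per request.
type Example struct{ Next plugin.Handler }

// ServeDNS increments the counter with the server address and view name taken from the context.
func (e Example) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	requestCount.WithLabelValues(metrics.WithServer(ctx), metrics.WithView(ctx)).Inc()
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}

// Name implements plugin.Handler.
func (e Example) Name() string { return "example" }
~~~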

View File

@@ -34,7 +34,7 @@ func (m *Metrics) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg
rc = status
}
plugin := m.authoritativePlugin(rw.Caller)
vars.Report(WithServer(ctx), state, zone, rcode.ToString(rc), plugin, rw.Len, rw.Start)
vars.Report(WithServer(ctx), state, zone, WithView(ctx), rcode.ToString(rc), plugin, rw.Len, rw.Start)
return status, err
}

View File

@@ -24,7 +24,5 @@ func (r *Recorder) WriteMsg(res *dns.Msg) error {
_, r.Caller[0], _, _ = runtime.Caller(1)
_, r.Caller[1], _, _ = runtime.Caller(2)
_, r.Caller[2], _, _ = runtime.Caller(3)
r.Len += res.Len()
r.Msg = res
return r.ResponseWriter.WriteMsg(res)
return r.Recorder.WriteMsg(res)
}

View File

@@ -39,7 +39,7 @@ func setup(c *caddy.Controller) error {
for _, h := range conf.ListenHosts {
addrstr := conf.Transport + "://" + net.JoinHostPort(h, conf.Port)
for _, p := range conf.Handlers() {
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, p.Name()).Set(1)
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, conf.ViewName, p.Name()).Set(1)
}
}
return nil
@@ -49,7 +49,7 @@ func setup(c *caddy.Controller) error {
for _, h := range conf.ListenHosts {
addrstr := conf.Transport + "://" + net.JoinHostPort(h, conf.Port)
for _, p := range conf.Handlers() {
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, p.Name()).Set(1)
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, conf.ViewName, p.Name()).Set(1)
}
}
return nil

View File

@@ -9,7 +9,7 @@ import (
// Report reports the metrics data associated with request. This function is exported because it is also
// called from core/dnsserver to report requests hitting the server that should not be handled and are thus
// not sent down the plugin chain.
func Report(server string, req request.Request, zone, rcode, plugin string, size int, start time.Time) {
func Report(server string, req request.Request, zone, view, rcode, plugin string, size int, start time.Time) {
// Proto and Family.
net := req.Proto()
fam := "1"
@@ -18,16 +18,16 @@ func Report(server string, req request.Request, zone, rcode, plugin string, size
}
if req.Do() {
RequestDo.WithLabelValues(server, zone).Inc()
RequestDo.WithLabelValues(server, zone, view).Inc()
}
qType := qTypeString(req.QType())
RequestCount.WithLabelValues(server, zone, net, fam, qType).Inc()
RequestCount.WithLabelValues(server, zone, view, net, fam, qType).Inc()
RequestDuration.WithLabelValues(server, zone).Observe(time.Since(start).Seconds())
RequestDuration.WithLabelValues(server, zone, view).Observe(time.Since(start).Seconds())
ResponseSize.WithLabelValues(server, zone, net).Observe(float64(size))
RequestSize.WithLabelValues(server, zone, net).Observe(float64(req.Len()))
ResponseSize.WithLabelValues(server, zone, view, net).Observe(float64(size))
RequestSize.WithLabelValues(server, zone, view, net).Observe(float64(req.Len()))
ResponseRcode.WithLabelValues(server, zone, rcode, plugin).Inc()
ResponseRcode.WithLabelValues(server, zone, view, rcode, plugin).Inc()
}

View File

@@ -14,7 +14,7 @@ var (
Subsystem: subsystem,
Name: "requests_total",
Help: "Counter of DNS requests made per zone, protocol and family.",
}, []string{"server", "zone", "proto", "family", "type"})
}, []string{"server", "zone", "view", "proto", "family", "type"})
RequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
@@ -22,7 +22,7 @@ var (
Name: "request_duration_seconds",
Buckets: plugin.TimeBuckets,
Help: "Histogram of the time (in seconds) each request took per zone.",
}, []string{"server", "zone"})
}, []string{"server", "zone", "view"})
RequestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
@@ -30,14 +30,14 @@ var (
Name: "request_size_bytes",
Help: "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.",
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
}, []string{"server", "zone", "proto"})
}, []string{"server", "zone", "view", "proto"})
RequestDo = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "do_requests_total",
Help: "Counter of DNS requests with DO bit set per zone.",
}, []string{"server", "zone"})
}, []string{"server", "zone", "view"})
ResponseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
@@ -45,14 +45,14 @@ var (
Name: "response_size_bytes",
Help: "Size of the returned response in bytes.",
Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3},
}, []string{"server", "zone", "proto"})
}, []string{"server", "zone", "view", "proto"})
ResponseRcode = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "responses_total",
Help: "Counter of response status codes.",
}, []string{"server", "zone", "rcode", "plugin"})
}, []string{"server", "zone", "view", "rcode", "plugin"})
Panic = promauto.NewCounter(prometheus.CounterOpts{
Namespace: plugin.Namespace,
@@ -64,7 +64,14 @@ var (
Namespace: plugin.Namespace,
Name: "plugin_enabled",
Help: "A metric that indicates whether a plugin is enabled on per server and zone basis.",
}, []string{"server", "zone", "name"})
}, []string{"server", "zone", "view", "name"})
HTTPSResponsesCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "https_responses_total",
Help: "Counter of DoH responses per server and http status code.",
}, []string{"server", "status"})
)
const (

View File

@@ -125,7 +125,6 @@ func (h Host) NormalizeExact() []string {
}
for i := range hosts {
hosts[i] = Name(hosts[i]).Normalize()
}
return hosts
}

View File

@@ -66,7 +66,7 @@ func (c *Cache) Remove(key uint64) {
// Len returns the number of elements in the cache.
func (c *Cache) Len() int {
l := 0
for _, s := range c.shards {
for _, s := range &c.shards {
l += s.Len()
}
return l
@@ -74,7 +74,7 @@ func (c *Cache) Len() int {
// Walk walks each shard in the cache.
func (c *Cache) Walk(f func(map[uint64]interface{}, uint64) bool) {
for _, s := range c.shards {
for _, s := range &c.shards {
s.Walk(f)
}
}

View File

@@ -38,7 +38,7 @@ func Split(n *net.IPNet) []string {
func nets(network *net.IPNet, newPrefixLen int) []*net.IPNet {
prefixLen, _ := network.Mask.Size()
maxSubnets := int(math.Exp2(float64(newPrefixLen)) / math.Exp2(float64(prefixLen)))
nets := []*net.IPNet{{network.IP, net.CIDRMask(newPrefixLen, 8*len(network.IP))}}
nets := []*net.IPNet{{IP: network.IP, Mask: net.CIDRMask(newPrefixLen, 8*len(network.IP))}}
for i := 1; i < maxSubnets; i++ {
next, exceeds := cidr.NextSubnet(nets[len(nets)-1], newPrefixLen)

View File

@@ -92,7 +92,7 @@ func requestToMsgGet(req *http.Request) (*dns.Msg, error) {
}
func toMsg(r io.ReadCloser) (*dns.Msg, error) {
buf, err := io.ReadAll(r)
buf, err := io.ReadAll(http.MaxBytesReader(nil, r, 65536))
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,141 @@
package log
import (
"sync"
)
// Listener listens for all log prints of plugin loggers, i.e. loggers with a plugin name.
// When a plugin logger is called, it first calls the same method on every registered Listener.
// For example, the external plugin k8s_event replicates log prints to Kubernetes events.
type Listener interface {
Name() string
Debug(plugin string, v ...interface{})
Debugf(plugin string, format string, v ...interface{})
Info(plugin string, v ...interface{})
Infof(plugin string, format string, v ...interface{})
Warning(plugin string, v ...interface{})
Warningf(plugin string, format string, v ...interface{})
Error(plugin string, v ...interface{})
Errorf(plugin string, format string, v ...interface{})
Fatal(plugin string, v ...interface{})
Fatalf(plugin string, format string, v ...interface{})
}
type listeners struct {
listeners []Listener
sync.RWMutex
}
var ls *listeners
func init() {
ls = &listeners{}
ls.listeners = make([]Listener, 0)
}
// RegisterListener registers a listener object.
func RegisterListener(new Listener) error {
ls.Lock()
defer ls.Unlock()
for k, l := range ls.listeners {
if l.Name() == new.Name() {
ls.listeners[k] = new
return nil
}
}
ls.listeners = append(ls.listeners, new)
return nil
}
// DeregisterListener deregisters a listener object.
func DeregisterListener(old Listener) error {
ls.Lock()
defer ls.Unlock()
for k, l := range ls.listeners {
if l.Name() == old.Name() {
ls.listeners = append(ls.listeners[:k], ls.listeners[k+1:]...)
return nil
}
}
return nil
}
func (ls *listeners) debug(plugin string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Debug(plugin, v...)
}
ls.RUnlock()
}
func (ls *listeners) debugf(plugin string, format string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Debugf(plugin, format, v...)
}
ls.RUnlock()
}
func (ls *listeners) info(plugin string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Info(plugin, v...)
}
ls.RUnlock()
}
func (ls *listeners) infof(plugin string, format string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Infof(plugin, format, v...)
}
ls.RUnlock()
}
func (ls *listeners) warning(plugin string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Warning(plugin, v...)
}
ls.RUnlock()
}
func (ls *listeners) warningf(plugin string, format string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Warningf(plugin, format, v...)
}
ls.RUnlock()
}
func (ls *listeners) error(plugin string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Error(plugin, v...)
}
ls.RUnlock()
}
func (ls *listeners) errorf(plugin string, format string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Errorf(plugin, format, v...)
}
ls.RUnlock()
}
func (ls *listeners) fatal(plugin string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Fatal(plugin, v...)
}
ls.RUnlock()
}
func (ls *listeners) fatalf(plugin string, format string, v ...interface{}) {
ls.RLock()
for _, l := range ls.listeners {
l.Fatalf(plugin, format, v...)
}
ls.RUnlock()
}
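A minimal, hypothetical sketch of how a plugin could register such a Listener; the `printer` type, its package, and its choice to echo only error-level output are assumptions, not part of this change:
~~~
package example

import (
	stdlog "log"

	clog "github.com/coredns/coredns/plugin/pkg/log"
)

// printer is a hypothetical Listener that echoes only error-level plugin logs to the standard logger.
type printer struct{}

func (printer) Name() string { return "printer" }

func (printer) Debug(plugin string, v ...interface{})                   {}
func (printer) Debugf(plugin string, format string, v ...interface{})   {}
func (printer) Info(plugin string, v ...interface{})                    {}
func (printer) Infof(plugin string, format string, v ...interface{})    {}
func (printer) Warning(plugin string, v ...interface{})                 {}
func (printer) Warningf(plugin string, format string, v ...interface{}) {}

func (printer) Error(plugin string, v ...interface{}) {
	stdlog.Println(append([]interface{}{"plugin/" + plugin + ":"}, v...)...)
}
func (printer) Errorf(plugin string, format string, v ...interface{}) {
	stdlog.Printf("plugin/"+plugin+": "+format, v...)
}

func (printer) Fatal(plugin string, v ...interface{})                 {}
func (printer) Fatalf(plugin string, format string, v ...interface{}) {}

func init() {
	// Registering replaces any previously registered listener with the same Name.
	clog.RegisterListener(printer{})
}
~~~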

View File

@@ -27,6 +27,7 @@ func (p P) Debug(v ...interface{}) {
if !D.Value() {
return
}
ls.debug(p.plugin, v...)
p.log(debug, v...)
}
@@ -35,29 +36,56 @@ func (p P) Debugf(format string, v ...interface{}) {
if !D.Value() {
return
}
ls.debugf(p.plugin, format, v...)
p.logf(debug, format, v...)
}
// Info logs as log.Info.
func (p P) Info(v ...interface{}) { p.log(info, v...) }
func (p P) Info(v ...interface{}) {
ls.info(p.plugin, v...)
p.log(info, v...)
}
// Infof logs as log.Infof.
func (p P) Infof(format string, v ...interface{}) { p.logf(info, format, v...) }
func (p P) Infof(format string, v ...interface{}) {
ls.infof(p.plugin, format, v...)
p.logf(info, format, v...)
}
// Warning logs as log.Warning.
func (p P) Warning(v ...interface{}) { p.log(warning, v...) }
func (p P) Warning(v ...interface{}) {
ls.warning(p.plugin, v...)
p.log(warning, v...)
}
// Warningf logs as log.Warningf.
func (p P) Warningf(format string, v ...interface{}) { p.logf(warning, format, v...) }
func (p P) Warningf(format string, v ...interface{}) {
ls.warningf(p.plugin, format, v...)
p.logf(warning, format, v...)
}
// Error logs as log.Error.
func (p P) Error(v ...interface{}) { p.log(err, v...) }
func (p P) Error(v ...interface{}) {
ls.error(p.plugin, v...)
p.log(err, v...)
}
// Errorf logs as log.Errorf.
func (p P) Errorf(format string, v ...interface{}) { p.logf(err, format, v...) }
func (p P) Errorf(format string, v ...interface{}) {
ls.errorf(p.plugin, format, v...)
p.logf(err, format, v...)
}
// Fatal logs as log.Fatal and calls os.Exit(1).
func (p P) Fatal(v ...interface{}) { p.log(fatal, v...); os.Exit(1) }
func (p P) Fatal(v ...interface{}) {
ls.fatal(p.plugin, v...)
p.log(fatal, v...)
os.Exit(1)
}
// Fatalf logs as log.Fatalf and calls os.Exit(1).
func (p P) Fatalf(format string, v ...interface{}) { p.logf(fatal, format, v...); os.Exit(1) }
func (p P) Fatalf(format string, v ...interface{}) {
ls.fatalf(p.plugin, format, v...)
p.logf(fatal, format, v...)
os.Exit(1)
}

View File

@@ -32,7 +32,6 @@ func stripZone(host string) string {
func HostPortOrFile(s ...string) ([]string, error) {
var servers []string
for _, h := range s {
trans, host := Transport(h)
addr, _, err := net.SplitHostPort(host)

View File

@@ -1,4 +1,4 @@
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
//go:build !go1.11 || (!aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd)
package reuseport

View File

@@ -1,5 +1,4 @@
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
//go:build go1.11 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd)
package reuseport

View File

@@ -82,6 +82,9 @@ func PTR(rr string) *dns.PTR { r, _ := dns.NewRR(rr); return r.(*dns.PTR) }
// TXT returns a TXT record from rr. It panics on errors.
func TXT(rr string) *dns.TXT { r, _ := dns.NewRR(rr); return r.(*dns.TXT) }
// CAA returns a CAA record from rr. It panics on errors.
func CAA(rr string) *dns.CAA { r, _ := dns.NewRR(rr); return r.(*dns.CAA) }
// HINFO returns a HINFO record from rr. It panics on errors.
func HINFO(rr string) *dns.HINFO { r, _ := dns.NewRR(rr); return r.(*dns.HINFO) }
@@ -282,7 +285,6 @@ func SortAndCheck(resp *dns.Msg, tc Case) error {
}
if err := Section(tc, Ns, resp.Ns); err != nil {
return err
}
return Section(tc, Extra, resp.Extra)
}

View File

@@ -12,6 +12,7 @@ import (
type ResponseWriter struct {
TCP bool // if TCP is true we return an TCP connection instead of an UDP one.
RemoteIP string
Zone string
}
// LocalAddr returns the local address, 127.0.0.1:53 (UDP, TCP if t.TCP is true).
@@ -33,9 +34,9 @@ func (t *ResponseWriter) RemoteAddr() net.Addr {
ip := net.ParseIP(remoteIP)
port := 40212
if t.TCP {
return &net.TCPAddr{IP: ip, Port: port, Zone: ""}
return &net.TCPAddr{IP: ip, Port: port, Zone: t.Zone}
}
return &net.UDPAddr{IP: ip, Port: port, Zone: ""}
return &net.UDPAddr{IP: ip, Port: port, Zone: t.Zone}
}
// WriteMsg implements dns.ResponseWriter interface.

View File

@@ -80,7 +80,6 @@ func Scrape(url string) []*MetricFamily {
// ScrapeMetricAsInt provides a sum of all metrics collected for the name and label provided.
// If the metric is not a numeric value, it will be counted as 0.
func ScrapeMetricAsInt(addr string, name string, label string, nometricvalue int) int {
valueToInt := func(m metric) int {
v := m.Value
r, err := strconv.Atoi(v)
@@ -141,7 +140,6 @@ func MetricValueLabel(name, label string, mfs []*MetricFamily) (string, map[stri
return m.(metric).Value, m.(metric).Labels
}
}
}
}
}

View File

@@ -313,7 +313,6 @@ func (r *Request) Class() string {
}
return dns.Class(r.Req.Question[0].Qclass).String()
}
// QClass returns the class of the question in the request.
@@ -327,7 +326,6 @@ func (r *Request) QClass() uint16 {
}
return r.Req.Question[0].Qclass
}
// Clear clears all caching from Request s.