TUN-3375: Upgrade coredns and prometheus dependencies

Igor Postelnik
2020-09-09 13:09:42 -05:00
parent 7acea1ac99
commit 741cd66c9e
757 changed files with 86868 additions and 32428 deletions

View File

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Copyright 2016-2020 The CoreDNS authors and contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -6,6 +6,8 @@ import (
"strings"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/parse"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/miekg/dns"
)
@@ -18,7 +20,7 @@ type zoneAddr struct {
Address string // used for bound zoneAddr - validation of overlapping
}
// String return the string representation of z.
// String returns the string representation of z.
func (z zoneAddr) String() string {
s := z.Transport + "://" + z.Zone + ":" + z.Port
if z.Address != "" {
@@ -27,43 +29,10 @@ func (z zoneAddr) String() string {
return s
}
// Transport returns the protocol of the string s
func Transport(s string) string {
switch {
case strings.HasPrefix(s, TransportTLS+"://"):
return TransportTLS
case strings.HasPrefix(s, TransportDNS+"://"):
return TransportDNS
case strings.HasPrefix(s, TransportGRPC+"://"):
return TransportGRPC
case strings.HasPrefix(s, TransportHTTPS+"://"):
return TransportHTTPS
}
return TransportDNS
}
// normalizeZone parses an zone string into a structured format with separate
// normalizeZone parses a zone string into a structured format with separate
// host, and port portions, as well as the original input string.
func normalizeZone(str string) (zoneAddr, error) {
var err error
// Default to DNS if there isn't a transport protocol prefix.
trans := TransportDNS
switch {
case strings.HasPrefix(str, TransportTLS+"://"):
trans = TransportTLS
str = str[len(TransportTLS+"://"):]
case strings.HasPrefix(str, TransportDNS+"://"):
trans = TransportDNS
str = str[len(TransportDNS+"://"):]
case strings.HasPrefix(str, TransportGRPC+"://"):
trans = TransportGRPC
str = str[len(TransportGRPC+"://"):]
case strings.HasPrefix(str, TransportHTTPS+"://"):
trans = TransportHTTPS
str = str[len(TransportHTTPS+"://"):]
}
trans, str := parse.Transport(str)
host, port, ipnet, err := plugin.SplitHostPort(str)
if err != nil {
@@ -71,17 +40,15 @@ func normalizeZone(str string) (zoneAddr, error) {
}
if port == "" {
if trans == TransportDNS {
switch trans {
case transport.DNS:
port = Port
}
if trans == TransportTLS {
port = TLSPort
}
if trans == TransportGRPC {
port = GRPCPort
}
if trans == TransportHTTPS {
port = HTTPSPort
case transport.TLS:
port = transport.TLSPort
case transport.GRPC:
port = transport.GRPCPort
case transport.HTTPS:
port = transport.HTTPSPort
}
}
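The hand-rolled scheme switch above is replaced by the shared parse.Transport helper plus the transport package's scheme and port constants. A minimal sketch of what such a helper does, matching the two-value call sites in this diff but not claiming to be the vendored source:

```go
package parse

import (
	"strings"

	"github.com/coredns/coredns/plugin/pkg/transport"
)

// Transport returns the transport of s ("dns", "tls", "grpc" or "https") and s
// with the scheme prefix stripped; bare addresses default to DNS. This mirrors
// the call sites above but is a sketch, not the vendored implementation.
func Transport(s string) (trans string, addr string) {
	for _, t := range []string{transport.TLS, transport.GRPC, transport.HTTPS, transport.DNS} {
		if strings.HasPrefix(s, t+"://") {
			return t, s[len(t)+len("://"):]
		}
	}
	return transport.DNS, s
}
```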
@@ -103,17 +70,9 @@ func SplitProtocolHostPort(address string) (protocol string, ip string, port str
}
}
// Supported transports.
const (
TransportDNS = "dns"
TransportTLS = "tls"
TransportGRPC = "grpc"
TransportHTTPS = "https"
)
type zoneOverlap struct {
registeredAddr map[zoneAddr]zoneAddr // each zoneAddr is registered once by its key
unboundOverlap map[zoneAddr]zoneAddr // the "no bind" equiv ZoneAdddr is registered by its original key
unboundOverlap map[zoneAddr]zoneAddr // the "no bind" equiv ZoneAddr is registered by its original key
}
func newOverlapZone() *zoneOverlap {

View File

@@ -6,7 +6,7 @@ import (
"github.com/coredns/coredns/plugin"
"github.com/mholt/caddy"
"github.com/caddyserver/caddy"
)
// Config configuration for a single server.
@@ -21,7 +21,7 @@ type Config struct {
// The port to listen on.
Port string
// Root points to a base directory we we find user defined "things".
// Root points to a base directory we find user defined "things".
// First consumer is the file plugin to looks for zone files in this place.
Root string

View File

@@ -9,9 +9,11 @@ import (
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/coredns/coredns/plugin/pkg/parse"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyfile"
"github.com/caddyserver/caddy"
"github.com/caddyserver/caddy/caddyfile"
)
const serverType = "dns"
@@ -26,7 +28,7 @@ func init() {
DefaultInput: func() caddy.Input {
return caddy.CaddyfileInput{
Filepath: "Corefile",
Contents: []byte(".:" + Port + " {\nwhoami\n}\n"),
Contents: []byte(".:" + Port + " {\nwhoami\nlog\n}\n"),
ServerTypeName: serverType,
}
},
@@ -50,6 +52,9 @@ func (h *dnsContext) saveConfig(key string, cfg *Config) {
h.keysToConfigs[key] = cfg
}
// Compile-time check to ensure dnsContext implements the caddy.Context interface
var _ caddy.Context = &dnsContext{}
// InspectServerBlocks make sure that everything checks out before
// executing directives and otherwise prepares the directives to
// be parsed and executed.
@@ -111,29 +116,29 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
var servers []caddy.Server
for addr, group := range groups {
// switch on addr
switch Transport(addr) {
case TransportDNS:
switch tr, _ := parse.Transport(addr); tr {
case transport.DNS:
s, err := NewServer(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportTLS:
case transport.TLS:
s, err := NewServerTLS(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportGRPC:
case transport.GRPC:
s, err := NewServergRPC(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportHTTPS:
case transport.HTTPS:
s, err := NewServerHTTPS(addr, group)
if err != nil {
return nil, err
@@ -234,16 +239,8 @@ func groupConfigsByListenAddr(configs []*Config) (map[string][]*Config, error) {
return groups, nil
}
const (
// DefaultPort is the default port.
DefaultPort = "53"
// TLSPort is the default port for DNS-over-TLS.
TLSPort = "853"
// GRPCPort is the default port for DNS-over-gRPC.
GRPCPort = "443"
// HTTPSPort is the default port for DNS-over-HTTPS.
HTTPSPort = "443"
)
// DefaultPort is the default port.
const DefaultPort = transport.Port
// These "soft defaults" are configurable by
// command line flags, etc.

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"net"
"runtime"
"strings"
"sync"
"time"
@@ -14,9 +15,12 @@ import (
"github.com/coredns/coredns/plugin/pkg/edns"
"github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/plugin/pkg/rcode"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/trace"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/coredns/coredns/request"
"github.com/caddyserver/caddy"
"github.com/miekg/dns"
ot "github.com/opentracing/opentracing-go"
)
@@ -32,12 +36,12 @@ type Server struct {
server [2]*dns.Server // 0 is a net.Listener, 1 is a net.PacketConn (a *UDPConn) in our case.
m sync.Mutex // protects the servers
zones map[string]*Config // zones keyed by their address
dnsWg sync.WaitGroup // used to wait on outstanding connections
connTimeout time.Duration // the maximum duration of a graceful shutdown
trace trace.Trace // the trace plugin for the server
debug bool // disable recover()
classChaos bool // allow non-INET class queries
zones map[string]*Config // zones keyed by their address
dnsWg sync.WaitGroup // used to wait on outstanding connections
graceTimeout time.Duration // the maximum duration of a graceful shutdown
trace trace.Trace // the trace plugin for the server
debug bool // disable recover()
classChaos bool // allow non-INET class queries
}
// NewServer returns a new CoreDNS server and compiles all plugins in to it. By default CH class
@@ -45,9 +49,9 @@ type Server struct {
func NewServer(addr string, group []*Config) (*Server, error) {
s := &Server{
Addr: addr,
zones: make(map[string]*Config),
connTimeout: 5 * time.Second, // TODO(miek): was configurable
Addr: addr,
zones: make(map[string]*Config),
graceTimeout: 5 * time.Second,
}
// We have to bound our wg with one increment
@@ -61,26 +65,16 @@ func NewServer(addr string, group []*Config) (*Server, error) {
for _, site := range group {
if site.Debug {
s.debug = true
log.D = true
log.D.Set()
} else {
// When reloading we need to explicitly disable debug logging if it is now disabled.
s.debug = false
log.D.Clear()
}
// set the config per zone
s.zones[site.Zone] = site
// compile custom plugin for everything
if site.registry != nil {
// this config is already computed with the chain of plugin
// set classChaos in accordance with previously registered plugins
for name := range enableChaos {
if _, ok := site.registry[name]; ok {
s.classChaos = true
break
}
}
// set trace handler in accordance with previously registered "trace" plugin
if handler, ok := site.registry["trace"]; ok {
s.trace = handler.(trace.Trace)
}
continue
}
var stack plugin.Handler
for i := len(site.Plugin) - 1; i >= 0; i-- {
stack = site.Plugin[i](stack)
@@ -96,7 +90,7 @@ func NewServer(addr string, group []*Config) (*Server, error) {
}
}
// Unblock CH class queries when any of these plugins are loaded.
if _, ok := enableChaos[stack.Name()]; ok {
if _, ok := EnableChaos[stack.Name()]; ok {
s.classChaos = true
}
}
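The plugin chain is built by folding the configured plugins right to left, so the first directive ends up as the outermost wrapper. A tiny self-contained illustration of that reverse fold, with invented plugin names and a simplified handler type:

```go
package main

import "fmt"

// Handler and Plugin mirror the shape used above: a plugin receives the next
// handler and returns a wrapped one. Names and behaviour here are invented.
type Handler func() string
type Plugin func(Handler) Handler

func wrap(name string) Plugin {
	return func(next Handler) Handler {
		return func() string { return name + "(" + next() + ")" }
	}
}

func main() {
	plugins := []Plugin{wrap("errors"), wrap("cache"), wrap("forward")}

	var stack Handler = func() string { return "backend" }
	for i := len(plugins) - 1; i >= 0; i-- { // same reverse fold as NewServer above
		stack = plugins[i](stack)
	}
	fmt.Println(stack()) // errors(cache(forward(backend)))
}
```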
@@ -106,6 +100,9 @@ func NewServer(addr string, group []*Config) (*Server, error) {
return s, nil
}
// Compile-time check to ensure Server implements the caddy.GracefulServer interface
var _ caddy.GracefulServer = &Server{}
// Serve starts the server with an existing listener. It blocks until the server stops.
// This implements caddy.TCPServer interface.
func (s *Server) Serve(l net.Listener) error {
@@ -134,16 +131,21 @@ func (s *Server) ServePacket(p net.PacketConn) error {
// Listen implements caddy.TCPServer interface.
func (s *Server) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportDNS+"://"):])
l, err := reuseport.Listen("tcp", s.Addr[len(transport.DNS+"://"):])
if err != nil {
return nil, err
}
return l, nil
}
// WrapListener Listen implements caddy.GracefulServer interface.
func (s *Server) WrapListener(ln net.Listener) net.Listener {
return ln
}
// ListenPacket implements caddy.UDPServer interface.
func (s *Server) ListenPacket() (net.PacketConn, error) {
p, err := net.ListenPacket("udp", s.Addr[len(TransportDNS+"://"):])
p, err := reuseport.ListenPacket("udp", s.Addr[len(transport.DNS+"://"):])
if err != nil {
return nil, err
}
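Listen and ListenPacket now go through the new reuseport package instead of net.Listen/net.ListenPacket, presumably so several server instances can bind the same address (for example during graceful reloads) via SO_REUSEPORT. A rough Linux-only sketch of such a listener; the vendored implementation is platform aware and may differ:

```go
package reuseport

import (
	"context"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// Listen is a sketch of a SO_REUSEPORT-enabled net.Listen with the same
// signature as the calls above; it is illustrative, not the vendored code.
func Listen(network, addr string) (net.Listener, error) {
	lc := net.ListenConfig{
		Control: func(network, address string, c syscall.RawConn) error {
			var serr error
			if err := c.Control(func(fd uintptr) {
				serr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
			}); err != nil {
				return err
			}
			return serr
		},
	}
	return lc.Listen(context.Background(), network, addr)
}
```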
@@ -171,7 +173,7 @@ func (s *Server) Stop() (err error) {
// Wait for remaining connections to finish or
// force them all to close after timeout
select {
case <-time.After(s.connTimeout):
case <-time.After(s.graceTimeout):
case <-done:
}
}
@@ -199,7 +201,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// The default dns.Mux checks the question section size, but we have our
// own mux here. Check if we have a question section. If not drop them here.
if r == nil || len(r.Question) == 0 {
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)
return
}
@@ -208,14 +210,15 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// In case the user doesn't enable error plugin, we still
// need to make sure that we stay alive up here
if rec := recover(); rec != nil {
log.Errorf("Recovered from panic in server: %q", s.Addr)
vars.Panic.Inc()
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeServerFailure)
}
}()
}
if !s.classChaos && r.Question[0].Qclass != dns.ClassINET {
DefaultErrorFunc(ctx, w, r, dns.RcodeRefused)
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
return
}
@@ -224,40 +227,23 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
return
}
ctx, err := incrementDepthAndCheck(ctx)
if err != nil {
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
return
}
// Wrap the response writer in a ScrubWriter so we automatically make the reply fit in the client's buffer.
w = request.NewScrubWriter(r, w)
q := r.Question[0].Name
b := make([]byte, len(q))
var off int
var end bool
var dshandler *Config
q := strings.ToLower(r.Question[0].Name)
var (
off int
end bool
dshandler *Config
)
for {
l := len(q[off:])
for i := 0; i < l; i++ {
b[i] = q[off+i]
// normalize the name for the lookup
if b[i] >= 'A' && b[i] <= 'Z' {
b[i] |= ('a' - 'A')
}
}
if h, ok := s.zones[string(b[:l])]; ok {
// Set server's address in the context so plugins can reference back to this,
// This will makes those metrics unique.
ctx = context.WithValue(ctx, plugin.ServerCtx{}, s.Addr)
if h, ok := s.zones[q[off:]]; ok {
if r.Question[0].Qtype != dns.TypeDS {
if h.FilterFunc == nil {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
errorFunc(s.Addr, w, r, rcode)
}
return
}
@@ -266,7 +252,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
if h.FilterFunc(q) {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
errorFunc(s.Addr, w, r, rcode)
}
return
}
@@ -274,7 +260,7 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// The type is DS, keep the handler, but keep on searching as maybe we are serving
// the parent as well and the DS should be routed to it - this will probably *misroute* DS
// queries to a possibly grand parent, but there is no way for us to know at this point
// if there is an actually delegation from grandparent -> parent -> zone.
// if there is an actual delegation from grandparent -> parent -> zone.
// In all fairness: direct DS queries should not be needed.
dshandler = h
}
@@ -288,26 +274,22 @@ func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
// DS request, and we found a zone, use the handler for the query.
rcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
errorFunc(s.Addr, w, r, rcode)
}
return
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := s.zones["."]; ok && h.pluginChain != nil {
// See comment above.
ctx = context.WithValue(ctx, plugin.ServerCtx{}, s.Addr)
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
errorFunc(s.Addr, w, r, rcode)
}
return
}
// Still here? Error out with REFUSED.
DefaultErrorFunc(ctx, w, r, dns.RcodeRefused)
errorAndMetricsFunc(s.Addr, w, r, dns.RcodeRefused)
}
// OnStartupComplete lists the sites served by this server
@@ -321,7 +303,6 @@ func (s *Server) OnStartupComplete() {
if out != "" {
fmt.Print(out)
}
return
}
// Tracer returns the tracer in the server if defined.
@@ -333,56 +314,42 @@ func (s *Server) Tracer() ot.Tracer {
return s.trace.Tracer()
}
// DefaultErrorFunc responds to an DNS request with an error.
func DefaultErrorFunc(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, rc int) {
// errorFunc responds to an DNS request with an error.
func errorFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int) {
state := request.Request{W: w, Req: r}
answer := new(dns.Msg)
answer.SetRcode(r, rc)
state.SizeAndDo(answer)
vars.Report(ctx, state, vars.Dropped, rcode.ToString(rc), answer.Len(), time.Now())
w.WriteMsg(answer)
}
// incrementDepthAndCheck increments the loop counter in the context, and returns an error if
// the counter exceeds the max number of re-entries
func incrementDepthAndCheck(ctx context.Context) (context.Context, error) {
// Loop counter for self directed lookups
loop := ctx.Value(loopKey{})
if loop == nil {
ctx = context.WithValue(ctx, loopKey{}, 0)
return ctx, nil
}
func errorAndMetricsFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int) {
state := request.Request{W: w, Req: r}
iloop := loop.(int) + 1
if iloop > maxreentries {
return ctx, fmt.Errorf("too deep")
}
ctx = context.WithValue(ctx, loopKey{}, iloop)
return ctx, nil
answer := new(dns.Msg)
answer.SetRcode(r, rc)
state.SizeAndDo(answer)
vars.Report(server, state, vars.Dropped, rcode.ToString(rc), answer.Len(), time.Now())
w.WriteMsg(answer)
}
const (
tcp = 0
udp = 1
maxreentries = 10
tcp = 0
udp = 1
)
type (
// Key is the context key for the current server
Key struct{}
loopKey struct{} // loopKey is the context key for counting self loops
)
// Key is the context key for the current server added to the context.
type Key struct{}
// enableChaos is a map with plugin names for which we should open CH class queries as
// we block these by default.
var enableChaos = map[string]bool{
"chaos": true,
"forward": true,
"proxy": true,
// EnableChaos is a map with plugin names for which we should open CH class queries as we block these by default.
var EnableChaos = map[string]struct{}{
"chaos": {},
"forward": {},
"proxy": {},
}
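EnableChaos, flagsBlacklist further down, and the various dup maps all switch from map[...]bool to map[...]struct{}, the usual Go set idiom: the comma-ok membership test stays the same while the values take no storage. For example:

```go
package main

import "fmt"

func main() {
	// struct{}-valued map as a set: same comma-ok membership test as
	// map[string]bool, but the values occupy no space.
	enable := map[string]struct{}{"chaos": {}, "forward": {}, "proxy": {}}
	if _, ok := enable["forward"]; ok {
		fmt.Println("CH class queries allowed")
	}
}
```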
// Quiet mode will not show any informative output on initialization.

View File

@@ -8,8 +8,10 @@ import (
"net"
"github.com/coredns/coredns/pb"
"github.com/coredns/coredns/plugin/pkg/watch"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/caddyserver/caddy"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/miekg/dns"
"github.com/opentracing/opentracing-go"
@@ -23,7 +25,6 @@ type ServergRPC struct {
grpcServer *grpc.Server
listenAddr net.Addr
tlsConfig *tls.Config
watch watch.Watcher
}
// NewServergRPC returns a new CoreDNS GRPC server and compiles all plugin in to it.
@@ -40,9 +41,12 @@ func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) {
tlsConfig = conf.TLSConfig
}
return &ServergRPC{Server: s, tlsConfig: tlsConfig, watch: watch.NewWatcher(watchables(s.zones))}, nil
return &ServergRPC{Server: s, tlsConfig: tlsConfig}, nil
}
// Compile-time check to ensure Server implements the caddy.GracefulServer interface
var _ caddy.GracefulServer = &Server{}
// Serve implements caddy.TCPServer interface.
func (s *ServergRPC) Serve(l net.Listener) error {
s.m.Lock()
@@ -73,7 +77,7 @@ func (s *ServergRPC) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServergRPC) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportGRPC+"://"):])
l, err := reuseport.Listen("tcp", s.Addr[len(transport.GRPC+"://"):])
if err != nil {
return nil, err
}
@@ -90,11 +94,10 @@ func (s *ServergRPC) OnStartupComplete() {
return
}
out := startUpZones(TransportGRPC+"://", s.Addr, s.zones)
out := startUpZones(transport.GRPC+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}
// Stop stops the server. It blocks until the server is
@@ -102,9 +105,6 @@ func (s *ServergRPC) OnStartupComplete() {
func (s *ServergRPC) Stop() (err error) {
s.m.Lock()
defer s.m.Unlock()
if s.watch != nil {
s.watch.Stop()
}
if s.grpcServer != nil {
s.grpcServer.GracefulStop()
}
@@ -133,7 +133,8 @@ func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket
w := &gRPCresponse{localAddr: s.listenAddr, remoteAddr: a, Msg: msg}
s.ServeDNS(ctx, w, msg)
dnsCtx := context.WithValue(ctx, Key{}, s.Server)
s.ServeDNS(dnsCtx, w, msg)
packed, err := w.Msg.Pack()
if err != nil {
@@ -143,12 +144,6 @@ func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket
return &pb.DnsPacket{Msg: packed}, nil
}
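Query now seeds the request context with Key{} pointing at the underlying *Server before calling ServeDNS (the TLS and HTTPS servers below do the same), so downstream plugins can recover which server instance handled the query. A hedged sketch of the consumer side; the helper name is invented, Key and Server.Addr come from this diff:

```go
package example

import (
	"context"

	"github.com/coredns/coredns/core/dnsserver"
)

// serverAddr recovers the server that accepted the query from the request
// context. dnsserver.Key{} and the Server.Addr field appear in this diff;
// the helper itself is illustrative, not part of the commit.
func serverAddr(ctx context.Context) string {
	if srv, ok := ctx.Value(dnsserver.Key{}).(*dnsserver.Server); ok {
		return srv.Addr
	}
	return ""
}
```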
// Watch is the entrypoint called by the gRPC layer when the user asks
// to watch a query.
func (s *ServergRPC) Watch(stream pb.DnsService_WatchServer) error {
return s.watch.Watch(stream)
}
// Shutdown stops the server (non gracefully).
func (s *ServergRPC) Shutdown() error {
if s.grpcServer != nil {
@@ -164,7 +159,7 @@ type gRPCresponse struct {
}
// Write is the hack that makes this work. It does not actually write the message
// but returns the bytes we need to to write in r. We can then pick this up in Query
// but returns the bytes we need to write in r. We can then pick this up in Query
// and write a proper protobuf back to the client.
func (r *gRPCresponse) Write(b []byte) (int, error) {
r.Msg = new(dns.Msg)
@@ -174,8 +169,8 @@ func (r *gRPCresponse) Write(b []byte) (int, error) {
// These methods implement the dns.ResponseWriter interface from Go DNS.
func (r *gRPCresponse) Close() error { return nil }
func (r *gRPCresponse) TsigStatus() error { return nil }
func (r *gRPCresponse) TsigTimersOnly(b bool) { return }
func (r *gRPCresponse) Hijack() { return }
func (r *gRPCresponse) TsigTimersOnly(b bool) {}
func (r *gRPCresponse) Hijack() {}
func (r *gRPCresponse) LocalAddr() net.Addr { return r.localAddr }
func (r *gRPCresponse) RemoteAddr() net.Addr { return r.remoteAddr }
func (r *gRPCresponse) WriteMsg(m *dns.Msg) error { r.Msg = m; return nil }

View File

@@ -12,6 +12,10 @@ import (
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/coredns/coredns/plugin/pkg/doh"
"github.com/coredns/coredns/plugin/pkg/response"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/caddyserver/caddy"
)
// ServerHTTPS represents an instance of a DNS-over-HTTPS server.
@@ -42,6 +46,9 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
return sh, nil
}
// Compile-time check to ensure Server implements the caddy.GracefulServer interface
var _ caddy.GracefulServer = &Server{}
// Serve implements caddy.TCPServer interface.
func (s *ServerHTTPS) Serve(l net.Listener) error {
s.m.Lock()
@@ -60,7 +67,7 @@ func (s *ServerHTTPS) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServerHTTPS) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportHTTPS+"://"):])
l, err := reuseport.Listen("tcp", s.Addr[len(transport.HTTPS+"://"):])
if err != nil {
return nil, err
}
@@ -77,11 +84,10 @@ func (s *ServerHTTPS) OnStartupComplete() {
return
}
out := startUpZones(TransportHTTPS+"://", s.Addr, s.zones)
out := startUpZones(transport.HTTPS+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}
// Stop stops the server. It blocks until the server is totally stopped.
@@ -116,7 +122,16 @@ func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// We just call the normal chain handler - all error handling is done there.
// We should expect a packet to be returned that we can send to the client.
s.ServeDNS(context.Background(), dw, msg)
ctx := context.WithValue(context.Background(), Key{}, s.Server)
s.ServeDNS(ctx, dw, msg)
// See section 4.2.1 of RFC 8484.
// We are using code 500 to indicate an unexpected situation when the chain
// handler has not provided any response message.
if dw.Msg == nil {
http.Error(w, "No response", http.StatusInternalServerError)
return
}
buf, _ := dw.Msg.Pack()

View File

@@ -6,6 +6,10 @@ import (
"fmt"
"net"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/caddyserver/caddy"
"github.com/miekg/dns"
)
@@ -32,6 +36,9 @@ func NewServerTLS(addr string, group []*Config) (*ServerTLS, error) {
return &ServerTLS{Server: s, tlsConfig: tlsConfig}, nil
}
// Compile-time check to ensure Server implements the caddy.GracefulServer interface
var _ caddy.GracefulServer = &Server{}
// Serve implements caddy.TCPServer interface.
func (s *ServerTLS) Serve(l net.Listener) error {
s.m.Lock()
@@ -42,7 +49,7 @@ func (s *ServerTLS) Serve(l net.Listener) error {
// Only fill out the TCP server for this one.
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp-tls", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
ctx := context.Background()
ctx := context.WithValue(context.Background(), Key{}, s.Server)
s.ServeDNS(ctx, w, r)
})}
s.m.Unlock()
@@ -55,7 +62,7 @@ func (s *ServerTLS) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServerTLS) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportTLS+"://"):])
l, err := reuseport.Listen("tcp", s.Addr[len(transport.TLS+"://"):])
if err != nil {
return nil, err
}
@@ -72,9 +79,8 @@ func (s *ServerTLS) OnStartupComplete() {
return
}
out := startUpZones(TransportTLS+"://", s.Addr, s.zones)
out := startUpZones(transport.TLS+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}

View File

@@ -1,18 +0,0 @@
package dnsserver
import (
"github.com/coredns/coredns/plugin/pkg/watch"
)
func watchables(zones map[string]*Config) []watch.Watchable {
var w []watch.Watchable
for _, config := range zones {
plugins := config.Handlers()
for _, p := range plugins {
if x, ok := p.(watch.Watchable); ok {
w = append(w, x)
}
}
}
return w
}

View File

@@ -11,19 +11,25 @@ package dnsserver
// care what plugin above them are doing.
var Directives = []string{
"metadata",
"cancel",
"tls",
"reload",
"nsid",
"bufsize",
"root",
"bind",
"debug",
"trace",
"ready",
"health",
"pprof",
"prometheus",
"errors",
"log",
"dnstap",
"dns64",
"acl",
"any",
"chaos",
"loadbalance",
"cache",
@@ -31,17 +37,22 @@ var Directives = []string{
"dnssec",
"autopath",
"template",
"transfer",
"hosts",
"route53",
"federation",
"azure",
"clouddns",
"k8s_external",
"kubernetes",
"file",
"auto",
"secondary",
"etcd",
"loop",
"forward",
"proxy",
"grpc",
"erratic",
"whoami",
"on",
"sign",
}

View File

@@ -2,20 +2,17 @@
package coremain
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime"
"strconv"
"strings"
"github.com/coredns/coredns/core/dnsserver"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/mholt/caddy"
"github.com/caddyserver/caddy"
)
func init() {
@@ -24,7 +21,6 @@ func init() {
setVersion()
flag.StringVar(&conf, "conf", "", "Corefile to load (default \""+caddy.DefaultConfigFile+"\")")
flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
flag.BoolVar(&version, "version", false, "Show version")
@@ -42,7 +38,7 @@ func Run() {
caddy.TrapSignals()
// Reset flag.CommandLine to get rid of unwanted flags for instance from glog (used in kubernetes).
// And readd the once we want to keep.
// And read the ones we want to keep.
flag.VisitAll(func(f *flag.Flag) {
if _, ok := flagsBlacklist[f.Name]; ok {
return
@@ -62,7 +58,7 @@ func Run() {
}
log.SetOutput(os.Stdout)
log.SetFlags(log.LstdFlags)
log.SetFlags(0) // Set to 0 because we're doing our own time, with timezone
if version {
showVersion()
@@ -73,11 +69,6 @@ func Run() {
os.Exit(0)
}
// Set CPU cap
if err := setCPU(cpu); err != nil {
mustLogFatal(err)
}
// Get Corefile input
corefile, err := caddy.LoadCaddyfile(serverType)
if err != nil {
@@ -90,14 +81,10 @@ func Run() {
mustLogFatal(err)
}
logVersion()
if !dnsserver.Quiet {
showVersion()
}
// Execute instantiation events
caddy.EmitEvent(caddy.InstanceStartupEvent, instance)
// Twiddle your thumbs
instance.Wait()
}
@@ -152,12 +139,6 @@ func defaultLoader(serverType string) (caddy.Input, error) {
}, nil
}
// logVersion logs the version that is starting.
func logVersion() {
clog.Info(versionString())
clog.Info(releaseString())
}
// showVersion prints the version that is starting.
func showVersion() {
fmt.Print(versionString())
@@ -189,54 +170,16 @@ func setVersion() {
// Only set the appVersion if -ldflags was used
if gitNearestTag != "" || gitTag != "" {
if devBuild && gitNearestTag != "" {
appVersion = fmt.Sprintf("%s (+%s %s)",
strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate)
appVersion = fmt.Sprintf("%s (+%s %s)", strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate)
} else if gitTag != "" {
appVersion = strings.TrimPrefix(gitTag, "v")
}
}
}
// setCPU parses string cpu and sets GOMAXPROCS
// according to its value. It accepts either
// a number (e.g. 3) or a percent (e.g. 50%).
func setCPU(cpu string) error {
var numCPU int
availCPU := runtime.NumCPU()
if strings.HasSuffix(cpu, "%") {
// Percent
var percent float32
pctStr := cpu[:len(cpu)-1]
pctInt, err := strconv.Atoi(pctStr)
if err != nil || pctInt < 1 || pctInt > 100 {
return errors.New("invalid CPU value: percentage must be between 1-100")
}
percent = float32(pctInt) / 100
numCPU = int(float32(availCPU) * percent)
} else {
// Number
num, err := strconv.Atoi(cpu)
if err != nil || num < 1 {
return errors.New("invalid CPU value: provide a number or percent greater than 0")
}
numCPU = num
}
if numCPU > availCPU {
numCPU = availCPU
}
runtime.GOMAXPROCS(numCPU)
return nil
}
// Flags that control program flow or startup
var (
conf string
cpu string
logfile bool
version bool
plugins bool
)
@@ -257,14 +200,14 @@ var (
)
// flagsBlacklist removes flags with these names from our flagset.
var flagsBlacklist = map[string]bool{
"logtostderr": true,
"alsologtostderr": true,
"v": true,
"stderrthreshold": true,
"vmodule": true,
"log_backtrace_at": true,
"log_dir": true,
var flagsBlacklist = map[string]struct{}{
"logtostderr": {},
"alsologtostderr": {},
"v": {},
"stderrthreshold": {},
"vmodule": {},
"log_backtrace_at": {},
"log_dir": {},
}
var flagsToKeep []*flag.Flag

View File

@@ -2,7 +2,7 @@ package coremain
// Various CoreDNS constants.
const (
CoreVersion = "1.2.0"
CoreVersion = "1.7.0"
coreName = "CoreDNS"
serverType = "dns"
)

View File

@@ -6,6 +6,8 @@
all: dns.pb.go
dns.pb.go: dns.proto
protoc --go_out=plugins=grpc:. dns.proto && \
sed -e s?golang.org/x/net/context?context? < dns.pb.go > dns.pb.go.tmp && \
mv dns.pb.go.tmp dns.pb.go
protoc --go_out=plugins=grpc:. dns.proto
.PHONY: clean
clean:
rm dns.pb.go

View File

@@ -1,27 +1,14 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: dns.proto
/*
Package pb is a generated protocol buffer package.
It is generated from these files:
dns.proto
It has these top-level messages:
DnsPacket
WatchRequest
WatchCreateRequest
WatchCancelRequest
WatchResponse
*/
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "context"
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
grpc "google.golang.org/grpc"
)
@@ -30,20 +17,46 @@ var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
/* Miek: disabled this manually, because I don't know what the heck */
/*
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
*/
type DnsPacket struct {
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DnsPacket) Reset() { *m = DnsPacket{} }
func (m *DnsPacket) String() string { return proto.CompactTextString(m) }
func (*DnsPacket) ProtoMessage() {}
func (*DnsPacket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *DnsPacket) Reset() { *m = DnsPacket{} }
func (m *DnsPacket) String() string { return proto.CompactTextString(m) }
func (*DnsPacket) ProtoMessage() {}
func (*DnsPacket) Descriptor() ([]byte, []int) {
return fileDescriptor_638ff8d8aaf3d8ae, []int{0}
}
func (m *DnsPacket) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DnsPacket.Unmarshal(m, b)
}
func (m *DnsPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DnsPacket.Marshal(b, m, deterministic)
}
func (m *DnsPacket) XXX_Merge(src proto.Message) {
xxx_messageInfo_DnsPacket.Merge(m, src)
}
func (m *DnsPacket) XXX_Size() int {
return xxx_messageInfo_DnsPacket.Size(m)
}
func (m *DnsPacket) XXX_DiscardUnknown() {
xxx_messageInfo_DnsPacket.DiscardUnknown(m)
}
var xxx_messageInfo_DnsPacket proto.InternalMessageInfo
func (m *DnsPacket) GetMsg() []byte {
if m != nil {
@@ -52,223 +65,22 @@ func (m *DnsPacket) GetMsg() []byte {
return nil
}
type WatchRequest struct {
// request_union is a request to either create a new watcher or cancel an existing watcher.
//
// Types that are valid to be assigned to RequestUnion:
// *WatchRequest_CreateRequest
// *WatchRequest_CancelRequest
RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
}
func (m *WatchRequest) Reset() { *m = WatchRequest{} }
func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
func (*WatchRequest) ProtoMessage() {}
func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
type isWatchRequest_RequestUnion interface {
isWatchRequest_RequestUnion()
}
type WatchRequest_CreateRequest struct {
CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,oneof"`
}
type WatchRequest_CancelRequest struct {
CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,oneof"`
}
func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
if m != nil {
return m.RequestUnion
}
return nil
}
func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
return x.CreateRequest
}
return nil
}
func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
return x.CancelRequest
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
(*WatchRequest_CreateRequest)(nil),
(*WatchRequest_CancelRequest)(nil),
}
}
func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*WatchRequest)
// request_union
switch x := m.RequestUnion.(type) {
case *WatchRequest_CreateRequest:
b.EncodeVarint(1<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.CreateRequest); err != nil {
return err
}
case *WatchRequest_CancelRequest:
b.EncodeVarint(2<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.CancelRequest); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
}
return nil
}
func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*WatchRequest)
switch tag {
case 1: // request_union.create_request
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(WatchCreateRequest)
err := b.DecodeMessage(msg)
m.RequestUnion = &WatchRequest_CreateRequest{msg}
return true, err
case 2: // request_union.cancel_request
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(WatchCancelRequest)
err := b.DecodeMessage(msg)
m.RequestUnion = &WatchRequest_CancelRequest{msg}
return true, err
default:
return false, nil
}
}
func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
m := msg.(*WatchRequest)
// request_union
switch x := m.RequestUnion.(type) {
case *WatchRequest_CreateRequest:
s := proto.Size(x.CreateRequest)
n += proto.SizeVarint(1<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *WatchRequest_CancelRequest:
s := proto.Size(x.CancelRequest)
n += proto.SizeVarint(2<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
type WatchCreateRequest struct {
Query *DnsPacket `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"`
}
func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
func (*WatchCreateRequest) ProtoMessage() {}
func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *WatchCreateRequest) GetQuery() *DnsPacket {
if m != nil {
return m.Query
}
return nil
}
type WatchCancelRequest struct {
// watch_id is the watcher id to cancel
WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId" json:"watch_id,omitempty"`
}
func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
func (*WatchCancelRequest) ProtoMessage() {}
func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *WatchCancelRequest) GetWatchId() int64 {
if m != nil {
return m.WatchId
}
return 0
}
type WatchResponse struct {
// watch_id is the ID of the watcher that corresponds to the response.
WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId" json:"watch_id,omitempty"`
// created is set to true if the response is for a create watch request.
// The client should record the watch_id and expect to receive DNS replies
// from the same stream.
// All replies sent to the created watcher will attach with the same watch_id.
Created bool `protobuf:"varint,2,opt,name=created" json:"created,omitempty"`
// canceled is set to true if the response is for a cancel watch request.
// No further events will be sent to the canceled watcher.
Canceled bool `protobuf:"varint,3,opt,name=canceled" json:"canceled,omitempty"`
Qname string `protobuf:"bytes,4,opt,name=qname" json:"qname,omitempty"`
Err string `protobuf:"bytes,5,opt,name=err" json:"err,omitempty"`
}
func (m *WatchResponse) Reset() { *m = WatchResponse{} }
func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
func (*WatchResponse) ProtoMessage() {}
func (*WatchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *WatchResponse) GetWatchId() int64 {
if m != nil {
return m.WatchId
}
return 0
}
func (m *WatchResponse) GetCreated() bool {
if m != nil {
return m.Created
}
return false
}
func (m *WatchResponse) GetCanceled() bool {
if m != nil {
return m.Canceled
}
return false
}
func (m *WatchResponse) GetQname() string {
if m != nil {
return m.Qname
}
return ""
}
func (m *WatchResponse) GetErr() string {
if m != nil {
return m.Err
}
return ""
}
func init() {
proto.RegisterType((*DnsPacket)(nil), "coredns.dns.DnsPacket")
proto.RegisterType((*WatchRequest)(nil), "coredns.dns.WatchRequest")
proto.RegisterType((*WatchCreateRequest)(nil), "coredns.dns.WatchCreateRequest")
proto.RegisterType((*WatchCancelRequest)(nil), "coredns.dns.WatchCancelRequest")
proto.RegisterType((*WatchResponse)(nil), "coredns.dns.WatchResponse")
}
func init() { proto.RegisterFile("dns.proto", fileDescriptor_638ff8d8aaf3d8ae) }
var fileDescriptor_638ff8d8aaf3d8ae = []byte{
// 120 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xc9, 0x2b, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xce, 0x2f, 0x4a, 0x05, 0x71, 0x53, 0xf2, 0x8a,
0x95, 0x64, 0xb9, 0x38, 0x5d, 0xf2, 0x8a, 0x03, 0x12, 0x93, 0xb3, 0x53, 0x4b, 0x84, 0x04, 0xb8,
0x98, 0x73, 0x8b, 0xd3, 0x25, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x23, 0x57, 0x2e,
0x2e, 0x97, 0xbc, 0xe2, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x73, 0x2e, 0xd6, 0xc0,
0xd2, 0xd4, 0xa2, 0x4a, 0x21, 0x31, 0x3d, 0x24, 0x33, 0xf4, 0xe0, 0x06, 0x48, 0xe1, 0x10, 0x77,
0x62, 0x89, 0x62, 0x2a, 0x48, 0x4a, 0x62, 0x03, 0xdb, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
0xf5, 0xd1, 0x3f, 0x26, 0x8c, 0x00, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -279,11 +91,11 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for DnsService service
// DnsServiceClient is the client API for DnsService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type DnsServiceClient interface {
Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error)
Watch(ctx context.Context, opts ...grpc.CallOption) (DnsService_WatchClient, error)
}
type dnsServiceClient struct {
@@ -296,49 +108,16 @@ func NewDnsServiceClient(cc *grpc.ClientConn) DnsServiceClient {
func (c *dnsServiceClient) Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error) {
out := new(DnsPacket)
err := grpc.Invoke(ctx, "/coredns.dns.DnsService/Query", in, out, c.cc, opts...)
err := c.cc.Invoke(ctx, "/coredns.dns.DnsService/Query", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dnsServiceClient) Watch(ctx context.Context, opts ...grpc.CallOption) (DnsService_WatchClient, error) {
stream, err := grpc.NewClientStream(ctx, &_DnsService_serviceDesc.Streams[0], c.cc, "/coredns.dns.DnsService/Watch", opts...)
if err != nil {
return nil, err
}
x := &dnsServiceWatchClient{stream}
return x, nil
}
type DnsService_WatchClient interface {
Send(*WatchRequest) error
Recv() (*WatchResponse, error)
grpc.ClientStream
}
type dnsServiceWatchClient struct {
grpc.ClientStream
}
func (x *dnsServiceWatchClient) Send(m *WatchRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *dnsServiceWatchClient) Recv() (*WatchResponse, error) {
m := new(WatchResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for DnsService service
// DnsServiceServer is the server API for DnsService service.
type DnsServiceServer interface {
Query(context.Context, *DnsPacket) (*DnsPacket, error)
Watch(DnsService_WatchServer) error
}
func RegisterDnsServiceServer(s *grpc.Server, srv DnsServiceServer) {
@@ -363,32 +142,6 @@ func _DnsService_Query_Handler(srv interface{}, ctx context.Context, dec func(in
return interceptor(ctx, in, info, handler)
}
func _DnsService_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(DnsServiceServer).Watch(&dnsServiceWatchServer{stream})
}
type DnsService_WatchServer interface {
Send(*WatchResponse) error
Recv() (*WatchRequest, error)
grpc.ServerStream
}
type dnsServiceWatchServer struct {
grpc.ServerStream
}
func (x *dnsServiceWatchServer) Send(m *WatchResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *dnsServiceWatchServer) Recv() (*WatchRequest, error) {
m := new(WatchRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _DnsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "coredns.dns.DnsService",
HandlerType: (*DnsServiceServer)(nil),
@@ -398,40 +151,6 @@ var _DnsService_serviceDesc = grpc.ServiceDesc{
Handler: _DnsService_Query_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Watch",
Handler: _DnsService_Watch_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "dns.proto",
}
func init() { proto.RegisterFile("dns.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 333 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x41, 0x4f, 0x32, 0x31,
0x14, 0xfc, 0x0a, 0xec, 0x07, 0x3c, 0x40, 0xcd, 0x8b, 0x31, 0xcb, 0x26, 0x46, 0xb2, 0x27, 0x0e,
0x06, 0x0d, 0x1e, 0xbc, 0xaf, 0x1c, 0xf0, 0xa6, 0xf5, 0x60, 0xe2, 0x85, 0x2c, 0xdd, 0x17, 0x25,
0x4a, 0x17, 0xda, 0x45, 0xe3, 0x3f, 0xd0, 0xdf, 0xe3, 0x1f, 0x34, 0xfb, 0xba, 0x90, 0x35, 0x88,
0xb7, 0xce, 0x74, 0x3a, 0xed, 0xcc, 0x2b, 0x34, 0x13, 0x6d, 0x07, 0x0b, 0x93, 0x66, 0x29, 0xb6,
0x54, 0x6a, 0x28, 0x87, 0x89, 0xb6, 0xe1, 0x31, 0x34, 0x47, 0xda, 0xde, 0xc4, 0xea, 0x99, 0x32,
0x3c, 0x80, 0xea, 0xdc, 0x3e, 0xfa, 0xa2, 0x27, 0xfa, 0x6d, 0x99, 0x2f, 0xc3, 0x2f, 0x01, 0xed,
0xfb, 0x38, 0x53, 0x4f, 0x92, 0x96, 0x2b, 0xb2, 0x19, 0x8e, 0x61, 0x4f, 0x19, 0x8a, 0x33, 0x9a,
0x18, 0xc7, 0xb0, 0xba, 0x35, 0x3c, 0x19, 0x94, 0x5c, 0x07, 0x7c, 0xe4, 0x8a, 0x75, 0xc5, 0xc1,
0xf1, 0x3f, 0xd9, 0x51, 0x65, 0x82, 0x9d, 0x62, 0xad, 0xe8, 0x65, 0xe3, 0x54, 0xd9, 0xe9, 0xc4,
0xba, 0xb2, 0x53, 0x99, 0x88, 0xf6, 0xa1, 0x53, 0x58, 0x4c, 0x56, 0x7a, 0x96, 0xea, 0x30, 0x02,
0xdc, 0x7e, 0x01, 0x9e, 0x82, 0xb7, 0x5c, 0x91, 0x79, 0x2f, 0x5e, 0x7c, 0xf4, 0xe3, 0x9e, 0x4d,
0x09, 0xd2, 0x89, 0xc2, 0xb3, 0xb5, 0x47, 0xf9, 0x2a, 0xec, 0x42, 0xe3, 0x2d, 0x67, 0x27, 0xb3,
0x84, 0x6d, 0xaa, 0xb2, 0xce, 0xf8, 0x3a, 0x09, 0x3f, 0x04, 0x74, 0x8a, 0xaa, 0xec, 0x22, 0xd5,
0x96, 0xfe, 0x10, 0xa3, 0x0f, 0x75, 0xd7, 0x46, 0xc2, 0xa9, 0x1b, 0x72, 0x0d, 0x31, 0x80, 0x86,
0x4b, 0x47, 0x89, 0x5f, 0xe5, 0xad, 0x0d, 0xc6, 0x43, 0xf0, 0x96, 0x3a, 0x9e, 0x93, 0x5f, 0xeb,
0x89, 0x7e, 0x53, 0x3a, 0x90, 0x4f, 0x8d, 0x8c, 0xf1, 0x3d, 0xe6, 0xf2, 0xe5, 0xf0, 0x53, 0x00,
0x8c, 0xb4, 0xbd, 0x23, 0xf3, 0x3a, 0x53, 0x84, 0x97, 0xe0, 0xdd, 0xe6, 0x99, 0x70, 0x47, 0xe4,
0x60, 0x07, 0x8f, 0x11, 0x78, 0x9c, 0x08, 0xbb, 0xdb, 0x33, 0x29, 0x1a, 0x09, 0x82, 0xdf, 0xb6,
0x5c, 0x01, 0x7d, 0x71, 0x2e, 0xa2, 0xda, 0x43, 0x65, 0x31, 0x9d, 0xfe, 0xe7, 0xaf, 0x77, 0xf1,
0x1d, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x5b, 0x8c, 0xe1, 0x87, 0x02, 0x00, 0x00,
}
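The regenerated pb package drops the Watch streaming RPC and keeps only the unary Query call. A short client-side sketch of exercising the remaining RPC; the endpoint address and the insecure dial option are placeholders, and a real DNS-over-gRPC endpoint would normally require TLS:

```go
package main

import (
	"context"
	"log"

	"github.com/coredns/coredns/pb"
	"github.com/miekg/dns"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder endpoint and credentials.
	conn, err := grpc.Dial("127.0.0.1:443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	packed, _ := m.Pack()

	// Query is the only RPC left after this change; Watch has been removed.
	reply, err := pb.NewDnsServiceClient(conn).Query(context.Background(), &pb.DnsPacket{Msg: packed})
	if err != nil {
		log.Fatal(err)
	}

	var out dns.Msg
	if err := out.Unpack(reply.Msg); err != nil {
		log.Fatal(err)
	}
	log.Println(out.String())
}
```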

View File

@@ -9,41 +9,4 @@ message DnsPacket {
service DnsService {
rpc Query (DnsPacket) returns (DnsPacket);
rpc Watch (stream WatchRequest) returns (stream WatchResponse);
}
message WatchRequest {
// request_union is a request to either create a new watcher or cancel an existing watcher.
oneof request_union {
WatchCreateRequest create_request = 1;
WatchCancelRequest cancel_request = 2;
}
}
message WatchCreateRequest {
DnsPacket query = 1;
}
message WatchCancelRequest {
// watch_id is the watcher id to cancel
int64 watch_id = 1;
}
message WatchResponse {
// watch_id is the ID of the watcher that corresponds to the response.
int64 watch_id = 1;
// created is set to true if the response is for a create watch request.
// The client should record the watch_id and expect to receive DNS replies
// from the same stream.
// All replies sent to the created watcher will attach with the same watch_id.
bool created = 2;
// canceled is set to true if the response is for a cancel watch request.
// No further events will be sent to the canceled watcher.
bool canceled = 3;
string qname = 4;
string err = 5;
}

View File

@@ -13,18 +13,18 @@ import (
type ServiceBackend interface {
// Services communicates with the backend to retrieve the service definitions. Exact indicates
// on exact match should be returned.
Services(state request.Request, exact bool, opt Options) ([]msg.Service, error)
Services(ctx context.Context, state request.Request, exact bool, opt Options) ([]msg.Service, error)
// Reverse communicates with the backend to retrieve service definition based on a IP address
// instead of a name. I.e. a reverse DNS lookup.
Reverse(state request.Request, exact bool, opt Options) ([]msg.Service, error)
Reverse(ctx context.Context, state request.Request, exact bool, opt Options) ([]msg.Service, error)
// Lookup is used to find records else where.
Lookup(state request.Request, name string, typ uint16) (*dns.Msg, error)
Lookup(ctx context.Context, state request.Request, name string, typ uint16) (*dns.Msg, error)
// Returns _all_ services that matches a certain name.
// Note: it does not implement a specific service.
Records(state request.Request, exact bool) ([]msg.Service, error)
Records(ctx context.Context, state request.Request, exact bool) ([]msg.Service, error)
// IsNameError return true if err indicated a record not found condition
IsNameError(err error) bool
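Every ServiceBackend method now takes the request context as its first argument, so backends can observe cancellation and callers must thread ctx through. A small sketch of a call site using the updated signatures from this diff and the A helper in the following file; the function and its arguments are placeholders:

```go
package example

import (
	"context"

	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/request"

	"github.com/miekg/dns"
)

// resolveA threads the request context through to the backend helpers, matching
// the updated signatures shown in this diff; b and zone are placeholders.
func resolveA(ctx context.Context, b plugin.ServiceBackend, zone string, state request.Request) ([]dns.RR, error) {
	return plugin.A(ctx, b, zone, state, nil /* no previous CNAMEs */, plugin.Options{})
}
```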

View File

@@ -1,6 +1,7 @@
package plugin
import (
"context"
"fmt"
"math"
"net"
@@ -13,13 +14,13 @@ import (
)
// A returns A records from Backend or an error.
func A(b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := checkForApex(b, zone, state, opt)
func A(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := checkForApex(ctx, b, zone, state, opt)
if err != nil {
return nil, err
}
dup := make(map[string]bool)
dup := make(map[string]struct{})
for _, serv := range services {
@@ -40,26 +41,24 @@ func A(b ServiceBackend, zone string, state request.Request, previousRecords []d
if dnsutil.DuplicateCNAME(newRecord, previousRecords) {
continue
}
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := A(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
state1 := state.NewWithQuestion(serv.Host, state.QType())
nextRecords, err := A(b, zone, state1, append(previousRecords, newRecord), opt)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
}
}
continue
}
// This means we can not complete the CNAME, try to look else where.
target := newRecord.Target
if dns.IsSubDomain(zone, target) {
// We should already have found it
continue
}
// Lookup
m1, e1 := b.Lookup(state, target, state.QType())
m1, e1 := b.Lookup(ctx, state, target, state.QType())
if e1 != nil {
continue
}
@@ -70,7 +69,7 @@ func A(b ServiceBackend, zone string, state request.Request, previousRecords []d
case dns.TypeA:
if _, ok := dup[serv.Host]; !ok {
dup[serv.Host] = true
dup[serv.Host] = struct{}{}
records = append(records, serv.NewA(state.QName(), ip))
}
@@ -82,13 +81,13 @@ func A(b ServiceBackend, zone string, state request.Request, previousRecords []d
}
// AAAA returns AAAA records from Backend or an error.
func AAAA(b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := checkForApex(b, zone, state, opt)
func AAAA(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := checkForApex(ctx, b, zone, state, opt)
if err != nil {
return nil, err
}
dup := make(map[string]bool)
dup := make(map[string]struct{})
for _, serv := range services {
@@ -110,22 +109,23 @@ func AAAA(b ServiceBackend, zone string, state request.Request, previousRecords
if dnsutil.DuplicateCNAME(newRecord, previousRecords) {
continue
}
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := AAAA(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
state1 := state.NewWithQuestion(serv.Host, state.QType())
nextRecords, err := AAAA(b, zone, state1, append(previousRecords, newRecord), opt)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
if err == nil {
// Not only have we found something we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
}
}
continue
}
// This means we can not complete the CNAME, try to look else where.
target := newRecord.Target
m1, e1 := b.Lookup(state, target, state.QType())
m1, e1 := b.Lookup(ctx, state, target, state.QType())
if e1 != nil {
continue
}
@@ -140,7 +140,7 @@ func AAAA(b ServiceBackend, zone string, state request.Request, previousRecords
case dns.TypeAAAA:
if _, ok := dup[serv.Host]; !ok {
dup[serv.Host] = true
dup[serv.Host] = struct{}{}
records = append(records, serv.NewAAAA(state.QName(), ip))
}
}
@@ -150,14 +150,14 @@ func AAAA(b ServiceBackend, zone string, state request.Request, previousRecords
// SRV returns SRV records from the Backend.
// If the Target is not a name but an IP address, a name is created on the fly.
func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
services, err := b.Services(state, false, opt)
func SRV(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
services, err := b.Services(ctx, state, false, opt)
if err != nil {
return nil, nil, err
}
dup := make(map[item]bool)
lookup := make(map[string]bool)
dup := make(map[item]struct{})
lookup := make(map[string]struct{})
// Looping twice to get the right weight vs priority. This might break because we may drop duplicate SRV records latter on.
w := make(map[int]int)
@@ -173,6 +173,11 @@ func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (rec
w[serv.Priority] += weight
}
for _, serv := range services {
// Don't add the entry if the port is -1 (invalid). The kubernetes plugin uses port -1 when a service/endpoint
// does not have any declared ports.
if serv.Port == -1 {
continue
}
w1 := 100.0 / float64(w[serv.Priority])
if serv.Weight == 0 {
w1 *= 100
@@ -180,6 +185,10 @@ func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (rec
w1 *= float64(serv.Weight)
}
weight := uint16(math.Floor(w1))
// weight should be at least 1
if weight == 0 {
weight = 1
}
what, ip := serv.HostType()
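The per-priority SRV weights are scaled so each priority group sums to roughly 100, zero weights are treated as 100, and the new floor of 1 keeps tiny shares from vanishing. A small worked sketch of that arithmetic, assuming zero weights are also accumulated as 100 in the first loop (which this hunk does not show):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Two services at the same priority with weights 0 and 50.
	// A zero weight is counted as 100, so the group total is 150.
	weights := []int{0, 50}
	total := 0
	for _, w := range weights {
		if w == 0 {
			total += 100
		} else {
			total += w
		}
	}
	for _, w := range weights {
		w1 := 100.0 / float64(total)
		if w == 0 {
			w1 *= 100
		} else {
			w1 *= float64(w)
		}
		out := uint16(math.Floor(w1))
		if out == 0 {
			out = 1 // same floor of 1 as the code above
		}
		fmt.Println(out) // prints 66, then 33
	}
}
```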
@@ -192,15 +201,15 @@ func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (rec
break
}
lookup[srv.Target] = true
lookup[srv.Target] = struct{}{}
if !dns.IsSubDomain(zone, srv.Target) {
m1, e1 := b.Lookup(state, srv.Target, dns.TypeA)
m1, e1 := b.Lookup(ctx, state, srv.Target, dns.TypeA)
if e1 == nil {
extra = append(extra, m1.Answer...)
}
m1, e1 = b.Lookup(state, srv.Target, dns.TypeAAAA)
m1, e1 = b.Lookup(ctx, state, srv.Target, dns.TypeAAAA)
if e1 == nil {
// If we have seen CNAME's we *assume* that they are already added.
for _, a := range m1.Answer {
@@ -214,7 +223,7 @@ func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (rec
// Internal name, we should have some info on them, either v4 or v6
// Clients expect a complete answer, because we are a recursor in their view.
state1 := state.NewWithQuestion(srv.Target, dns.TypeA)
addr, e1 := A(b, zone, state1, nil, opt)
addr, e1 := A(ctx, b, zone, state1, nil, opt)
if e1 == nil {
extra = append(extra, addr...)
}
@@ -238,14 +247,14 @@ func SRV(b ServiceBackend, zone string, state request.Request, opt Options) (rec
}
// MX returns MX records from the Backend. If the Target is not a name but an IP address, a name is created on the fly.
func MX(b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
services, err := b.Services(state, false, opt)
func MX(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
services, err := b.Services(ctx, state, false, opt)
if err != nil {
return nil, nil, err
}
dup := make(map[item]bool)
lookup := make(map[string]bool)
dup := make(map[item]struct{})
lookup := make(map[string]struct{})
for _, serv := range services {
if !serv.Mail {
continue
@@ -259,15 +268,15 @@ func MX(b ServiceBackend, zone string, state request.Request, opt Options) (reco
break
}
lookup[mx.Mx] = true
lookup[mx.Mx] = struct{}{}
if !dns.IsSubDomain(zone, mx.Mx) {
m1, e1 := b.Lookup(state, mx.Mx, dns.TypeA)
m1, e1 := b.Lookup(ctx, state, mx.Mx, dns.TypeA)
if e1 == nil {
extra = append(extra, m1.Answer...)
}
m1, e1 = b.Lookup(state, mx.Mx, dns.TypeAAAA)
m1, e1 = b.Lookup(ctx, state, mx.Mx, dns.TypeAAAA)
if e1 == nil {
// If we have seen CNAME's we *assume* that they are already added.
for _, a := range m1.Answer {
@@ -280,7 +289,7 @@ func MX(b ServiceBackend, zone string, state request.Request, opt Options) (reco
}
// Internal name
state1 := state.NewWithQuestion(mx.Mx, dns.TypeA)
addr, e1 := A(b, zone, state1, nil, opt)
addr, e1 := A(ctx, b, zone, state1, nil, opt)
if e1 == nil {
extra = append(extra, addr...)
}
@@ -304,8 +313,8 @@ func MX(b ServiceBackend, zone string, state request.Request, opt Options) (reco
}
// CNAME returns CNAME records from the backend or an error.
func CNAME(b ServiceBackend, zone string, state request.Request, opt Options) (records []dns.RR, err error) {
services, err := b.Services(state, true, opt)
func CNAME(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) (records []dns.RR, err error) {
services, err := b.Services(ctx, state, true, opt)
if err != nil {
return nil, err
}
@@ -320,34 +329,85 @@ func CNAME(b ServiceBackend, zone string, state request.Request, opt Options) (r
}
// TXT returns TXT records from Backend or an error.
func TXT(b ServiceBackend, zone string, state request.Request, opt Options) (records []dns.RR, err error) {
services, err := b.Services(state, false, opt)
func TXT(ctx context.Context, b ServiceBackend, zone string, state request.Request, previousRecords []dns.RR, opt Options) (records []dns.RR, err error) {
services, err := b.Services(ctx, state, true, opt)
if err != nil {
return nil, err
}
dup := make(map[string]struct{})
for _, serv := range services {
if serv.Text == "" {
what, _ := serv.HostType()
switch what {
case dns.TypeCNAME:
if Name(state.Name()).Matches(dns.Fqdn(serv.Host)) {
// x CNAME x is a direct loop, don't add those
continue
}
newRecord := serv.NewCNAME(state.QName(), serv.Host)
if len(previousRecords) > 7 {
// don't add it, and just continue
continue
}
if dnsutil.DuplicateCNAME(newRecord, previousRecords) {
continue
}
if dns.IsSubDomain(zone, dns.Fqdn(serv.Host)) {
state1 := state.NewWithQuestion(serv.Host, state.QType())
state1.Zone = zone
nextRecords, err := TXT(ctx, b, zone, state1, append(previousRecords, newRecord), opt)
if err == nil {
// Not only have we found something, we should add the CNAME and the IP addresses.
if len(nextRecords) > 0 {
records = append(records, newRecord)
records = append(records, nextRecords...)
}
}
continue
}
// This means we cannot complete the CNAME; try to look elsewhere.
target := newRecord.Target
// Lookup
m1, e1 := b.Lookup(ctx, state, target, state.QType())
if e1 != nil {
continue
}
// Should len(m1.Answer) > 0 also be checked here?
records = append(records, newRecord)
records = append(records, m1.Answer...)
continue
case dns.TypeTXT:
if _, ok := dup[serv.Host]; !ok {
dup[serv.Host] = struct{}{}
return append(records, serv.NewTXT(state.QName())), nil
}
}
records = append(records, serv.NewTXT(state.QName()))
}
return records, nil
}
// PTR returns the PTR records from the backend, only services that have a domain name as host are included.
func PTR(b ServiceBackend, zone string, state request.Request, opt Options) (records []dns.RR, err error) {
services, err := b.Reverse(state, true, opt)
func PTR(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) (records []dns.RR, err error) {
services, err := b.Reverse(ctx, state, true, opt)
if err != nil {
return nil, err
}
dup := make(map[string]bool)
dup := make(map[string]struct{})
for _, serv := range services {
if ip := net.ParseIP(serv.Host); ip == nil {
if _, ok := dup[serv.Host]; !ok {
dup[serv.Host] = true
dup[serv.Host] = struct{}{}
records = append(records, serv.NewPTR(state.QName(), serv.Host))
}
}
@@ -356,20 +416,22 @@ func PTR(b ServiceBackend, zone string, state request.Request, opt Options) (rec
}
// NS returns NS records from the backend
func NS(b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
func NS(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) (records, extra []dns.RR, err error) {
// NS records for this zone live in a special place, ns.dns.<zone>. Fake our lookup.
// only a tad bit fishy...
old := state.QName()
state.Clear()
state.Req.Question[0].Name = "ns.dns." + zone
services, err := b.Services(state, false, opt)
services, err := b.Services(ctx, state, false, opt)
if err != nil {
return nil, nil, err
}
// ... and reset
state.Req.Question[0].Name = old
seen := map[string]bool{}
for _, serv := range services {
what, ip := serv.HostType()
switch what {
@@ -378,16 +440,27 @@ func NS(b ServiceBackend, zone string, state request.Request, opt Options) (reco
case dns.TypeA, dns.TypeAAAA:
serv.Host = msg.Domain(serv.Key)
records = append(records, serv.NewNS(state.QName()))
extra = append(extra, newAddress(serv, serv.Host, ip, what))
ns := serv.NewNS(state.QName())
if _, ok := seen[ns.Ns]; ok {
continue
}
seen[ns.Ns] = true
records = append(records, ns)
}
}
return records, extra, nil
}
// SOA returns a SOA record from the backend.
func SOA(b ServiceBackend, zone string, state request.Request, opt Options) ([]dns.RR, error) {
header := dns.RR_Header{Name: zone, Rrtype: dns.TypeSOA, Ttl: 300, Class: dns.ClassINET}
func SOA(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) ([]dns.RR, error) {
minTTL := b.MinTTL(state)
ttl := uint32(300)
if minTTL < ttl {
ttl = minTTL
}
header := dns.RR_Header{Name: zone, Rrtype: dns.TypeSOA, Ttl: ttl, Class: dns.ClassINET}
Mbox := hostmaster + "."
Ns := "ns.dns."
@@ -403,19 +476,18 @@ func SOA(b ServiceBackend, zone string, state request.Request, opt Options) ([]d
Refresh: 7200,
Retry: 1800,
Expire: 86400,
Minttl: b.MinTTL(state),
Minttl: minTTL,
}
return []dns.RR{soa}, nil
}
// BackendError writes an error response to the client.
func BackendError(b ServiceBackend, zone string, rcode int, state request.Request, err error, opt Options) (int, error) {
func BackendError(ctx context.Context, b ServiceBackend, zone string, rcode int, state request.Request, err error, opt Options) (int, error) {
m := new(dns.Msg)
m.SetRcode(state.Req, rcode)
m.Authoritative, m.RecursionAvailable = true, true
m.Ns, _ = SOA(b, zone, state, opt)
m.Authoritative = true
m.Ns, _ = SOA(ctx, b, zone, state, opt)
state.SizeAndDo(m)
state.W.WriteMsg(m)
// Return success as the rcode to signal we have written to the client.
return dns.RcodeSuccess, err
@@ -432,26 +504,26 @@ func newAddress(s msg.Service, name string, ip net.IP, what uint16) dns.RR {
return &dns.AAAA{Hdr: hdr, AAAA: ip}
}
// checkForApex checks the spcecial apex.dns directory for records that will be returned as A or AAAA.
func checkForApex(b ServiceBackend, zone string, state request.Request, opt Options) ([]msg.Service, error) {
// checkForApex checks the special apex.dns directory for records that will be returned as A or AAAA.
func checkForApex(ctx context.Context, b ServiceBackend, zone string, state request.Request, opt Options) ([]msg.Service, error) {
if state.Name() != zone {
return b.Services(state, false, opt)
return b.Services(ctx, state, false, opt)
}
// If the zone name itself is queried we fake the query to search for a special entry
// this is equivalent to the NS search code.
old := state.QName()
state.Clear()
state.Req.Question[0].Name = dnsutil.Join([]string{"apex.dns", zone})
state.Req.Question[0].Name = dnsutil.Join("apex.dns", zone)
services, err := b.Services(state, false, opt)
services, err := b.Services(ctx, state, false, opt)
if err == nil {
state.Req.Question[0].Name = old
return services, err
}
state.Req.Question[0].Name = old
return b.Services(state, false, opt)
return b.Services(ctx, state, false, opt)
}
// item holds records.
@@ -463,17 +535,17 @@ type item struct {
// isDuplicate uses m to see if the combo (name, addr, port) already exists. If it does
// not exist already IsDuplicate will also add the record to the map.
func isDuplicate(m map[item]bool, name, addr string, port uint16) bool {
func isDuplicate(m map[item]struct{}, name, addr string, port uint16) bool {
if addr != "" {
_, ok := m[item{name, 0, addr}]
if !ok {
m[item{name, 0, addr}] = true
m[item{name, 0, addr}] = struct{}{}
}
return ok
}
_, ok := m[item{name, port, ""}]
if !ok {
m[item{name, port, ""}] = true
m[item{name, port, ""}] = struct{}{}
}
return ok
}

View File

@@ -1,6 +0,0 @@
reviewers:
- grobie
- miekg
approvers:
- grobie
- miekg

View File

@@ -19,56 +19,65 @@ cache [TTL] [ZONES...]
~~~
* **TTL** max TTL in seconds. If not specified, the maximum TTL will be used, which is 3600 for
noerror responses and 1800 for denial of existence ones.
NOERROR responses and 1800 for denial of existence ones.
Setting a TTL of 300: `cache 300` would cache records up to 300 seconds.
* **ZONES** zones it should cache for. If empty, the zones from the configuration block are used.
Each element in the cache is cached according to its TTL (with **TTL** as the max).
For the negative cache, the SOA's MinTTL value is used. A TTL of zero is not allowed.
A cache is divided into 256 shards, each holding up to 512 items by default - for a total size
of 256 * 512 = 131,072 items.
A cache is divided into 256 shards, each holding up to 39 items by default - for a total size
of 256 * 39 = 9984 items.
If you want more control:
~~~ txt
cache [TTL] [ZONES...] {
success CAPACITY [TTL]
denial CAPACITY [TTL]
success CAPACITY [TTL] [MINTTL]
denial CAPACITY [TTL] [MINTTL]
prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
serve_stale [DURATION]
}
~~~
* **TTL** and **ZONES** as above.
* `success`, override the settings for caching successful responses. **CAPACITY** indicates the maximum
number of packets we cache before we start evicting (*randomly*). **TTL** overrides the cache maximum TTL.
**MINTTL** overrides the cache minimum TTL (default 5), which can be useful to limit queries to the backend.
* `denial`, override the settings for caching denial of existence responses. **CAPACITY** indicates the maximum
number of packets we cache before we start evicting (*randomly*). **TTL** overrides the cache maximum TTL.
**MINTTL** overrides the cache minimum TTL (default 5), which can be useful to limit queries to the backend.
There is a third category (`error`) but those responses are never cached.
* `prefetch` will prefetch popular items when they are about to be expunged from the cache.
Popular means **AMOUNT** queries have been seen with no gaps of **DURATION** or more between them.
**DURATION** defaults to 1m. Prefetching will happen when the TTL drops below **PERCENTAGE**,
which defaults to `10%`, or at the latest 1 second before TTL expiration. Values should be in the range `[10%, 90%]`.
Note the percent sign is mandatory. **PERCENTAGE** is treated as an `int`. A short sketch of this threshold rule follows this list.
* `serve_stale`, when serve\_stale is set, the cache will always serve an expired entry to a client if there is one
available. When this happens, the cache will attempt to refresh the entry after sending the expired entry
to the client. The responses have a TTL of 0. **DURATION** is how far back to consider
stale responses as fresh. The default duration is 1h.
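The prefetch trigger can be made concrete with a small, hypothetical Go sketch (the variable names are illustrative, not the plugin's own):

~~~ go
package main

import (
	"fmt"
	"math"
)

func main() {
	// With PERCENTAGE=10% and an original TTL of 3600s, prefetching may start
	// once the remaining TTL drops to 360 seconds (and AMOUNT queries were seen).
	percentage := 10
	origTTL := 3600
	threshold := int(math.Ceil(float64(percentage) / 100 * float64(origTTL)))
	fmt.Println("prefetch once remaining TTL <=", threshold, "seconds") // 360
}
~~~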
## Capacity and Eviction
When specifying **CAPACITY**, the minimum cache capacity is 131,072. Specifying a lower value will be
ignored. Specifying a **CAPACITY** of zero does not disable the cache.
Eviction is done per shard - i.e. when a shard reaches capacity, items are evicted from that shard. Since shards don't fill up perfectly evenly, evictions will occur before the entire cache reaches full capacity. Each shard capacity is equal to the total cache size / number of shards (256).
If **CAPACITY** _is not_ specified, the default cache size is 9984 per cache. The minimum allowed cache size is 1024.
If **CAPACITY** _is_ specified, the actual cache size used will be rounded down to the nearest number divisible by 256 (so all shards are equal in size).
Eviction is done per shard. In effect, when a shard reaches capacity, items are evicted from that shard.
Since shards don't fill up perfectly evenly, evictions will occur before the entire cache reaches full capacity.
Each shard capacity is equal to the total cache size / number of shards (256). Eviction is random, not TTL based.
Entries with 0 TTL will remain in the cache until randomly evicted when the shard reaches capacity.
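As a rough illustration of these sizing rules (an editorial sketch, not part of the plugin), rounding a requested capacity down to a multiple of 256 and splitting it across the shards reproduces the documented defaults:

~~~ go
package main

import "fmt"

func main() {
	const shards = 256
	requested := 10000
	effective := (requested / shards) * shards // round down to the nearest multiple of 256
	fmt.Println(effective, "items total,", effective/shards, "per shard") // 9984 items total, 39 per shard
}
~~~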
## Metrics
If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported:
If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported:
* `coredns_cache_size{server, type}` - Total elements in the cache by cache type.
* `coredns_cache_entries{server, type}` - Total elements in the cache by cache type.
* `coredns_cache_hits_total{server, type}` - Counter of cache hits by cache type.
* `coredns_cache_misses_total{server}` - Counter of cache misses.
* `coredns_cache_drops_total{server}` - Counter of dropped messages.
* `coredns_cache_drops_total{server}` - Counter of responses excluded from the cache due to request/response question name mismatch.
* `coredns_cache_served_stale_total{server}` - Counter of requests served from stale cache entries.
Cache types are either "denial" or "success". `Server` is the server handling the request, see the
metrics plugin for documentation.
prometheus plugin for documentation.
## Examples
@@ -85,7 +94,18 @@ Proxy to Google Public DNS and only cache responses for example.org (or below).
~~~ corefile
. {
proxy . 8.8.8.8:53
forward . 8.8.8.8:53
cache example.org
}
~~~
Enable caching for `example.org`, keep a positive cache size of 5000 and a negative cache size of 2500:
~~~ corefile
example.org {
cache {
success 5000
denial 2500
}
}
~~~

View File

@@ -2,7 +2,6 @@
package cache
import (
"encoding/binary"
"hash/fnv"
"net"
"time"
@@ -16,25 +15,29 @@ import (
"github.com/miekg/dns"
)
// Cache is plugin that looks up responses in a cache and caches replies.
// Cache is a plugin that looks up responses in a cache and caches replies.
// It has a success and a denial of existence cache.
type Cache struct {
Next plugin.Handler
Zones []string
ncache *cache.Cache
ncap int
nttl time.Duration
ncache *cache.Cache
ncap int
nttl time.Duration
minnttl time.Duration
pcache *cache.Cache
pcap int
pttl time.Duration
pcache *cache.Cache
pcap int
pttl time.Duration
minpttl time.Duration
// Prefetch.
prefetch int
duration time.Duration
percentage int
staleUpTo time.Duration
// Testing.
now func() time.Time
}
@@ -47,9 +50,11 @@ func New() *Cache {
pcap: defaultCap,
pcache: cache.New(defaultCap),
pttl: maxTTL,
minpttl: minTTL,
ncap: defaultCap,
ncache: cache.New(defaultCap),
nttl: maxNTTL,
minnttl: minNTTL,
prefetch: 0,
duration: 1 * time.Minute,
percentage: 10,
@@ -57,27 +62,27 @@ func New() *Cache {
}
}
// Return key under which we store the item, -1 will be returned if we don't store the
// message.
// key returns the key under which we store the item; the boolean return is false if we don't store the message.
// Currently we do not cache truncated responses, errors, zone transfers or dynamic update messages.
func key(m *dns.Msg, t response.Type, do bool) int {
// qname holds the already lowercased qname.
func key(qname string, m *dns.Msg, t response.Type, do bool) (bool, uint64) {
// We don't store truncated responses.
if m.Truncated {
return -1
return false, 0
}
// Nor errors or Meta or Update
if t == response.OtherError || t == response.Meta || t == response.Update {
return -1
return false, 0
}
return int(hash(m.Question[0].Name, m.Question[0].Qtype, do))
return true, hash(qname, m.Question[0].Qtype, do)
}
var one = []byte("1")
var zero = []byte("0")
func hash(qname string, qtype uint16, do bool) uint32 {
h := fnv.New32()
func hash(qname string, qtype uint16, do bool) uint64 {
h := fnv.New64()
if do {
h.Write(one)
@@ -85,19 +90,21 @@ func hash(qname string, qtype uint16, do bool) uint32 {
h.Write(zero)
}
b := make([]byte, 2)
binary.BigEndian.PutUint16(b, qtype)
h.Write(b)
h.Write([]byte{byte(qtype >> 8)})
h.Write([]byte{byte(qtype)})
h.Write([]byte(qname))
return h.Sum64()
}
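For illustration only, a self-contained sketch of how a cache key could be derived along the same lines as the hash above (FNV-64 over the DO bit, the qtype bytes and the lowercased qname); it is not the vendored implementation:

~~~ go
package main

import (
	"fmt"
	"hash/fnv"
	"strings"
)

func cacheKey(qname string, qtype uint16, do bool) uint64 {
	h := fnv.New64()
	if do {
		h.Write([]byte("1"))
	} else {
		h.Write([]byte("0"))
	}
	h.Write([]byte{byte(qtype >> 8), byte(qtype)})
	h.Write([]byte(strings.ToLower(qname))) // callers are expected to pass a lowercased qname
	return h.Sum64()
}

func main() {
	fmt.Println(cacheKey("example.org.", 1, true)) // 1 == dns.TypeA
}
~~~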
for i := range qname {
c := qname[i]
if c >= 'A' && c <= 'Z' {
c += 'a' - 'A'
}
h.Write([]byte{c})
func computeTTL(msgTTL, minTTL, maxTTL time.Duration) time.Duration {
ttl := msgTTL
if ttl < minTTL {
ttl = minTTL
}
return h.Sum32()
if ttl > maxTTL {
ttl = maxTTL
}
return ttl
}
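A hedged usage sketch of the clamping behaviour (a local copy of the logic so the example is self-contained): a 2s message TTL is raised to the 5s minimum, while a 2h TTL is capped at the 1h maximum.

~~~ go
package main

import (
	"fmt"
	"time"
)

// clamp mirrors the computeTTL logic above, copied locally for illustration.
func clamp(msgTTL, minTTL, maxTTL time.Duration) time.Duration {
	ttl := msgTTL
	if ttl < minTTL {
		ttl = minTTL
	}
	if ttl > maxTTL {
		ttl = maxTTL
	}
	return ttl
}

func main() {
	fmt.Println(clamp(2*time.Second, 5*time.Second, time.Hour)) // 5s
	fmt.Println(clamp(2*time.Hour, 5*time.Second, time.Hour))   // 1h0m0s
}
~~~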
// ResponseWriter is a response writer that caches the reply message.
@@ -113,7 +120,7 @@ type ResponseWriter struct {
// newPrefetchResponseWriter returns a Cache ResponseWriter to be used in
// prefetch requests. It ensures RemoteAddr() can be called even after the
// original connetion has already been closed.
// original connection has already been closed.
func newPrefetchResponseWriter(server string, state request.Request, c *Cache) *ResponseWriter {
// Resolve the address now, the connection might be already closed when the
// actual prefetch request is made.
@@ -152,19 +159,20 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
}
// key returns empty string for anything we don't want to cache.
key := key(res, mt, do)
duration := w.pttl
if mt == response.NameError || mt == response.NoData {
duration = w.nttl
}
hasKey, key := key(w.state.Name(), res, mt, do)
msgTTL := dnsutil.MinimalTTL(res, mt)
if msgTTL < duration {
duration = msgTTL
var duration time.Duration
if mt == response.NameError || mt == response.NoData {
duration = computeTTL(msgTTL, w.minnttl, w.nttl)
} else if mt == response.ServerError {
// use default ttl which is 5s
duration = minTTL
} else {
duration = computeTTL(msgTTL, w.minpttl, w.pttl)
}
if key != -1 && duration > 0 {
if hasKey && duration > 0 {
if w.state.Match(res) {
w.set(res, key, mt, duration)
cacheSize.WithLabelValues(w.server, Success).Set(float64(w.pcache.Len()))
@@ -195,19 +203,21 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
return w.ResponseWriter.WriteMsg(res)
}
func (w *ResponseWriter) set(m *dns.Msg, key int, mt response.Type, duration time.Duration) {
if key == -1 || duration == 0 {
return
}
func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration time.Duration) {
// duration is expected > 0
// and key is valid
switch mt {
case response.NoError, response.Delegation:
i := newItem(m, w.now(), duration)
w.pcache.Add(uint32(key), i)
w.pcache.Add(key, i)
// when pre-fetching, remove the negative cache entry if it exists
if w.prefetch {
w.ncache.Remove(key)
}
case response.NameError, response.NoData:
case response.NameError, response.NoData, response.ServerError:
i := newItem(m, w.now(), duration)
w.ncache.Add(uint32(key), i)
w.ncache.Add(key, i)
case response.OtherError:
// don't cache these
@@ -228,7 +238,9 @@ func (w *ResponseWriter) Write(buf []byte) (int, error) {
const (
maxTTL = dnsutil.MaximumDefaulTTL
minTTL = dnsutil.MinimalDefaultTTL
maxNTTL = dnsutil.MaximumDefaulTTL / 2
minNTTL = dnsutil.MinimalDefaultTTL
defaultCap = 10000 // default capacity of the cache.

View File

@@ -1,4 +1,4 @@
// +build fuzz
// +build gofuzz
package cache

View File

@@ -3,7 +3,6 @@ package cache
import (
"context"
"math"
"sync"
"time"
"github.com/coredns/coredns/plugin"
@@ -11,7 +10,6 @@ import (
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
"github.com/prometheus/client_golang/prometheus"
)
// ServeDNS implements the plugin.Handler interface.
@@ -27,39 +25,55 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
server := metrics.WithServer(ctx)
i, found := c.get(now, state, server)
if i != nil && found {
resp := i.toMsg(r, now)
state.SizeAndDo(resp)
resp, _ = state.Scrub(resp)
w.WriteMsg(resp)
if c.prefetch > 0 {
ttl := i.ttl(now)
i.Freq.Update(c.duration, now)
threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL)))
if i.Freq.Hits() >= c.prefetch && ttl <= threshold {
cw := newPrefetchResponseWriter(server, state, c)
go func(w dns.ResponseWriter) {
cachePrefetches.WithLabelValues(server).Inc()
plugin.NextOrFailure(c.Name(), c.Next, ctx, w, r)
// When prefetching we loose the item i, and with it the frequency
// that we've gathered sofar. See we copy the frequencies info back
// into the new item that was stored in the cache.
if i1 := c.exists(state); i1 != nil {
i1.Freq.Reset(now, i.Freq.Hits())
}
}(cw)
}
}
return dns.RcodeSuccess, nil
ttl := 0
i := c.getIgnoreTTL(now, state, server)
if i != nil {
ttl = i.ttl(now)
}
if i == nil {
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server}
return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
}
if ttl < 0 {
servedStale.WithLabelValues(server).Inc()
// Adjust the time to get a 0 TTL in the reply built from a stale item.
now = now.Add(time.Duration(ttl) * time.Second)
go func() {
r := r.Copy()
crr := &ResponseWriter{Cache: c, state: state, server: server, prefetch: true, remoteAddr: w.LocalAddr()}
plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
}()
}
resp := i.toMsg(r, now)
w.WriteMsg(resp)
crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server}
return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
if c.shouldPrefetch(i, now) {
go c.doPrefetch(ctx, state, server, i, now)
}
return dns.RcodeSuccess, nil
}
func (c *Cache) doPrefetch(ctx context.Context, state request.Request, server string, i *item, now time.Time) {
cw := newPrefetchResponseWriter(server, state, c)
cachePrefetches.WithLabelValues(server).Inc()
plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req)
// When prefetching we lose the item i, and with it the frequency
// that we've gathered so far. So we copy the frequency info back
// into the new item that was stored in the cache.
if i1 := c.exists(state); i1 != nil {
i1.Freq.Reset(now, i.Freq.Hits())
}
}
func (c *Cache) shouldPrefetch(i *item, now time.Time) bool {
if c.prefetch <= 0 {
return false
}
i.Freq.Update(c.duration, now)
threshold := int(math.Ceil(float64(c.percentage) / 100 * float64(i.origTTL)))
return i.Freq.Hits() >= c.prefetch && i.ttl(now) <= threshold
}
// Name implements the Handler interface.
@@ -81,6 +95,28 @@ func (c *Cache) get(now time.Time, state request.Request, server string) (*item,
return nil, false
}
// getIgnoreTTL unconditionally returns an item if it exists in the cache.
func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
k := hash(state.Name(), state.QType(), state.Do())
if i, ok := c.ncache.Get(k); ok {
ttl := i.(*item).ttl(now)
if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
cacheHits.WithLabelValues(server, Denial).Inc()
return i.(*item)
}
}
if i, ok := c.pcache.Get(k); ok {
ttl := i.(*item).ttl(now)
if ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds())) {
cacheHits.WithLabelValues(server, Success).Inc()
return i.(*item)
}
}
cacheMisses.WithLabelValues(server).Inc()
return nil
}
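The TTL condition used by getIgnoreTTL above can be read as: fresh entries are always servable, expired ones only while they are younger than the serve_stale window. A minimal sketch, assuming the same sign convention for an expired TTL:

~~~ go
package main

import (
	"fmt"
	"time"
)

func servable(ttlSeconds int, staleUpTo time.Duration) bool {
	return ttlSeconds > 0 || (staleUpTo > 0 && -ttlSeconds < int(staleUpTo.Seconds()))
}

func main() {
	fmt.Println(servable(-120, time.Hour)) // true: expired 2 minutes ago, inside the 1h window
	fmt.Println(servable(-120, 0))         // false: serve_stale disabled
}
~~~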
func (c *Cache) exists(state request.Request) *item {
k := hash(state.Name(), state.QType(), state.Do())
if i, ok := c.ncache.Get(k); ok {
@@ -91,42 +127,3 @@ func (c *Cache) exists(state request.Request) *item {
}
return nil
}
var (
cacheSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "size",
Help: "The number of elements in the cache.",
}, []string{"server", "type"})
cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "hits_total",
Help: "The count of cache hits.",
}, []string{"server", "type"})
cacheMisses = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "misses_total",
Help: "The count of cache misses.",
}, []string{"server"})
cachePrefetches = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "prefetch_total",
Help: "The number of time the cache has prefetched a cached item.",
}, []string{"server"})
cacheDrops = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "drops_total",
Help: "The number responses that are not cached, because the reply is malformed.",
}, []string{"server"})
)
var once sync.Once

View File

@@ -9,7 +9,6 @@ import (
type item struct {
Rcode int
Authoritative bool
AuthenticatedData bool
RecursionAvailable bool
Answer []dns.RR
@@ -25,7 +24,6 @@ type item struct {
func newItem(m *dns.Msg, now time.Time, d time.Duration) *item {
i := new(item)
i.Rcode = m.Rcode
i.Authoritative = m.Authoritative
i.AuthenticatedData = m.AuthenticatedData
i.RecursionAvailable = m.RecursionAvailable
i.Answer = m.Answer
@@ -51,12 +49,20 @@ func newItem(m *dns.Msg, now time.Time, d time.Duration) *item {
}
// toMsg turns i into a message, it tailors the reply to m.
// The Authoritative bit is always set to 0, because the answer is from the cache.
// The Authoritative bit should be set to 0, but some client stub resolver implementations, most notably,
// on some legacy systems (e.g. Ubuntu 14.04 with glibc 2.20), the low-level glibc function `getaddrinfo`
// used by Python/Ruby/etc. will discard answers that do not have this bit set.
// So we're forced to always set this to 1, regardless of whether the answer came from the cache or not.
// On newer systems (e.g. Ubuntu 16.04 with glibc 2.23) this issue is resolved,
// so we may set this bit back to 0 in the future.
func (i *item) toMsg(m *dns.Msg, now time.Time) *dns.Msg {
m1 := new(dns.Msg)
m1.SetReply(m)
m1.Authoritative = false
// Set this to true as some DNS clients discard the *entire* packet when it's non-authoritative.
// This is probably not according to spec, but the bit itself is not super useful at this point, so
// just set it to true.
m1.Authoritative = true
m1.AuthenticatedData = i.AuthenticatedData
m1.RecursionAvailable = i.RecursionAvailable
m1.Rcode = i.Rcode

View File

@@ -0,0 +1,52 @@
package cache
import (
"github.com/coredns/coredns/plugin"
"github.com/prometheus/client_golang/prometheus"
)
var (
// cacheSize is total elements in the cache by cache type.
cacheSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "entries",
Help: "The number of elements in the cache.",
}, []string{"server", "type"})
// cacheHits is counter of cache hits by cache type.
cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "hits_total",
Help: "The count of cache hits.",
}, []string{"server", "type"})
// cacheMisses is the counter of cache misses.
cacheMisses = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "misses_total",
Help: "The count of cache misses.",
}, []string{"server"})
// cachePrefetches is the number of times the cache has prefetched a cached item.
cachePrefetches = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "prefetch_total",
Help: "The number of time the cache has prefetched a cached item.",
}, []string{"server"})
// cacheDrops is the number of responses that are not cached because the reply is malformed.
cacheDrops = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "drops_total",
Help: "The number responses that are not cached, because the reply is malformed.",
}, []string{"server"})
// servedStale is the number of requests served from stale cache entries.
servedStale = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: "cache",
Name: "served_stale_total",
Help: "The number of requests served from stale cache entries.",
}, []string{"server"})
)

View File

@@ -1,6 +1,7 @@
package cache
import (
"errors"
"fmt"
"strconv"
"time"
@@ -11,17 +12,12 @@ import (
"github.com/coredns/coredns/plugin/pkg/cache"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/mholt/caddy"
"github.com/caddyserver/caddy"
)
var log = clog.NewWithPlugin("cache")
func init() {
caddy.RegisterPlugin("cache", caddy.Plugin{
ServerType: "dns",
Action: setup,
})
}
func init() { plugin.Register("cache", setup) }
func setup(c *caddy.Controller) error {
ca, err := cacheParse(c)
@@ -34,11 +30,9 @@ func setup(c *caddy.Controller) error {
})
c.OnStartup(func() error {
once.Do(func() {
metrics.MustRegister(c,
cacheSize, cacheHits, cacheMisses,
cachePrefetches, cacheDrops)
})
metrics.MustRegister(c,
cacheSize, cacheHits, cacheMisses,
cachePrefetches, cacheDrops, servedStale)
return nil
})
@@ -101,6 +95,17 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
return nil, fmt.Errorf("cache TTL can not be zero or negative: %d", pttl)
}
ca.pttl = time.Duration(pttl) * time.Second
if len(args) > 2 {
minpttl, err := strconv.Atoi(args[2])
if err != nil {
return nil, err
}
// Reject negative values
if minpttl < 0 {
return nil, fmt.Errorf("cache min TTL can not be negative: %d", minpttl)
}
ca.minpttl = time.Duration(minpttl) * time.Second
}
}
case Denial:
args := c.RemainingArgs()
@@ -122,6 +127,17 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
return nil, fmt.Errorf("cache TTL can not be zero or negative: %d", nttl)
}
ca.nttl = time.Duration(nttl) * time.Second
if len(args) > 2 {
minnttl, err := strconv.Atoi(args[2])
if err != nil {
return nil, err
}
// Reject negative values
if minnttl < 0 {
return nil, fmt.Errorf("cache min TTL can not be negative: %d", minnttl)
}
ca.minnttl = time.Duration(minnttl) * time.Second
}
}
case "prefetch":
args := c.RemainingArgs()
@@ -161,6 +177,22 @@ func cacheParse(c *caddy.Controller) (*Cache, error) {
ca.percentage = num
}
case "serve_stale":
args := c.RemainingArgs()
if len(args) > 1 {
return nil, c.ArgErr()
}
ca.staleUpTo = 1 * time.Hour
if len(args) == 1 {
d, err := time.ParseDuration(args[0])
if err != nil {
return nil, err
}
if d < 0 {
return nil, errors.New("invalid negative duration for serve_stale")
}
ca.staleUpTo = d
}
default:
return nil, c.ArgErr()
}

13
vendor/github.com/coredns/coredns/plugin/done.go generated vendored Normal file
View File

@@ -0,0 +1,13 @@
package plugin
import "context"
// Done is a non-blocking function that returns true if the context has been canceled.
func Done(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
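A small usage sketch of the helper, assuming the vendored import path shown in the file header above:

~~~ go
package main

import (
	"context"
	"fmt"

	"github.com/coredns/coredns/plugin"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	fmt.Println(plugin.Done(ctx)) // false: context still live
	cancel()
	fmt.Println(plugin.Done(ctx)) // true: context has been canceled
}
~~~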

View File

@@ -26,11 +26,11 @@ func Domain(s string) string {
for i, j := 1, len(l)-1; i < j; i, j = i+1, j-1 {
l[i], l[j] = l[j], l[i]
}
return dnsutil.Join(l[1 : len(l)-1])
return dnsutil.Join(l[1 : len(l)-1]...)
}
// PathWithWildcard ascts as Path, but if a name contains wildcards (* or any), the name will be
// chopped of before the (first) wildcard, and we do a highler evel search and
// PathWithWildcard acts as Path, but if a name contains wildcards (* or any), the name will be
// chopped off before the (first) wildcard, and we do a higher level search and
// later find the matching names. So service.*.skydns.local, will look for all
// services under skydns.local and will later check for names that match
// service.*.skydns.local. If a wildcard is found the returned bool is true.

View File

@@ -38,15 +38,21 @@ type Service struct {
// NewSRV returns a new SRV record based on the Service.
func (s *Service) NewSRV(name string, weight uint16) *dns.SRV {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
host := dns.Fqdn(s.Host)
if s.TargetStrip > 0 {
host = targetStrip(host, s.TargetStrip)
}
return &dns.SRV{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: s.TTL},
Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: dns.Fqdn(host)}
Priority: uint16(s.Priority), Weight: weight, Port: uint16(s.Port), Target: host}
}
// NewMX returns a new MX record based on the Service.
func (s *Service) NewMX(name string) *dns.MX {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
host := dns.Fqdn(s.Host)
if s.TargetStrip > 0 {
host = targetStrip(host, s.TargetStrip)
}
return &dns.MX{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: s.TTL},
Preference: uint16(s.Priority), Mx: host}
@@ -79,7 +85,10 @@ func (s *Service) NewPTR(name string, target string) *dns.PTR {
// NewNS returns a new NS record based on the Service.
func (s *Service) NewNS(name string) *dns.NS {
host := targetStrip(dns.Fqdn(s.Host), s.TargetStrip)
host := dns.Fqdn(s.Host)
if s.TargetStrip > 0 {
host = targetStrip(host, s.TargetStrip)
}
return &dns.NS{Hdr: dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: s.TTL}, Ns: host}
}
@@ -155,16 +164,12 @@ func split255(s string) []string {
// targetStrip strips "targetstrip" labels from the left side of the fully qualified name.
func targetStrip(name string, targetStrip int) string {
if targetStrip == 0 {
return name
}
offset, end := 0, false
for i := 0; i < targetStrip; i++ {
offset, end = dns.NextLabel(name, offset)
}
if end {
// We overshot the name, use the orignal one.
// We overshot the name, use the original one.
offset = 0
}
name = name[offset:]

View File

@@ -19,8 +19,12 @@ func (s *Service) HostType() (what uint16, normalized net.IP) {
ip := net.ParseIP(s.Host)
switch {
case ip == nil:
return dns.TypeCNAME, nil
if len(s.Text) == 0 {
return dns.TypeCNAME, nil
}
return dns.TypeTXT, nil
case ip.To4() != nil:
return dns.TypeA, ip.To4()

View File

@@ -1,9 +0,0 @@
reviewers:
- fastest963
- miekg
- superq
- greenpau
approvers:
- fastest963
- miekg
- superq

View File

@@ -11,14 +11,14 @@ The default location for the metrics is `localhost:9153`. The metrics path is fi
The following metrics are exported:
* `coredns_build_info{version, revision, goversion}` - info about CoreDNS itself.
* `coredns_panic_count_total{}` - total number of panics.
* `coredns_dns_request_count_total{server, zone, proto, family}` - total query count.
* `coredns_dns_request_duration_seconds{server, zone}` - duration to process each query.
* `coredns_panics_total{}` - total number of panics.
* `coredns_dns_requests_total{server, zone, proto, family, type}` - total query count.
* `coredns_dns_request_duration_seconds{server, zone, type}` - duration to process each query.
* `coredns_dns_request_size_bytes{server, zone, proto}` - size of the request in bytes.
* `coredns_dns_request_do_count_total{server, zone}` - queries that have the DO bit set
* `coredns_dns_request_type_count_total{server, zone, type}` - counter of queries per zone and type.
* `coredns_dns_do_requests_total{server, zone}` - queries that have the DO bit set
* `coredns_dns_response_size_bytes{server, zone, proto}` - response size in bytes.
* `coredns_dns_response_rcode_count_total{server, zone, rcode}` - response per zone and rcode.
* `coredns_dns_responses_total{server, zone, rcode}` - response per zone and rcode.
* `coredns_plugin_enabled{server, zone, name}` - indicates whether a plugin is enabled on per server and zone basis.
Each counter has a label `zone` which is the zonename used for the request/response.
@@ -32,7 +32,6 @@ Extra labels used are:
* `type` which holds the query type. It holds most common types (A, AAAA, MX, SOA, CNAME, PTR, TXT,
NS, SRV, DS, DNSKEY, RRSIG, NSEC, NSEC3, IXFR, AXFR and ANY) and "other" which lumps together all
other types.
* The `response_rcode_count_total` has an extra label `rcode` which holds the rcode of the response.
If monitoring is enabled, queries that do not enter the plugin chain are exported under the fake
name "dropped" (without a closing dot - this is never a valid domain name).
@@ -47,12 +46,12 @@ prometheus [ADDRESS]
For each zone that you want to see metrics for.
It optionally takes an address to which the metrics are exported; the default
is `localhost:9153`. The metrics path is fixed to `/metrics`.
It optionally takes a bind address to which the metrics are exported; the default
listens on `localhost:9153`. The metrics path is fixed to `/metrics`.
## Examples
Use an alternative address:
Use an alternative listening address:
~~~ corefile
. {
@@ -60,7 +59,7 @@ Use an alternative address:
}
~~~
Or via an enviroment variable (this is supported throughout the Corefile): `export PORT=9253`, and
Or via an environment variable (this is supported throughout the Corefile): `export PORT=9253`, and
then:
~~~ corefile
@@ -75,3 +74,4 @@ When reloading, the Prometheus handler is stopped before the new server instance
If that new server fails to start, the initial server instance is still available and DNS queries are still served,
but the Prometheus handler stays down. Prometheus will not reply to HTTP requests until a successful reload
or a complete restart of CoreDNS.
Only the plugins that register as a Handler are visible in `coredns_plugin_enabled{server, zone, name}`. As of today, the plugins *reload* and *bind* are not reported.

View File

@@ -3,7 +3,7 @@ package metrics
import (
"context"
"github.com/coredns/coredns/plugin/metrics/vars"
"github.com/coredns/coredns/core/dnsserver"
)
// WithServer returns the current server handling the request. It returns the
@@ -15,4 +15,10 @@ import (
// Basic usage with a metric:
//
// <metric>.WithLabelValues(metrics.WithServer(ctx), labels..).Add(1)
func WithServer(ctx context.Context) string { return vars.WithServer(ctx) }
func WithServer(ctx context.Context) string {
srv := ctx.Value(dnsserver.Key{})
if srv == nil {
return ""
}
return srv.(*dnsserver.Server).Addr
}

View File

@@ -26,7 +26,7 @@ func (m *Metrics) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg
rw := dnstest.NewRecorder(w)
status, err := plugin.NextOrFailure(m.Name(), m.Next, ctx, rw, r)
vars.Report(ctx, state, zone, rcode.ToString(rw.Rcode), rw.Len, rw.Start)
vars.Report(WithServer(ctx), state, zone, rcode.ToString(rw.Rcode), rw.Len, rw.Start)
return status, err
}

View File

@@ -2,42 +2,47 @@
package metrics
import (
"context"
"net"
"net/http"
"os"
"sync"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics/vars"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// Metrics holds the prometheus configuration. The metrics' path is fixed to be /metrics
// Metrics holds the prometheus configuration. The metrics' path is fixed to be /metrics.
type Metrics struct {
Next plugin.Handler
Addr string
Reg *prometheus.Registry
Next plugin.Handler
Addr string
Reg *prometheus.Registry
ln net.Listener
lnSetup bool
mux *http.ServeMux
mux *http.ServeMux
srv *http.Server
zoneNames []string
zoneMap map[string]bool
zoneMap map[string]struct{}
zoneMu sync.RWMutex
}
// New returns a new instance of Metrics with the given address
// New returns a new instance of Metrics with the given address.
func New(addr string) *Metrics {
met := &Metrics{
Addr: addr,
Reg: prometheus.NewRegistry(),
zoneMap: make(map[string]bool),
zoneMap: make(map[string]struct{}),
}
// Add the default collectors
met.MustRegister(prometheus.NewGoCollector())
met.MustRegister(prometheus.NewProcessCollector(os.Getpid(), ""))
met.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
// Add all of our collectors
met.MustRegister(buildInfo)
@@ -46,20 +51,28 @@ func New(addr string) *Metrics {
met.MustRegister(vars.RequestDuration)
met.MustRegister(vars.RequestSize)
met.MustRegister(vars.RequestDo)
met.MustRegister(vars.RequestType)
met.MustRegister(vars.ResponseSize)
met.MustRegister(vars.ResponseRcode)
met.MustRegister(vars.PluginEnabled)
return met
}
// MustRegister wraps m.Reg.MustRegister.
func (m *Metrics) MustRegister(c prometheus.Collector) { m.Reg.MustRegister(c) }
func (m *Metrics) MustRegister(c prometheus.Collector) {
err := m.Reg.Register(c)
if err != nil {
// ignore any duplicate error, but fatal on any other kind of error
if _, ok := err.(prometheus.AlreadyRegisteredError); !ok {
log.Fatalf("Cannot register metrics collector: %s", err)
}
}
}
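The same register-once, tolerate-duplicates pattern can be sketched directly against the Prometheus client library (the metric name here is illustrative, not one of CoreDNS's):

~~~ go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "demo counter"})

	for i := 0; i < 2; i++ {
		if err := reg.Register(c); err != nil {
			if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
				fmt.Println("already registered, ignoring") // second pass lands here
				continue
			}
			panic(err)
		}
		fmt.Println("registered") // first pass
	}
}
~~~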
// AddZone adds zone z to m.
func (m *Metrics) AddZone(z string) {
m.zoneMu.Lock()
m.zoneMap[z] = true
m.zoneMap[z] = struct{}{}
m.zoneNames = keys(m.zoneMap)
m.zoneMu.Unlock()
}
@@ -82,7 +95,7 @@ func (m *Metrics) ZoneNames() []string {
// OnStartup sets up the metrics on startup.
func (m *Metrics) OnStartup() error {
ln, err := net.Listen("tcp", m.Addr)
ln, err := reuseport.Listen("tcp", m.Addr)
if err != nil {
log.Errorf("Failed to start metrics handler: %s", err)
return err
@@ -90,14 +103,19 @@ func (m *Metrics) OnStartup() error {
m.ln = ln
m.lnSetup = true
ListenAddr = m.ln.Addr().String() // For tests
m.mux = http.NewServeMux()
m.mux.Handle("/metrics", promhttp.HandlerFor(m.Reg, promhttp.HandlerOpts{}))
// creating some helper variables to avoid data races on m.srv and m.ln
server := &http.Server{Handler: m.mux}
m.srv = server
go func() {
http.Serve(m.ln, m.mux)
server.Serve(ln)
}()
ListenAddr = ln.Addr().String() // For tests.
return nil
}
@@ -106,27 +124,29 @@ func (m *Metrics) OnRestart() error {
if !m.lnSetup {
return nil
}
u.Unset(m.Addr)
return m.stopServer()
}
uniqAddr.SetTodo(m.Addr)
m.ln.Close()
func (m *Metrics) stopServer() error {
if !m.lnSetup {
return nil
}
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
defer cancel()
if err := m.srv.Shutdown(ctx); err != nil {
log.Infof("Failed to stop prometheus http server: %s", err)
return err
}
m.lnSetup = false
m.ln.Close()
return nil
}
// OnFinalShutdown tears down the metrics listener on shutdown and restart.
func (m *Metrics) OnFinalShutdown() error {
// We allow prometheus statements in multiple Server Blocks, but only the first
// will open the listener, for the rest they are all nil; guard against that.
if !m.lnSetup {
return nil
}
func (m *Metrics) OnFinalShutdown() error { return m.stopServer() }
m.lnSetup = false
return m.ln.Close()
}
func keys(m map[string]bool) []string {
func keys(m map[string]struct{}) []string {
sx := []string{}
for k := range m {
sx = append(sx, k)
@@ -138,6 +158,10 @@ func keys(m map[string]bool) []string {
// we listen on "localhost:0" and need to retrieve the actual address.
var ListenAddr string
// shutdownTimeout is the maximum amount of time the metrics plugin will wait
// before erroring when it tries to close the metrics server
const shutdownTimeout time.Duration = time.Second * 5
var buildInfo = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: plugin.Namespace,
Name: "build_info",

View File

@@ -3,11 +3,11 @@ package metrics
import (
"github.com/coredns/coredns/core/dnsserver"
"github.com/mholt/caddy"
"github.com/caddyserver/caddy"
"github.com/prometheus/client_golang/prometheus"
)
// MustRegister registers the prometheus Collectors when the metrics middleware is used.
// MustRegister registers the prometheus Collectors when the metrics plugin is used.
func MustRegister(c *caddy.Controller, cs ...prometheus.Collector) {
m := dnsserver.GetConfig(c).Handler("prometheus")
if m == nil {

View File

@@ -0,0 +1,28 @@
package metrics
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
)
type reg struct {
sync.RWMutex
r map[string]*prometheus.Registry
}
func newReg() *reg { return &reg{r: make(map[string]*prometheus.Registry)} }
// getOrSet returns the registry previously stored under addr, or stores and returns pr if none is set yet.
func (r *reg) getOrSet(addr string, pr *prometheus.Registry) *prometheus.Registry {
r.Lock()
defer r.Unlock()
if v, ok := r.r[addr]; ok {
return v
}
r.r[addr] = pr
return pr
}

View File

@@ -7,57 +7,72 @@ import (
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/coremain"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics/vars"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/plugin/pkg/uniq"
"github.com/mholt/caddy"
"github.com/caddyserver/caddy"
)
var (
log = clog.NewWithPlugin("prometheus")
uniqAddr = uniq.New()
u = uniq.New()
registry = newReg()
)
func init() {
caddy.RegisterPlugin("prometheus", caddy.Plugin{
ServerType: "dns",
Action: setup,
})
}
func init() { plugin.Register("prometheus", setup) }
func setup(c *caddy.Controller) error {
m, err := prometheusParse(c)
m, err := parse(c)
if err != nil {
return plugin.Error("prometheus", err)
}
m.Reg = registry.getOrSet(m.Addr, m.Reg)
c.OnStartup(func() error { m.Reg = registry.getOrSet(m.Addr, m.Reg); u.Set(m.Addr, m.OnStartup); return nil })
c.OnRestartFailed(func() error { m.Reg = registry.getOrSet(m.Addr, m.Reg); u.Set(m.Addr, m.OnStartup); return nil })
c.OnStartup(func() error { return u.ForEach() })
c.OnRestartFailed(func() error { return u.ForEach() })
c.OnStartup(func() error {
conf := dnsserver.GetConfig(c)
for _, h := range conf.ListenHosts {
addrstr := conf.Transport + "://" + net.JoinHostPort(h, conf.Port)
for _, p := range conf.Handlers() {
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, p.Name()).Set(1)
}
}
return nil
})
c.OnRestartFailed(func() error {
conf := dnsserver.GetConfig(c)
for _, h := range conf.ListenHosts {
addrstr := conf.Transport + "://" + net.JoinHostPort(h, conf.Port)
for _, p := range conf.Handlers() {
vars.PluginEnabled.WithLabelValues(addrstr, conf.Zone, p.Name()).Set(1)
}
}
return nil
})
c.OnRestart(m.OnRestart)
c.OnRestart(func() error { vars.PluginEnabled.Reset(); return nil })
c.OnFinalShutdown(m.OnFinalShutdown)
// Initialize metrics.
buildInfo.WithLabelValues(coremain.CoreVersion, coremain.GitCommit, runtime.Version()).Set(1)
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
m.Next = next
return m
})
c.OncePerServerBlock(func() error {
c.OnStartup(func() error {
return uniqAddr.ForEach()
})
return nil
})
c.OnRestart(m.OnRestart)
c.OnFinalShutdown(m.OnFinalShutdown)
// Initialize metrics.
buildInfo.WithLabelValues(coremain.CoreVersion, coremain.GitCommit, runtime.Version()).Set(1)
return nil
}
func prometheusParse(c *caddy.Controller) (*Metrics, error) {
var met = New(defaultAddr)
defer func() {
uniqAddr.Set(met.Addr, met.OnStartup)
}()
func parse(c *caddy.Controller) (*Metrics, error) {
met := New(defaultAddr)
i := 0
for c.Next() {

View File

@@ -1,17 +1,17 @@
package vars
import (
"context"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Report reports the metrics data associcated with request.
func Report(ctx context.Context, req request.Request, zone, rcode string, size int, start time.Time) {
// Report reports the metrics data associated with request. This function is exported because it is also
// called from core/dnsserver to report requests hitting the server that should not be handled and are thus
// not sent down the plugin chain.
func Report(server string, req request.Request, zone, rcode string, size int, start time.Time) {
// Proto and Family.
net := req.Proto()
fam := "1"
@@ -19,20 +19,18 @@ func Report(ctx context.Context, req request.Request, zone, rcode string, size i
fam = "2"
}
server := WithServer(ctx)
typ := req.QType()
RequestCount.WithLabelValues(server, zone, net, fam).Inc()
RequestDuration.WithLabelValues(server, zone).Observe(time.Since(start).Seconds())
if req.Do() {
RequestDo.WithLabelValues(server, zone).Inc()
}
if _, known := monitorType[typ]; known {
RequestType.WithLabelValues(server, zone, dns.Type(typ).String()).Inc()
RequestCount.WithLabelValues(server, zone, net, fam, dns.Type(typ).String()).Inc()
RequestDuration.WithLabelValues(server, zone, dns.Type(typ).String()).Observe(time.Since(start).Seconds())
} else {
RequestType.WithLabelValues(server, zone, other).Inc()
RequestCount.WithLabelValues(server, zone, net, fam, other).Inc()
RequestDuration.WithLabelValues(server, zone, other).Observe(time.Since(start).Seconds())
}
ResponseSize.WithLabelValues(server, zone, net).Observe(float64(size))
@@ -41,34 +39,25 @@ func Report(ctx context.Context, req request.Request, zone, rcode string, size i
ResponseRcode.WithLabelValues(server, zone, rcode).Inc()
}
// WithServer returns the current server handling the request.
func WithServer(ctx context.Context) string {
srv := ctx.Value(plugin.ServerCtx{})
if srv == nil {
return ""
}
return srv.(string)
}
var monitorType = map[uint16]bool{
dns.TypeAAAA: true,
dns.TypeA: true,
dns.TypeCNAME: true,
dns.TypeDNSKEY: true,
dns.TypeDS: true,
dns.TypeMX: true,
dns.TypeNSEC3: true,
dns.TypeNSEC: true,
dns.TypeNS: true,
dns.TypePTR: true,
dns.TypeRRSIG: true,
dns.TypeSOA: true,
dns.TypeSRV: true,
dns.TypeTXT: true,
var monitorType = map[uint16]struct{}{
dns.TypeAAAA: {},
dns.TypeA: {},
dns.TypeCNAME: {},
dns.TypeDNSKEY: {},
dns.TypeDS: {},
dns.TypeMX: {},
dns.TypeNSEC3: {},
dns.TypeNSEC: {},
dns.TypeNS: {},
dns.TypePTR: {},
dns.TypeRRSIG: {},
dns.TypeSOA: {},
dns.TypeSRV: {},
dns.TypeTXT: {},
// Meta Qtypes
dns.TypeIXFR: true,
dns.TypeAXFR: true,
dns.TypeANY: true,
dns.TypeIXFR: {},
dns.TypeAXFR: {},
dns.TypeANY: {},
}
const other = "other"

View File

@@ -11,9 +11,9 @@ var (
RequestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "request_count_total",
Name: "requests_total",
Help: "Counter of DNS requests made per zone, protocol and family.",
}, []string{"server", "zone", "proto", "family"})
}, []string{"server", "zone", "proto", "family", "type"})
RequestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
@@ -21,7 +21,7 @@ var (
Name: "request_duration_seconds",
Buckets: plugin.TimeBuckets,
Help: "Histogram of the time (in seconds) each request took.",
}, []string{"server", "zone"})
}, []string{"server", "zone", "type"})
RequestSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
@@ -34,17 +34,10 @@ var (
RequestDo = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "request_do_count_total",
Name: "do_requests_total",
Help: "Counter of DNS requests with DO bit set per zone.",
}, []string{"server", "zone"})
RequestType = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "request_type_count_total",
Help: "Counter of DNS requests per type, per zone.",
}, []string{"server", "zone", "type"})
ResponseSize = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
@@ -56,15 +49,21 @@ var (
ResponseRcode = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Subsystem: subsystem,
Name: "response_rcode_count_total",
Name: "responses_total",
Help: "Counter of response status codes.",
}, []string{"server", "zone", "rcode"})
Panic = prometheus.NewCounter(prometheus.CounterOpts{
Namespace: plugin.Namespace,
Name: "panic_count_total",
Name: "panics_total",
Help: "A metrics that counts the number of panics.",
})
PluginEnabled = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Namespace: plugin.Namespace,
Name: "plugin_enabled",
Help: "A metric that indicates whether a plugin is enabled on per server and zone basis.",
}, []string{"server", "zone", "name"})
)
const (

View File

@@ -6,16 +6,17 @@ import (
"strconv"
"strings"
"github.com/coredns/coredns/plugin/pkg/parse"
"github.com/miekg/dns"
)
// See core/dnsserver/address.go - we should unify these two impls.
// Zones respresents a lists of zone names.
// Zones represents a list of zone names.
type Zones []string
// Matches checks is qname is a subdomain of any of the zones in z. The match
// will return the most specific zones that matches other. The empty string
// Matches checks if qname is a subdomain of any of the zones in z. The match
// will return the most specific zone that matches. The empty string
// signals a not found condition.
func (z Zones) Matches(qname string) string {
zone := ""
@@ -60,25 +61,26 @@ type (
// Normalize will return the host portion of host, stripping
// off any port or transport. The host will also be fully qualified and lowercased.
// An empty string is returned on failure.
func (h Host) Normalize() string {
// The error can be ignored here, because this function should only be called after the corefile has already been vetted.
host, _ := h.MustNormalize()
return host
}
// MustNormalize will return the host portion of host, stripping
// of any port or transport. The host will also be fully qualified and lowercased.
// An error is returned on error
func (h Host) MustNormalize() (string, error) {
s := string(h)
_, s = parse.Transport(s)
switch {
case strings.HasPrefix(s, TransportTLS+"://"):
s = s[len(TransportTLS+"://"):]
case strings.HasPrefix(s, TransportDNS+"://"):
s = s[len(TransportDNS+"://"):]
case strings.HasPrefix(s, TransportGRPC+"://"):
s = s[len(TransportGRPC+"://"):]
case strings.HasPrefix(s, TransportHTTPS+"://"):
s = s[len(TransportHTTPS+"://"):]
// The error can be ignored here, because this function is called after the corefile has already been vetted.
host, _, _, err := SplitHostPort(s)
if err != nil {
return "", err
}
// The error can be ignore here, because this function is called after the corefile
// has already been vetted.
host, _, _, _ := SplitHostPort(s)
return Name(host).Normalize()
return Name(host).Normalize(), nil
}
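A hypothetical caller of MustNormalize, showing the transport and port being stripped and the host lowercased and fully qualified:

~~~ go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin"
)

func main() {
	host, err := plugin.Host("tls://Example.ORG:853").MustNormalize()
	fmt.Println(host, err) // example.org. <nil>
}
~~~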
// SplitHostPort splits s up in a host and port portion, taking reverse address notation into account.
@@ -87,7 +89,7 @@ func (h Host) Normalize() string {
func SplitHostPort(s string) (host, port string, ipnet *net.IPNet, err error) {
// If there is: :[0-9]+ on the end we assume this is the port. This works for (ascii) domain
// names and our reverse syntax, which always needs a /mask *before* the port.
// So from the back, find first colon, and then check if its a number.
// So from the back, find first colon, and then check if it's a number.
host = s
colon := strings.LastIndex(s, ":")
@@ -125,7 +127,8 @@ func SplitHostPort(s string) (host, port string, ipnet *net.IPNet, err error) {
// Get the first lower octet boundary to see what encompassing zone we should be authoritative for.
mod := (bits - ones) % sizeDigit
nearest := (bits - ones) + mod
offset, end := 0, false
offset := 0
var end bool
for i := 0; i < nearest/sizeDigit; i++ {
offset, end = dns.NextLabel(rev, offset)
if end {
@@ -137,11 +140,3 @@ func SplitHostPort(s string) (host, port string, ipnet *net.IPNet, err error) {
}
return host, port, n, nil
}
// Duplicated from core/dnsserver/address.go !
const (
TransportDNS = "dns"
TransportTLS = "tls"
TransportGRPC = "grpc"
TransportHTTPS = "https"
)

View File

@@ -9,10 +9,10 @@ import (
)
// Hash returns the FNV hash of what.
func Hash(what []byte) uint32 {
h := fnv.New32()
func Hash(what []byte) uint64 {
h := fnv.New64()
h.Write(what)
return h.Sum32()
return h.Sum64()
}
// Cache is a sharded cache with random per-shard eviction.
@@ -22,7 +22,7 @@ type Cache struct {
// shard is a cache with random eviction.
type shard struct {
items map[uint32]interface{}
items map[uint64]interface{}
size int
sync.RWMutex
@@ -31,8 +31,8 @@ type shard struct {
// New returns a new cache.
func New(size int) *Cache {
ssize := size / shardSize
if ssize < 512 {
ssize = 512
if ssize < 4 {
ssize = 4
}
c := &Cache{}
@@ -45,19 +45,19 @@ func New(size int) *Cache {
}
// Add adds a new element to the cache. If the element already exists it is overwritten.
func (c *Cache) Add(key uint32, el interface{}) {
func (c *Cache) Add(key uint64, el interface{}) {
shard := key & (shardSize - 1)
c.shards[shard].Add(key, el)
}
// Get looks up element index under key.
func (c *Cache) Get(key uint32) (interface{}, bool) {
func (c *Cache) Get(key uint64) (interface{}, bool) {
shard := key & (shardSize - 1)
return c.shards[shard].Get(key)
}
// Remove removes the element indexed with key.
func (c *Cache) Remove(key uint32) {
func (c *Cache) Remove(key uint64) {
shard := key & (shardSize - 1)
c.shards[shard].Remove(key)
}
@@ -72,22 +72,25 @@ func (c *Cache) Len() int {
}
// newShard returns a new shard with size.
func newShard(size int) *shard { return &shard{items: make(map[uint32]interface{}), size: size} }
func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{}), size: size} }
// Add adds element indexed by key into the cache. Any existing element is overwritten
func (s *shard) Add(key uint32, el interface{}) {
l := s.Len()
if l+1 > s.size {
s.Evict()
}
func (s *shard) Add(key uint64, el interface{}) {
s.Lock()
if len(s.items) >= s.size {
if _, ok := s.items[key]; !ok {
for k := range s.items {
delete(s.items, k)
break
}
}
}
s.items[key] = el
s.Unlock()
}
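The shard selection above relies on shardSize being a power of two, so the low bits of the 64-bit key act as the shard index; a minimal illustration:

~~~ go
package main

import "fmt"

func main() {
	const shardSize = 256 // must be a power of two for the mask below
	key := uint64(0xDEADBEEF)
	fmt.Println(key & (shardSize - 1)) // 239: the low 8 bits pick the shard
}
~~~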
// Remove removes the element indexed by key from the cache.
func (s *shard) Remove(key uint32) {
func (s *shard) Remove(key uint64) {
s.Lock()
delete(s.items, key)
s.Unlock()
@@ -95,26 +98,16 @@ func (s *shard) Remove(key uint32) {
// Evict removes a random element from the cache.
func (s *shard) Evict() {
key := -1
s.RLock()
s.Lock()
for k := range s.items {
key = int(k)
delete(s.items, k)
break
}
s.RUnlock()
if key == -1 {
// empty cache
return
}
// If this item is gone between the RUnlock and Lock race we don't care.
s.Remove(uint32(key))
s.Unlock()
}
// Get looks up the element indexed under key.
func (s *shard) Get(key uint32) (interface{}, bool) {
func (s *shard) Get(key uint64) (interface{}, bool) {
s.RLock()
el, found := s.items[key]
s.RUnlock()

View File

@@ -3,6 +3,8 @@ package dnstest
import (
"net"
"github.com/coredns/coredns/plugin/pkg/reuseport"
"github.com/miekg/dns"
)
@@ -27,7 +29,7 @@ func NewServer(f dns.HandlerFunc) *Server {
s2 := &dns.Server{} // tcp
for i := 0; i < 5; i++ { // 5 attempts
s2.Listener, _ = net.Listen("tcp", ":0")
s2.Listener, _ = reuseport.Listen("tcp", ":0")
if s2.Listener == nil {
continue
}

View File

@@ -8,12 +8,10 @@ import (
// Join joins labels to form a fully qualified domain name. If the last label is
// the root label it is ignored. No other syntax checks are performed.
func Join(labels []string) string {
func Join(labels ...string) string {
ll := len(labels)
if labels[ll-1] == "." {
s := strings.Join(labels[:ll-1], ".")
return dns.Fqdn(s)
return strings.Join(labels[:ll-1], ".") + "."
}
s := strings.Join(labels, ".")
return dns.Fqdn(s)
return dns.Fqdn(strings.Join(labels, "."))
}

View File

@@ -14,34 +14,21 @@ func MinimalTTL(m *dns.Msg, mt response.Type) time.Duration {
return MinimalDefaultTTL
}
// No data to examine, return a short ttl as a fail safe.
if len(m.Answer)+len(m.Ns)+len(m.Extra) == 0 {
// No records or OPT is the only record, return a short ttl as a fail safe.
if len(m.Answer)+len(m.Ns) == 0 &&
(len(m.Extra) == 0 || (len(m.Extra) == 1 && m.Extra[0].Header().Rrtype == dns.TypeOPT)) {
return MinimalDefaultTTL
}
minTTL := MaximumDefaulTTL
for _, r := range m.Answer {
switch mt {
case response.NameError, response.NoData:
if r.Header().Rrtype == dns.TypeSOA {
minTTL = time.Duration(r.(*dns.SOA).Minttl) * time.Second
}
case response.NoError, response.Delegation:
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
}
for _, r := range m.Ns {
switch mt {
case response.NameError, response.NoData:
if r.Header().Rrtype == dns.TypeSOA {
minTTL = time.Duration(r.(*dns.SOA).Minttl) * time.Second
}
case response.NoError, response.Delegation:
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
}
@@ -50,15 +37,8 @@ func MinimalTTL(m *dns.Msg, mt response.Type) time.Duration {
// OPT records use TTL field for extended rcode and flags
continue
}
switch mt {
case response.NameError, response.NoData:
if r.Header().Rrtype == dns.TypeSOA {
minTTL = time.Duration(r.(*dns.SOA).Minttl) * time.Second
}
case response.NoError, response.Delegation:
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
if r.Header().Ttl < uint32(minTTL.Seconds()) {
minTTL = time.Duration(r.Header().Ttl) * time.Second
}
}
return minTTL
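After this change MinimalTTL is simply the smallest header TTL across the answer, authority and additional sections (OPT excluded), with the short default used for effectively empty replies. A sketch of how it is typically driven from a Typify result; the record values are made up for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/coredns/coredns/plugin/pkg/dnsutil"
	"github.com/coredns/coredns/plugin/pkg/response"
	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.Response = true
	rr, _ := dns.NewRR("example.org. 1800 IN A 127.0.0.1")
	m.Answer = []dns.RR{rr}

	mt, _ := response.Typify(m, time.Now().UTC())
	fmt.Println(dnsutil.MinimalTTL(m, mt)) // 30m0s
}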

View File

@@ -53,7 +53,7 @@ func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) {
}
// ResponseToMsg converts a http.Repsonse to a dns message.
// ResponseToMsg converts a http.Response to a dns message.
func ResponseToMsg(resp *http.Response) (*dns.Msg, error) {
defer resp.Body.Close()

View File

@@ -3,10 +3,37 @@ package edns
import (
"errors"
"sync"
"github.com/miekg/dns"
)
var sup = &supported{m: make(map[uint16]struct{})}
type supported struct {
m map[uint16]struct{}
sync.RWMutex
}
// SetSupportedOption adds a new supported option to the set of EDNS0 options that we support. Plugins typically call
// this in their setup code to signal support for a new option.
// By default we support:
// dns.EDNS0NSID, dns.EDNS0EXPIRE, dns.EDNS0COOKIE, dns.EDNS0TCPKEEPALIVE, dns.EDNS0PADDING. These
// values are not in this map and checked directly in the server.
func SetSupportedOption(option uint16) {
sup.Lock()
sup.m[option] = struct{}{}
sup.Unlock()
}
// SupportedOption returns true if the option code is supported as an extra EDNS0 option.
func SupportedOption(option uint16) bool {
sup.RLock()
_, ok := sup.m[option]
sup.RUnlock()
return ok
}
// Version checks the EDNS version in the request. If error
// is nil everything is OK and we can invoke the plugin. If non-nil, the
// returned Msg is valid to be returned to the client (and should). For some
@@ -28,6 +55,7 @@ func Version(req *dns.Msg) (*dns.Msg, error) {
o.Hdr.Name = "."
o.Hdr.Rrtype = dns.TypeOPT
o.SetVersion(0)
m.Rcode = dns.RcodeBadVers
o.SetExtendedRcode(dns.RcodeBadVers)
m.Extra = []dns.RR{o}
@@ -35,7 +63,7 @@ func Version(req *dns.Msg) (*dns.Msg, error) {
}
// Size returns a normalized size based on proto.
func Size(proto string, size int) int {
func Size(proto string, size uint16) uint16 {
if proto == "tcp" {
return dns.MaxMsgSize
}
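A sketch of how a plugin would call SetSupportedOption from its setup code so the server keeps a non-default EDNS0 option on rewritten OPT records; the option code below is a hypothetical locally-assigned value:

package example

import "github.com/coredns/coredns/plugin/pkg/edns"

// myOptionCode is a hypothetical, locally-assigned EDNS0 option code.
const myOptionCode = 0xFFDE

func setup() {
	// Signal that this plugin understands the option, so the server does not
	// strip it when it normalizes OPT records.
	edns.SetSupportedOption(myOptionCode)
}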

View File

@@ -10,18 +10,22 @@ import (
"github.com/miekg/dns"
)
// Do will fuzz p - used by gofuzz. See Maefile.fuzz for comments and context.
// Do will fuzz p - used by gofuzz. See Makefile.fuzz for comments and context.
func Do(p plugin.Handler, data []byte) int {
ctx := context.TODO()
ret := 1
r := new(dns.Msg)
if err := r.Unpack(data); err != nil {
ret = 0
return 0 // plugin will never be called when this happens.
}
// If the data unpacks into a dns msg, but does not have a proper question section, discard it.
// The server parts make sure this is true before calling the plugins; mimic this behavior.
if len(r.Question) == 0 {
return 0
}
if _, err := p.ServeDNS(ctx, &test.ResponseWriter{}, r); err != nil {
ret = 1
return 1
}
return ret
return 0
}

View File

@@ -1,34 +1,66 @@
// Package log implements a small wrapper around the std lib log package.
// It implements log levels by prefixing the logs with [INFO], [DEBUG],
// [WARNING] or [ERROR].
// Debug logging is available and enabled if the *debug* plugin is used.
// Package log implements a small wrapper around the std lib log package. It
// implements log levels by prefixing the logs with [INFO], [DEBUG], [WARNING]
// or [ERROR]. Debug logging is available and enabled if the *debug* plugin is
// used.
//
// log.Info("this is some logging"), will log on the Info level.
//
// log.Debug("this is debug output"), will log in the Debug level.
// log.Debug("this is debug output"), will log in the Debug level, etc.
package log
import (
"fmt"
"io/ioutil"
golog "log"
"os"
"sync"
)
// D controls whether we should output debug logs. If true, we do.
var D bool
// D controls whether we should output debug logs. If true, we do, once set
// it can not be unset.
var D = &d{}
type d struct {
on bool
sync.RWMutex
}
// Set enables debug logging.
func (d *d) Set() {
d.Lock()
d.on = true
d.Unlock()
}
// Clear disables debug logging.
func (d *d) Clear() {
d.Lock()
d.on = false
d.Unlock()
}
// Value returns if debug logging is enabled.
func (d *d) Value() bool {
d.RLock()
b := d.on
d.RUnlock()
return b
}
// logf calls log.Printf prefixed with level.
func logf(level, format string, v ...interface{}) {
s := level + fmt.Sprintf(format, v...)
golog.Print(s)
golog.Print(level, fmt.Sprintf(format, v...))
}
// log calls log.Print prefixed with level.
func log(level string, v ...interface{}) { s := level + fmt.Sprint(v...); golog.Print(s) }
func log(level string, v ...interface{}) {
golog.Print(level, fmt.Sprint(v...))
}
// Debug is equivalent to log.Print(), but prefixed with "[DEBUG] ". It only outputs something
// if D is true.
func Debug(v ...interface{}) {
if !D {
if !D.Value() {
return
}
log(debug, v...)
@@ -37,7 +69,7 @@ func Debug(v ...interface{}) {
// Debugf is equivalent to log.Printf(), but prefixed with "[DEBUG] ". It only outputs something
// if D is true.
func Debugf(format string, v ...interface{}) {
if !D {
if !D.Value() {
return
}
logf(debug, format, v...)
@@ -61,9 +93,21 @@ func Error(v ...interface{}) { log(err, v...) }
// Errorf is equivalent to log.Printf, but prefixed with "[ERROR] ".
func Errorf(format string, v ...interface{}) { logf(err, format, v...) }
// Fatal is equivalent to log.Print, but prefixed with "[FATAL] ", and calling
// os.Exit(1).
func Fatal(v ...interface{}) { log(fatal, v...); os.Exit(1) }
// Fatalf is equivalent to log.Printf, but prefixed with "[FATAL] ", and calling
// os.Exit(1)
func Fatalf(format string, v ...interface{}) { logf(fatal, format, v...); os.Exit(1) }
// Discard sets the log output to /dev/null.
func Discard() { golog.SetOutput(ioutil.Discard) }
const (
debug = "[DEBUG] "
err = "[ERROR] "
warning = "[WARNING] "
fatal = "[FATAL] "
info = "[INFO] "
warning = "[WARNING] "
)
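With D now a small guarded struct instead of a plain bool, debug logging is toggled through Set/Clear rather than by assignment. A short sketch (roughly what the *debug* plugin does at setup time):

package main

import clog "github.com/coredns/coredns/plugin/pkg/log"

func main() {
	clog.Debug("dropped")                   // not printed, debug is off by default
	clog.D.Set()                            // enable debug output
	clog.Debugf("qname %s", "example.org.") // [DEBUG] qname example.org.
	clog.Info("always printed")             // [INFO] always printed
}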

View File

@@ -2,7 +2,7 @@ package log
import (
"fmt"
golog "log"
"os"
)
// P is a logger that includes the plugin doing the logging.
@@ -12,21 +12,19 @@ type P struct {
// NewWithPlugin returns a logger that includes "plugin/name: " in the log message.
// I.e [INFO] plugin/<name>: message.
func NewWithPlugin(name string) P { return P{name} }
func NewWithPlugin(name string) P { return P{"plugin/" + name + ": "} }
func (p P) logf(level, format string, v ...interface{}) {
s := level + pFormat(p.plugin) + fmt.Sprintf(format, v...)
golog.Print(s)
log(level, p.plugin, fmt.Sprintf(format, v...))
}
func (p P) log(level string, v ...interface{}) {
s := level + pFormat(p.plugin) + fmt.Sprint(v...)
golog.Print(s)
log(level+p.plugin, v...)
}
// Debug logs as log.Debug.
func (p P) Debug(v ...interface{}) {
if !D {
if !D.Value() {
return
}
p.log(debug, v...)
@@ -34,7 +32,7 @@ func (p P) Debug(v ...interface{}) {
// Debugf logs as log.Debugf.
func (p P) Debugf(format string, v ...interface{}) {
if !D {
if !D.Value() {
return
}
p.logf(debug, format, v...)
@@ -58,4 +56,8 @@ func (p P) Error(v ...interface{}) { p.log(err, v...) }
// Errorf logs as log.Errorf.
func (p P) Errorf(format string, v ...interface{}) { p.logf(err, format, v...) }
func pFormat(s string) string { return "plugin/" + s + ": " }
// Fatal logs as log.Fatal and calls os.Exit(1).
func (p P) Fatal(v ...interface{}) { p.log(fatal, v...); os.Exit(1) }
// Fatalf logs as log.Fatalf and calls os.Exit(1).
func (p P) Fatalf(format string, v ...interface{}) { p.logf(fatal, format, v...); os.Exit(1) }
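Since NewWithPlugin now bakes the "plugin/<name>: " prefix into the logger itself, a plugin keeps one package-level logger and every call is prefixed automatically. A minimal sketch for a hypothetical plugin named example:

package example

import clog "github.com/coredns/coredns/plugin/pkg/log"

var log = clog.NewWithPlugin("example")

func startup() {
	log.Info("starting up")                    // [INFO] plugin/example: starting up
	log.Errorf("lookup failed: %s", "timeout") // [ERROR] plugin/example: lookup failed: timeout
}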

View File

@@ -1,24 +1,42 @@
package dnsutil
package parse
import (
"fmt"
"net"
"os"
"strings"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/miekg/dns"
)
// ParseHostPortOrFile parses the strings in s, each string can either be a address,
// address:port or a filename. The address part is checked and the filename case a
// resolv.conf like file is parsed and the nameserver found are returned.
func ParseHostPortOrFile(s ...string) ([]string, error) {
// Strips the zone, but preserves any port that comes after the zone
func stripZone(host string) string {
if strings.Contains(host, "%") {
lastPercent := strings.LastIndex(host, "%")
newHost := host[:lastPercent]
return newHost
}
return host
}
// HostPortOrFile parses the strings in s; each string can either be an
// address, [scheme://]address:port or a filename. The address part is checked
// and, in the case of a filename, a resolv.conf-like file is assumed and parsed,
// and the nameservers found are returned.
func HostPortOrFile(s ...string) ([]string, error) {
var servers []string
for _, host := range s {
for _, h := range s {
trans, host := Transport(h)
addr, _, err := net.SplitHostPort(host)
if err != nil {
// Parse didn't work, it is not a addr:port combo
if net.ParseIP(host) == nil {
// Not an IP address.
hostNoZone := stripZone(host)
if net.ParseIP(hostNoZone) == nil {
ss, err := tryFile(host)
if err == nil {
servers = append(servers, ss...)
@@ -26,13 +44,22 @@ func ParseHostPortOrFile(s ...string) ([]string, error) {
}
return servers, fmt.Errorf("not an IP address or file: %q", host)
}
ss := net.JoinHostPort(host, "53")
var ss string
switch trans {
case transport.DNS:
ss = net.JoinHostPort(host, transport.Port)
case transport.TLS:
ss = transport.TLS + "://" + net.JoinHostPort(host, transport.TLSPort)
case transport.GRPC:
ss = transport.GRPC + "://" + net.JoinHostPort(host, transport.GRPCPort)
case transport.HTTPS:
ss = transport.HTTPS + "://" + net.JoinHostPort(host, transport.HTTPSPort)
}
servers = append(servers, ss)
continue
}
if net.ParseIP(addr) == nil {
// Not an IP address.
if net.ParseIP(stripZone(addr)) == nil {
ss, err := tryFile(host)
if err == nil {
servers = append(servers, ss...)
@@ -40,7 +67,10 @@ func ParseHostPortOrFile(s ...string) ([]string, error) {
}
return servers, fmt.Errorf("not an IP address or file: %q", host)
}
servers = append(servers, host)
servers = append(servers, h)
}
if len(servers) == 0 {
return servers, fmt.Errorf("no nameservers found")
}
return servers, nil
}
@@ -61,9 +91,9 @@ func tryFile(s string) ([]string, error) {
return servers, nil
}
// ParseHostPort will check if the host part is a valid IP address, if the
// HostPort will check if the host part is a valid IP address, if the
// IP address is valid, but no port is found, defaultPort is added.
func ParseHostPort(s, defaultPort string) (string, error) {
func HostPort(s, defaultPort string) (string, error) {
addr, port, err := net.SplitHostPort(s)
if port == "" {
port = defaultPort
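A sketch of the renamed parse.HostPortOrFile: plain IPs get the default DNS port, transport prefixes keep their scheme and receive that transport's default port, and anything else is treated as a resolv.conf-style file (the path below assumes a typical Linux host):

package main

import (
	"fmt"
	"log"

	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	servers, err := parse.HostPortOrFile("8.8.8.8", "tls://9.9.9.9", "/etc/resolv.conf")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(servers) // [8.8.8.8:53 tls://9.9.9.9:853 <nameservers from resolv.conf>]
}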

View File

@@ -0,0 +1,49 @@
// Package parse contains functions that can be used in the setup code for plugins.
package parse
import (
"fmt"
"github.com/coredns/coredns/plugin/pkg/transport"
"github.com/caddyserver/caddy"
)
// Transfer parses transfer statements: 'transfer [to|from] [address...]'.
func Transfer(c *caddy.Controller, secondary bool) (tos, froms []string, err error) {
if !c.NextArg() {
return nil, nil, c.ArgErr()
}
value := c.Val()
switch value {
case "to":
tos = c.RemainingArgs()
for i := range tos {
if tos[i] != "*" {
normalized, err := HostPort(tos[i], transport.Port)
if err != nil {
return nil, nil, err
}
tos[i] = normalized
}
}
case "from":
if !secondary {
return nil, nil, fmt.Errorf("can't use `transfer from` when not being a secondary")
}
froms = c.RemainingArgs()
for i := range froms {
if froms[i] != "*" {
normalized, err := HostPort(froms[i], transport.Port)
if err != nil {
return nil, nil, err
}
froms[i] = normalized
} else {
return nil, nil, fmt.Errorf("can't use '*' in transfer from")
}
}
}
return
}
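A sketch of how a plugin's setup code would drive parse.Transfer; caddy.NewTestController stands in for the Corefile parser here, and the addresses are illustrative:

package main

import (
	"fmt"

	"github.com/caddyserver/caddy"
	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	c := caddy.NewTestController("dns", "transfer to 10.0.0.1 192.168.1.1:5353")
	c.Next() // consume the "transfer" token, as a plugin's setup loop would
	tos, _, err := parse.Transfer(c, false)
	fmt.Println(tos, err) // [10.0.0.1:53 192.168.1.1:5353] <nil>
}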

View File

@@ -0,0 +1,33 @@
package parse
import (
"strings"
"github.com/coredns/coredns/plugin/pkg/transport"
)
// Transport returns the transport defined in s and a string where the
// transport prefix is removed (if there was any). If no transport is defined
// we default to TransportDNS
func Transport(s string) (trans string, addr string) {
switch {
case strings.HasPrefix(s, transport.TLS+"://"):
s = s[len(transport.TLS+"://"):]
return transport.TLS, s
case strings.HasPrefix(s, transport.DNS+"://"):
s = s[len(transport.DNS+"://"):]
return transport.DNS, s
case strings.HasPrefix(s, transport.GRPC+"://"):
s = s[len(transport.GRPC+"://"):]
return transport.GRPC, s
case strings.HasPrefix(s, transport.HTTPS+"://"):
s = s[len(transport.HTTPS+"://"):]
return transport.HTTPS, s
}
return transport.DNS, s
}
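A quick sketch of the new parse.Transport helper, which replaces the per-caller prefix switching:

package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	trans, addr := parse.Transport("tls://dns.example.org:853")
	fmt.Println(trans, addr) // tls dns.example.org:853

	trans, addr = parse.Transport("10.0.0.1:53") // no prefix defaults to plain DNS
	fmt.Println(trans, addr)                     // dns 10.0.0.1:53
}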

View File

@@ -15,6 +15,8 @@ const (
NoError Type = iota
// NameError is a NXDOMAIN in header, SOA in auth.
NameError
// ServerError is a set of errors we want to cache, for now it contains SERVFAIL and NOTIMPL.
ServerError
// NoData indicates name found, but not the type: NOERROR in header, SOA in auth.
NoData
// Delegation is a msg with a pointer to another nameserver: NOERROR in header, NS in auth, optionally fluff in additional (not checked).
@@ -28,13 +30,14 @@ const (
)
var toString = map[Type]string{
NoError: "NOERROR",
NameError: "NXDOMAIN",
NoData: "NODATA",
Delegation: "DELEGATION",
Meta: "META",
Update: "UPDATE",
OtherError: "OTHERERROR",
NoError: "NOERROR",
NameError: "NXDOMAIN",
ServerError: "SERVERERROR",
NoData: "NODATA",
Delegation: "DELEGATION",
Meta: "META",
Update: "UPDATE",
OtherError: "OTHERERROR",
}
func (t Type) String() string { return toString[t] }
@@ -106,6 +109,10 @@ func Typify(m *dns.Msg, t time.Time) (Type, *dns.OPT) {
return NameError, opt
}
if m.Rcode == dns.RcodeServerFailure || m.Rcode == dns.RcodeNotImplemented {
return ServerError, opt
}
if ns > 0 && m.Rcode == dns.RcodeSuccess {
return Delegation, opt
}
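A sketch showing the new ServerError type coming out of Typify for a SERVFAIL reply (NOTIMPL is classified the same way):

package main

import (
	"fmt"
	"time"

	"github.com/coredns/coredns/plugin/pkg/response"
	"github.com/miekg/dns"
)

func main() {
	q := new(dns.Msg)
	q.SetQuestion("example.org.", dns.TypeA)

	m := new(dns.Msg)
	m.SetRcode(q, dns.RcodeServerFailure)

	mt, _ := response.Typify(m, time.Now().UTC())
	fmt.Println(mt) // SERVERERROR
}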

View File

@@ -0,0 +1,37 @@
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package reuseport
import (
"context"
"net"
"syscall"
"github.com/coredns/coredns/plugin/pkg/log"
"golang.org/x/sys/unix"
)
func control(network, address string, c syscall.RawConn) error {
c.Control(func(fd uintptr) {
if err := unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil {
log.Warningf("Failed to set SO_REUSEPORT on socket: %s", err)
}
})
return nil
}
// Listen announces on the local network address. See net.Listen for more information.
// If SO_REUSEPORT is available it will be set on the socket.
func Listen(network, addr string) (net.Listener, error) {
lc := net.ListenConfig{Control: control}
return lc.Listen(context.Background(), network, addr)
}
// ListenPacket announces on the local network address. See net.ListenPacket for more information.
// If SO_REUSEPORT is available it will be set on the socket.
func ListenPacket(network, addr string) (net.PacketConn, error) {
lc := net.ListenConfig{Control: control}
return lc.ListenPacket(context.Background(), network, addr)
}
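reuseport.Listen is a drop-in replacement for net.Listen that sets SO_REUSEPORT where the platform supports it, and falls back to plain net.Listen otherwise (see the companion build-tagged file that follows). A minimal sketch; the port is arbitrary:

package main

import (
	"log"

	"github.com/coredns/coredns/plugin/pkg/reuseport"
)

func main() {
	l, err := reuseport.Listen("tcp", ":8053")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	// hand l to a dns.Server or http.Server as usual
}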

View File

@@ -0,0 +1,13 @@
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package reuseport
import "net"
// Listen is a wrapper around net.Listen.
func Listen(network, addr string) (net.Listener, error) { return net.Listen(network, addr) }
// ListenPacket is a wrapper around net.ListenPacket.
func ListenPacket(network, addr string) (net.PacketConn, error) {
return net.ListenPacket(network, addr)
}

View File

@@ -0,0 +1,21 @@
package transport
// These transports are supported by CoreDNS.
const (
DNS = "dns"
TLS = "tls"
GRPC = "grpc"
HTTPS = "https"
)
// Port numbers for the various transports.
const (
// Port is the default port for DNS
Port = "53"
// TLSPort is the default port for DNS-over-TLS.
TLSPort = "853"
// GRPCPort is the default port for DNS-over-gRPC.
GRPCPort = "443"
// HTTPSPort is the default port for DNS-over-HTTPS.
HTTPSPort = "443"
)

View File

@@ -15,8 +15,7 @@ type item struct {
// New returns a new initialized U.
func New() U { return U{u: make(map[string]item)} }
// Set sets function f in U under key. If the key already exists
// it is not overwritten.
// Set sets function f in U under key. If the key already exists it is not overwritten.
func (u U) Set(key string, f func() error) {
if _, ok := u.u[key]; ok {
return
@@ -24,17 +23,12 @@ func (u U) Set(key string, f func() error) {
u.u[key] = item{todo, f}
}
// SetTodo sets key to 'todo' again.
func (u U) SetTodo(key string) {
v, ok := u.u[key]
if !ok {
return
}
v.state = todo
u.u[key] = v
// Unset removes the key.
func (u U) Unset(key string) {
delete(u.u, key)
}
// ForEach iterates for u executes f for each element that is 'todo' and sets it to 'done'.
// ForEach iterates over u and executes f for each element that is 'todo' and sets it to 'done'.
func (u U) ForEach() error {
for k, v := range u.u {
if v.state == todo {

View File

@@ -1,23 +0,0 @@
package watch
// Chan is used to inform the server of a change. Whenever
// a watched FQDN has a change in data, that FQDN should be
// sent down this channel.
type Chan chan string
// Watchable is the interface watchable plugins should implement
type Watchable interface {
// Name returns the plugin name.
Name() string
// SetWatchChan is called when the watch channel is created.
SetWatchChan(Chan)
// Watch is called whenever a watch is created for a FQDN. Plugins
// should send the FQDN down the watch channel when its data may have
// changed. This is an exact match only.
Watch(qname string) error
// StopWatching is called whenever all watches are canceled for a FQDN.
StopWatching(qname string)
}

View File

@@ -1,178 +0,0 @@
package watch
import (
"fmt"
"io"
"sync"
"github.com/miekg/dns"
"github.com/coredns/coredns/pb"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/request"
)
// Watcher handles watch creation, cancellation, and processing.
type Watcher interface {
// Watch monitors a client stream and creates and cancels watches.
Watch(pb.DnsService_WatchServer) error
// Stop cancels open watches and stops the watch processing go routine.
Stop()
}
// Manager contains all the data needed to manage watches
type Manager struct {
changes Chan
stopper chan bool
counter int64
watches map[string]watchlist
plugins []Watchable
mutex sync.Mutex
}
type watchlist map[int64]pb.DnsService_WatchServer
// NewWatcher creates a Watcher, which is used to manage watched names.
func NewWatcher(plugins []Watchable) *Manager {
w := &Manager{changes: make(Chan), stopper: make(chan bool), watches: make(map[string]watchlist), plugins: plugins}
for _, p := range plugins {
p.SetWatchChan(w.changes)
}
go w.process()
return w
}
func (w *Manager) nextID() int64 {
w.mutex.Lock()
w.counter++
id := w.counter
w.mutex.Unlock()
return id
}
// Watch monitors a client stream and creates and cancels watches.
func (w *Manager) Watch(stream pb.DnsService_WatchServer) error {
for {
in, err := stream.Recv()
if err == io.EOF {
return nil
}
if err != nil {
return err
}
create := in.GetCreateRequest()
if create != nil {
msg := new(dns.Msg)
err := msg.Unpack(create.Query.Msg)
if err != nil {
log.Warningf("Could not decode watch request: %s\n", err)
stream.Send(&pb.WatchResponse{Err: "could not decode request"})
continue
}
id := w.nextID()
if err := stream.Send(&pb.WatchResponse{WatchId: id, Created: true}); err != nil {
// if we fail to notify client of watch creation, don't create the watch
continue
}
// Normalize qname
qname := (&request.Request{Req: msg}).Name()
w.mutex.Lock()
if _, ok := w.watches[qname]; !ok {
w.watches[qname] = make(watchlist)
}
w.watches[qname][id] = stream
w.mutex.Unlock()
for _, p := range w.plugins {
err := p.Watch(qname)
if err != nil {
log.Warningf("Failed to start watch for %s in plugin %s: %s\n", qname, p.Name(), err)
stream.Send(&pb.WatchResponse{Err: fmt.Sprintf("failed to start watch for %s in plugin %s", qname, p.Name())})
}
}
continue
}
cancel := in.GetCancelRequest()
if cancel != nil {
w.mutex.Lock()
for qname, wl := range w.watches {
ws, ok := wl[cancel.WatchId]
if !ok {
continue
}
// only allow cancels from the client that started it
// TODO: test what happens if a stream tries to cancel a watchID that it doesn't own
if ws != stream {
continue
}
delete(wl, cancel.WatchId)
// if there are no more watches for this qname, we should tell the plugins
if len(wl) == 0 {
for _, p := range w.plugins {
p.StopWatching(qname)
}
delete(w.watches, qname)
}
// let the client know we canceled the watch
stream.Send(&pb.WatchResponse{WatchId: cancel.WatchId, Canceled: true})
}
w.mutex.Unlock()
continue
}
}
}
func (w *Manager) process() {
for {
select {
case <-w.stopper:
return
case changed := <-w.changes:
w.mutex.Lock()
for qname, wl := range w.watches {
if plugin.Zones([]string{changed}).Matches(qname) == "" {
continue
}
for id, stream := range wl {
wr := pb.WatchResponse{WatchId: id, Qname: qname}
err := stream.Send(&wr)
if err != nil {
log.Warningf("Error sending change for %s to watch %d: %s. Removing watch.\n", qname, id, err)
delete(w.watches[qname], id)
}
}
}
w.mutex.Unlock()
}
}
}
// Stop cancels open watches and stops the watch processing go routine.
func (w *Manager) Stop() {
w.stopper <- true
w.mutex.Lock()
for wn, wl := range w.watches {
for id, stream := range wl {
wr := pb.WatchResponse{WatchId: id, Canceled: true}
err := stream.Send(&wr)
if err != nil {
log.Warningf("Error notifiying client of cancellation: %s\n", err)
}
}
delete(w.watches, wn)
}
w.mutex.Unlock()
}

View File

@@ -21,7 +21,8 @@ type (
// and/or error.
//
// If ServeDNS writes to the response body, it should return a status
// code. If the status code is not one of the following:
// code. CoreDNS assumes *no* reply has yet been written if the status
// code is one of the following:
//
// * SERVFAIL (dns.RcodeServerFailure)
//
@@ -31,9 +32,9 @@ type (
//
// * NOTIMP (dns.RcodeNotImplemented)
//
// CoreDNS assumes *no* reply has yet been written. All other response
// codes signal other handlers above it that the response message is
// already written, and that they should not write to it also.
// All other response codes signal other handlers above it that the
// response message is already written, and that they should not write
// to it also.
//
// If ServeDNS encounters an error, it should return the error value
// so it can be logged by designated error-handling plugin.
@@ -68,7 +69,7 @@ func (f HandlerFunc) Name() string { return "handlerfunc" }
// Error returns err with 'plugin/name: ' prefixed to it.
func Error(name string, err error) error { return fmt.Errorf("%s/%s: %s", "plugin", name, err) }
// NextOrFailure calls next.ServeDNS when next is not nill, otherwise it will return, a ServerFailure and a nil error.
// NextOrFailure calls next.ServeDNS when next is not nil, otherwise it will return, a ServerFailure and a nil error.
func NextOrFailure(name string, next Handler, ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) { // nolint: golint
if next != nil {
if span := ot.SpanFromContext(ctx); span != nil {
@@ -83,7 +84,7 @@ func NextOrFailure(name string, next Handler, ctx context.Context, w dns.Respons
}
// ClientWrite returns true if the response has been written to the client.
// Each plugin to adhire to this protocol.
// Each plugin should adhere to this protocol.
func ClientWrite(rcode int) bool {
switch rcode {
case dns.RcodeServerFailure:
@@ -106,6 +107,3 @@ var TimeBuckets = prometheus.ExponentialBuckets(0.00025, 2, 16) // from 0.25ms t
// ErrOnce is returned when a plugin doesn't support multiple setups per server.
var ErrOnce = errors.New("this plugin can only be used once per Server Block")
// ServerCtx is the context key to pass server address context to the plugins handling the request.
type ServerCtx struct{}
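A sketch of the rcode contract described above, as a hypothetical chained plugin: it only writes a reply itself when the returned rcode says nothing was written further down the chain:

package example

import (
	"context"

	"github.com/coredns/coredns/plugin"
	"github.com/miekg/dns"
)

// Example is a hypothetical plugin used to illustrate the rcode contract.
type Example struct{ Next plugin.Handler }

func (e Example) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	rcode, err := plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
	if plugin.ClientWrite(rcode) {
		return rcode, err // a reply was already written downstream
	}
	// Nothing was written (SERVFAIL, REFUSED, FORMERR, NOTIMP): answer ourselves.
	m := new(dns.Msg)
	m.SetRcode(r, rcode)
	w.WriteMsg(m)
	return rcode, err
}

func (e Example) Name() string { return "example" }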

vendor/github.com/coredns/coredns/plugin/register.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package plugin
import "github.com/caddyserver/caddy"
// Register registers your plugin with CoreDNS and allows it to be called when the server is running.
func Register(name string, action caddy.SetupFunc) {
caddy.RegisterPlugin(name, caddy.Plugin{
ServerType: "dns",
Action: action,
})
}
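A sketch of how a plugin uses the new helper instead of calling caddy.RegisterPlugin directly; the plugin name and setup body are illustrative:

package example

import (
	"github.com/caddyserver/caddy"
	"github.com/coredns/coredns/plugin"
)

func init() { plugin.Register("example", setup) }

func setup(c *caddy.Controller) error {
	c.Next() // "example"
	// parse plugin-specific options here and add the handler to the dnsserver config
	return nil
}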

View File

@@ -2,8 +2,8 @@ package test
import (
"context"
"fmt"
"sort"
"testing"
"github.com/miekg/dns"
)
@@ -113,29 +113,25 @@ func OPT(bufsize int, do bool) *dns.OPT {
}
// Header test if the header in resp matches the header as defined in tc.
func Header(t *testing.T, tc Case, resp *dns.Msg) bool {
func Header(tc Case, resp *dns.Msg) error {
if resp.Rcode != tc.Rcode {
t.Errorf("Rcode is %q, expected %q", dns.RcodeToString[resp.Rcode], dns.RcodeToString[tc.Rcode])
return false
return fmt.Errorf("rcode is %q, expected %q", dns.RcodeToString[resp.Rcode], dns.RcodeToString[tc.Rcode])
}
if len(resp.Answer) != len(tc.Answer) {
t.Errorf("Answer for %q contained %d results, %d expected", tc.Qname, len(resp.Answer), len(tc.Answer))
return false
return fmt.Errorf("answer for %q contained %d results, %d expected", tc.Qname, len(resp.Answer), len(tc.Answer))
}
if len(resp.Ns) != len(tc.Ns) {
t.Errorf("Authority for %q contained %d results, %d expected", tc.Qname, len(resp.Ns), len(tc.Ns))
return false
return fmt.Errorf("authority for %q contained %d results, %d expected", tc.Qname, len(resp.Ns), len(tc.Ns))
}
if len(resp.Extra) != len(tc.Extra) {
t.Errorf("Additional for %q contained %d results, %d expected", tc.Qname, len(resp.Extra), len(tc.Extra))
return false
return fmt.Errorf("additional for %q contained %d results, %d expected", tc.Qname, len(resp.Extra), len(tc.Extra))
}
return true
return nil
}
// Section tests if the the section in tc matches rr.
func Section(t *testing.T, tc Case, sec sect, rr []dns.RR) bool {
// Section tests if the section in tc matches rr.
func Section(tc Case, sec sect, rr []dns.RR) error {
section := []dns.RR{}
switch sec {
case 0:
@@ -148,134 +144,112 @@ func Section(t *testing.T, tc Case, sec sect, rr []dns.RR) bool {
for i, a := range rr {
if a.Header().Name != section[i].Header().Name {
t.Errorf("RR %d should have a Header Name of %q, but has %q", i, section[i].Header().Name, a.Header().Name)
return false
return fmt.Errorf("RR %d should have a Header Name of %q, but has %q", i, section[i].Header().Name, a.Header().Name)
}
// 303 signals: don't care what the ttl is.
if section[i].Header().Ttl != 303 && a.Header().Ttl != section[i].Header().Ttl {
if _, ok := section[i].(*dns.OPT); !ok {
// we check edns0 bufize on this one
t.Errorf("RR %d should have a Header TTL of %d, but has %d", i, section[i].Header().Ttl, a.Header().Ttl)
return false
return fmt.Errorf("RR %d should have a Header TTL of %d, but has %d", i, section[i].Header().Ttl, a.Header().Ttl)
}
}
if a.Header().Rrtype != section[i].Header().Rrtype {
t.Errorf("RR %d should have a header rr type of %d, but has %d", i, section[i].Header().Rrtype, a.Header().Rrtype)
return false
return fmt.Errorf("RR %d should have a header rr type of %d, but has %d", i, section[i].Header().Rrtype, a.Header().Rrtype)
}
switch x := a.(type) {
case *dns.SRV:
if x.Priority != section[i].(*dns.SRV).Priority {
t.Errorf("RR %d should have a Priority of %d, but has %d", i, section[i].(*dns.SRV).Priority, x.Priority)
return false
return fmt.Errorf("RR %d should have a Priority of %d, but has %d", i, section[i].(*dns.SRV).Priority, x.Priority)
}
if x.Weight != section[i].(*dns.SRV).Weight {
t.Errorf("RR %d should have a Weight of %d, but has %d", i, section[i].(*dns.SRV).Weight, x.Weight)
return false
return fmt.Errorf("RR %d should have a Weight of %d, but has %d", i, section[i].(*dns.SRV).Weight, x.Weight)
}
if x.Port != section[i].(*dns.SRV).Port {
t.Errorf("RR %d should have a Port of %d, but has %d", i, section[i].(*dns.SRV).Port, x.Port)
return false
return fmt.Errorf("RR %d should have a Port of %d, but has %d", i, section[i].(*dns.SRV).Port, x.Port)
}
if x.Target != section[i].(*dns.SRV).Target {
t.Errorf("RR %d should have a Target of %q, but has %q", i, section[i].(*dns.SRV).Target, x.Target)
return false
return fmt.Errorf("RR %d should have a Target of %q, but has %q", i, section[i].(*dns.SRV).Target, x.Target)
}
case *dns.RRSIG:
if x.TypeCovered != section[i].(*dns.RRSIG).TypeCovered {
t.Errorf("RR %d should have a TypeCovered of %d, but has %d", i, section[i].(*dns.RRSIG).TypeCovered, x.TypeCovered)
return false
return fmt.Errorf("RR %d should have a TypeCovered of %d, but has %d", i, section[i].(*dns.RRSIG).TypeCovered, x.TypeCovered)
}
if x.Labels != section[i].(*dns.RRSIG).Labels {
t.Errorf("RR %d should have a Labels of %d, but has %d", i, section[i].(*dns.RRSIG).Labels, x.Labels)
return false
return fmt.Errorf("RR %d should have a Labels of %d, but has %d", i, section[i].(*dns.RRSIG).Labels, x.Labels)
}
if x.SignerName != section[i].(*dns.RRSIG).SignerName {
t.Errorf("RR %d should have a SignerName of %s, but has %s", i, section[i].(*dns.RRSIG).SignerName, x.SignerName)
return false
return fmt.Errorf("RR %d should have a SignerName of %s, but has %s", i, section[i].(*dns.RRSIG).SignerName, x.SignerName)
}
case *dns.NSEC:
if x.NextDomain != section[i].(*dns.NSEC).NextDomain {
t.Errorf("RR %d should have a NextDomain of %s, but has %s", i, section[i].(*dns.NSEC).NextDomain, x.NextDomain)
return false
return fmt.Errorf("RR %d should have a NextDomain of %s, but has %s", i, section[i].(*dns.NSEC).NextDomain, x.NextDomain)
}
// TypeBitMap
case *dns.A:
if x.A.String() != section[i].(*dns.A).A.String() {
t.Errorf("RR %d should have a Address of %q, but has %q", i, section[i].(*dns.A).A.String(), x.A.String())
return false
return fmt.Errorf("RR %d should have a Address of %q, but has %q", i, section[i].(*dns.A).A.String(), x.A.String())
}
case *dns.AAAA:
if x.AAAA.String() != section[i].(*dns.AAAA).AAAA.String() {
t.Errorf("RR %d should have a Address of %q, but has %q", i, section[i].(*dns.AAAA).AAAA.String(), x.AAAA.String())
return false
return fmt.Errorf("RR %d should have a Address of %q, but has %q", i, section[i].(*dns.AAAA).AAAA.String(), x.AAAA.String())
}
case *dns.TXT:
for j, txt := range x.Txt {
if txt != section[i].(*dns.TXT).Txt[j] {
t.Errorf("RR %d should have a Txt of %q, but has %q", i, section[i].(*dns.TXT).Txt[j], txt)
return false
return fmt.Errorf("RR %d should have a Txt of %q, but has %q", i, section[i].(*dns.TXT).Txt[j], txt)
}
}
case *dns.HINFO:
if x.Cpu != section[i].(*dns.HINFO).Cpu {
t.Errorf("RR %d should have a Cpu of %s, but has %s", i, section[i].(*dns.HINFO).Cpu, x.Cpu)
return fmt.Errorf("RR %d should have a Cpu of %s, but has %s", i, section[i].(*dns.HINFO).Cpu, x.Cpu)
}
if x.Os != section[i].(*dns.HINFO).Os {
t.Errorf("RR %d should have a Os of %s, but has %s", i, section[i].(*dns.HINFO).Os, x.Os)
return fmt.Errorf("RR %d should have a Os of %s, but has %s", i, section[i].(*dns.HINFO).Os, x.Os)
}
case *dns.SOA:
tt := section[i].(*dns.SOA)
if x.Ns != tt.Ns {
t.Errorf("SOA nameserver should be %q, but is %q", tt.Ns, x.Ns)
return false
return fmt.Errorf("SOA nameserver should be %q, but is %q", tt.Ns, x.Ns)
}
case *dns.PTR:
tt := section[i].(*dns.PTR)
if x.Ptr != tt.Ptr {
t.Errorf("PTR ptr should be %q, but is %q", tt.Ptr, x.Ptr)
return false
return fmt.Errorf("PTR ptr should be %q, but is %q", tt.Ptr, x.Ptr)
}
case *dns.CNAME:
tt := section[i].(*dns.CNAME)
if x.Target != tt.Target {
t.Errorf("CNAME target should be %q, but is %q", tt.Target, x.Target)
return false
return fmt.Errorf("CNAME target should be %q, but is %q", tt.Target, x.Target)
}
case *dns.MX:
tt := section[i].(*dns.MX)
if x.Mx != tt.Mx {
t.Errorf("MX Mx should be %q, but is %q", tt.Mx, x.Mx)
return false
return fmt.Errorf("MX Mx should be %q, but is %q", tt.Mx, x.Mx)
}
if x.Preference != tt.Preference {
t.Errorf("MX Preference should be %q, but is %q", tt.Preference, x.Preference)
return false
return fmt.Errorf("MX Preference should be %q, but is %q", tt.Preference, x.Preference)
}
case *dns.NS:
tt := section[i].(*dns.NS)
if x.Ns != tt.Ns {
t.Errorf("NS nameserver should be %q, but is %q", tt.Ns, x.Ns)
return false
return fmt.Errorf("NS nameserver should be %q, but is %q", tt.Ns, x.Ns)
}
case *dns.OPT:
tt := section[i].(*dns.OPT)
if x.UDPSize() != tt.UDPSize() {
t.Errorf("OPT UDPSize should be %d, but is %d", tt.UDPSize(), x.UDPSize())
return false
return fmt.Errorf("OPT UDPSize should be %d, but is %d", tt.UDPSize(), x.UDPSize())
}
if x.Do() != tt.Do() {
t.Errorf("OPT DO should be %t, but is %t", tt.Do(), x.Do())
return false
return fmt.Errorf("OPT DO should be %t, but is %t", tt.Do(), x.Do())
}
}
}
return true
return nil
}
// CNAMEOrder makes sure that CNAMES do not appear after their target records
func CNAMEOrder(t *testing.T, res *dns.Msg) {
func CNAMEOrder(res *dns.Msg) error {
for i, c := range res.Answer {
if c.Header().Rrtype != dns.TypeCNAME {
continue
@@ -284,38 +258,29 @@ func CNAMEOrder(t *testing.T, res *dns.Msg) {
if a.Header().Name != c.(*dns.CNAME).Target {
continue
}
t.Errorf("CNAME found after target record\n")
t.Logf("%v\n", res)
return fmt.Errorf("CNAME found after target record")
}
}
return nil
}
// SortAndCheck sorts resp and the checks the header and three sections against the testcase in tc.
func SortAndCheck(t *testing.T, resp *dns.Msg, tc Case) {
func SortAndCheck(resp *dns.Msg, tc Case) error {
sort.Sort(RRSet(resp.Answer))
sort.Sort(RRSet(resp.Ns))
sort.Sort(RRSet(resp.Extra))
if !Header(t, tc, resp) {
t.Logf("%v\n", resp)
return
if err := Header(tc, resp); err != nil {
return err
}
if !Section(t, tc, Answer, resp.Answer) {
t.Logf("%v\n", resp)
return
if err := Section(tc, Answer, resp.Answer); err != nil {
return err
}
if !Section(t, tc, Ns, resp.Ns) {
t.Logf("%v\n", resp)
return
if err := Section(tc, Ns, resp.Ns); err != nil {
return err
}
if !Section(t, tc, Extra, resp.Extra) {
t.Logf("%v\n", resp)
return
}
return
return Section(tc, Extra, resp.Extra)
}
// ErrorHandler returns a Handler that returns ServerFailure error when called.
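With these helpers now returning errors instead of taking a *testing.T, a plugin test reports failures itself. A self-contained sketch (the expected message is built by hand here; in a real test it would come from the plugin under test):

package example

import (
	"testing"

	"github.com/coredns/coredns/plugin/test"
	"github.com/miekg/dns"
)

func TestExampleReply(t *testing.T) {
	tc := test.Case{
		Qname: "example.org.", Qtype: dns.TypeA,
		Answer: []dns.RR{test.A("example.org. 300 IN A 127.0.0.1")},
	}

	resp := new(dns.Msg)
	resp.SetQuestion(tc.Qname, tc.Qtype)
	resp.Answer = []dns.RR{test.A("example.org. 300 IN A 127.0.0.1")}

	if err := test.SortAndCheck(resp, tc); err != nil {
		t.Error(err)
	}
}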

View File

@@ -10,7 +10,8 @@ import (
// remote will always be 10.240.0.1 and port 40212. The local address is always 127.0.0.1 and
// port 53.
type ResponseWriter struct {
TCP bool // if TCP is true we return an TCP connection instead of an UDP one.
TCP bool // if TCP is true we return a TCP connection instead of a UDP one.
RemoteIP string
}
// LocalAddr returns the local address, 127.0.0.1:53 (UDP, TCP if t.TCP is true).
@@ -23,9 +24,13 @@ func (t *ResponseWriter) LocalAddr() net.Addr {
return &net.UDPAddr{IP: ip, Port: port, Zone: ""}
}
// RemoteAddr returns the remote address, always 10.240.0.1:40212 (UDP, TCP is t.TCP is true).
// RemoteAddr returns the remote address, defaults to 10.240.0.1:40212 (UDP, TCP if t.TCP is true).
func (t *ResponseWriter) RemoteAddr() net.Addr {
ip := net.ParseIP("10.240.0.1")
remoteIP := "10.240.0.1"
if t.RemoteIP != "" {
remoteIP = t.RemoteIP
}
ip := net.ParseIP(remoteIP)
port := 40212
if t.TCP {
return &net.TCPAddr{IP: ip, Port: port, Zone: ""}
@@ -46,10 +51,10 @@ func (t *ResponseWriter) Close() error { return nil }
func (t *ResponseWriter) TsigStatus() error { return nil }
// TsigTimersOnly implement dns.ResponseWriter interface.
func (t *ResponseWriter) TsigTimersOnly(bool) { return }
func (t *ResponseWriter) TsigTimersOnly(bool) {}
// Hijack implement dns.ResponseWriter interface.
func (t *ResponseWriter) Hijack() { return }
func (t *ResponseWriter) Hijack() {}
// ResponseWriter6 returns fixed client and remote address in IPv6. The remote
// address is always fe80::42:ff:feca:4c65 and port 40212. The local address is always ::1 and port 53.

vendor/github.com/coredns/coredns/plugin/test/scrape.go generated vendored Normal file
View File

@@ -0,0 +1,265 @@
// Adapted by Miek Gieben for CoreDNS testing.
//
// License from prom2json
// Copyright 2014 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package test will scrape a target and you can inspect the variables.
// Basic usage:
//
// result := Scrape("http://localhost:9153/metrics")
// v := MetricValue("coredns_cache_capacity", result)
//
package test
import (
"fmt"
"io"
"mime"
"net/http"
"strconv"
"github.com/matttproud/golang_protobuf_extensions/pbutil"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)
type (
// MetricFamily holds a prometheus metric.
MetricFamily struct {
Name string `json:"name"`
Help string `json:"help"`
Type string `json:"type"`
Metrics []interface{} `json:"metrics,omitempty"` // Either metric or summary.
}
// metric is for all "single value" metrics.
metric struct {
Labels map[string]string `json:"labels,omitempty"`
Value string `json:"value"`
}
summary struct {
Labels map[string]string `json:"labels,omitempty"`
Quantiles map[string]string `json:"quantiles,omitempty"`
Count string `json:"count"`
Sum string `json:"sum"`
}
histogram struct {
Labels map[string]string `json:"labels,omitempty"`
Buckets map[string]string `json:"buckets,omitempty"`
Count string `json:"count"`
Sum string `json:"sum"`
}
)
// Scrape returns all the vars as a []*MetricFamily.
func Scrape(url string) []*MetricFamily {
mfChan := make(chan *dto.MetricFamily, 1024)
go fetchMetricFamilies(url, mfChan)
result := []*MetricFamily{}
for mf := range mfChan {
result = append(result, newMetricFamily(mf))
}
return result
}
// ScrapeMetricAsInt provides the sum of all metrics collected for the name and label provided.
// If a metric is not a numeric value, it is counted as 0.
func ScrapeMetricAsInt(addr string, name string, label string, nometricvalue int) int {
valueToInt := func(m metric) int {
v := m.Value
r, err := strconv.Atoi(v)
if err != nil {
return 0
}
return r
}
met := Scrape(fmt.Sprintf("http://%s/metrics", addr))
found := false
tot := 0
for _, mf := range met {
if mf.Name == name {
// Sum all metrics available
for _, m := range mf.Metrics {
if label == "" {
tot += valueToInt(m.(metric))
found = true
continue
}
for _, v := range m.(metric).Labels {
if v == label {
tot += valueToInt(m.(metric))
found = true
}
}
}
}
}
if !found {
return nometricvalue
}
return tot
}
// MetricValue returns the value associated with name as a string as well as the labels.
// It only returns the first metrics of the slice.
func MetricValue(name string, mfs []*MetricFamily) (string, map[string]string) {
for _, mf := range mfs {
if mf.Name == name {
// Only works with Gauge and Counter...
return mf.Metrics[0].(metric).Value, mf.Metrics[0].(metric).Labels
}
}
return "", nil
}
// MetricValueLabel returns the value for name *and* label *value*.
func MetricValueLabel(name, label string, mfs []*MetricFamily) (string, map[string]string) {
// bit hacky is this really handy...?
for _, mf := range mfs {
if mf.Name == name {
for _, m := range mf.Metrics {
for _, v := range m.(metric).Labels {
if v == label {
return m.(metric).Value, m.(metric).Labels
}
}
}
}
}
return "", nil
}
func newMetricFamily(dtoMF *dto.MetricFamily) *MetricFamily {
mf := &MetricFamily{
Name: dtoMF.GetName(),
Help: dtoMF.GetHelp(),
Type: dtoMF.GetType().String(),
Metrics: make([]interface{}, len(dtoMF.Metric)),
}
for i, m := range dtoMF.Metric {
if dtoMF.GetType() == dto.MetricType_SUMMARY {
mf.Metrics[i] = summary{
Labels: makeLabels(m),
Quantiles: makeQuantiles(m),
Count: fmt.Sprint(m.GetSummary().GetSampleCount()),
Sum: fmt.Sprint(m.GetSummary().GetSampleSum()),
}
} else if dtoMF.GetType() == dto.MetricType_HISTOGRAM {
mf.Metrics[i] = histogram{
Labels: makeLabels(m),
Buckets: makeBuckets(m),
Count: fmt.Sprint(m.GetHistogram().GetSampleCount()),
Sum: fmt.Sprint(m.GetSummary().GetSampleSum()),
}
} else {
mf.Metrics[i] = metric{
Labels: makeLabels(m),
Value: fmt.Sprint(value(m)),
}
}
}
return mf
}
func value(m *dto.Metric) float64 {
if m.Gauge != nil {
return m.GetGauge().GetValue()
}
if m.Counter != nil {
return m.GetCounter().GetValue()
}
if m.Untyped != nil {
return m.GetUntyped().GetValue()
}
return 0.
}
func makeLabels(m *dto.Metric) map[string]string {
result := map[string]string{}
for _, lp := range m.Label {
result[lp.GetName()] = lp.GetValue()
}
return result
}
func makeQuantiles(m *dto.Metric) map[string]string {
result := map[string]string{}
for _, q := range m.GetSummary().Quantile {
result[fmt.Sprint(q.GetQuantile())] = fmt.Sprint(q.GetValue())
}
return result
}
func makeBuckets(m *dto.Metric) map[string]string {
result := map[string]string{}
for _, b := range m.GetHistogram().Bucket {
result[fmt.Sprint(b.GetUpperBound())] = fmt.Sprint(b.GetCumulativeCount())
}
return result
}
func fetchMetricFamilies(url string, ch chan<- *dto.MetricFamily) {
defer close(ch)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return
}
req.Header.Add("Accept", acceptHeader)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return
}
mediatype, params, err := mime.ParseMediaType(resp.Header.Get("Content-Type"))
if err == nil && mediatype == "application/vnd.google.protobuf" &&
params["encoding"] == "delimited" &&
params["proto"] == "io.prometheus.client.MetricFamily" {
for {
mf := &dto.MetricFamily{}
if _, err = pbutil.ReadDelimited(resp.Body, mf); err != nil {
if err == io.EOF {
break
}
return
}
ch <- mf
}
} else {
// We could do further content-type checks here, but the
// fallback for now will anyway be the text format
// version 0.0.4, so just go for it and see if it works.
var parser expfmt.TextParser
metricFamilies, err := parser.TextToMetricFamilies(resp.Body)
if err != nil {
return
}
for _, mf := range metricFamilies {
ch <- mf
}
}
}
const acceptHeader = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.7,text/plain;version=0.0.4;q=0.3`

View File

@@ -1,52 +0,0 @@
package test
import (
"net"
"sync"
"time"
"github.com/miekg/dns"
)
// TCPServer starts a DNS server with a TCP listener on laddr.
func TCPServer(laddr string) (*dns.Server, string, error) {
l, err := net.Listen("tcp", laddr)
if err != nil {
return nil, "", err
}
server := &dns.Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
waitLock := sync.Mutex{}
waitLock.Lock()
server.NotifyStartedFunc = func() { waitLock.Unlock() }
go func() {
server.ActivateAndServe()
l.Close()
}()
waitLock.Lock()
return server, l.Addr().String(), nil
}
// UDPServer starts a DNS server with an UDP listener on laddr.
func UDPServer(laddr string) (*dns.Server, string, error) {
pc, err := net.ListenPacket("udp", laddr)
if err != nil {
return nil, "", err
}
server := &dns.Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour}
waitLock := sync.Mutex{}
waitLock.Lock()
server.NotifyStartedFunc = func() { waitLock.Unlock() }
go func() {
server.ActivateAndServe()
pc.Close()
}()
waitLock.Lock()
return server, pc.LocalAddr().String(), nil
}

vendor/github.com/coredns/coredns/request/edns0.go generated vendored Normal file
View File

@@ -0,0 +1,31 @@
package request
import (
"github.com/coredns/coredns/plugin/pkg/edns"
"github.com/miekg/dns"
)
func supportedOptions(o []dns.EDNS0) []dns.EDNS0 {
var supported = make([]dns.EDNS0, 0, 3)
// For as long as possible try to avoid looking up in the map, because that needs an RLock.
for _, opt := range o {
switch code := opt.Option(); code {
case dns.EDNS0NSID:
fallthrough
case dns.EDNS0EXPIRE:
fallthrough
case dns.EDNS0COOKIE:
fallthrough
case dns.EDNS0TCPKEEPALIVE:
fallthrough
case dns.EDNS0PADDING:
supported = append(supported, opt)
default:
if edns.SupportedOption(code) {
supported = append(supported, opt)
}
}
}
return supported
}

View File

@@ -2,7 +2,6 @@
package request
import (
"context"
"net"
"strings"
@@ -19,18 +18,16 @@ type Request struct {
// Optional lowercased zone of this query.
Zone string
Context context.Context
// Cache size after first call to Size or Do.
size int
do *bool // nil: nothing, otherwise *do value
// TODO(miek): opt record itself as well?
// Cache size after first call to Size or Do. If size is zero nothing has been cached yet.
// Both Size and Do set these values (and cache them).
size uint16 // UDP buffer size, or 64K in case of TCP.
do bool // DNSSEC OK value
// Caches
family int8 // transport's family.
name string // lowercase qname.
ip string // client's ip.
port string // client's port.
family int // transport's family.
localPort string // server's port.
localIP string // server's ip.
}
@@ -114,15 +111,11 @@ func (r *Request) RemoteAddr() string { return r.W.RemoteAddr().String() }
func (r *Request) LocalAddr() string { return r.W.LocalAddr().String() }
// Proto gets the protocol used as the transport. This will be udp or tcp.
func (r *Request) Proto() string { return Proto(r.W) }
// Proto gets the protocol used as the transport. This will be udp or tcp.
func Proto(w dns.ResponseWriter) string {
// FIXME(miek): why not a method on Request
if _, ok := w.RemoteAddr().(*net.UDPAddr); ok {
func (r *Request) Proto() string {
if _, ok := r.W.RemoteAddr().(*net.UDPAddr); ok {
return "udp"
}
if _, ok := w.RemoteAddr().(*net.TCPAddr); ok {
if _, ok := r.W.RemoteAddr().(*net.TCPAddr); ok {
return "tcp"
}
return "udp"
@@ -131,7 +124,7 @@ func Proto(w dns.ResponseWriter) string {
// Family returns the family of the transport, 1 for IPv4 and 2 for IPv6.
func (r *Request) Family() int {
if r.family != 0 {
return r.family
return int(r.family)
}
var a net.IP
@@ -145,26 +138,20 @@ func (r *Request) Family() int {
if a.To4() != nil {
r.family = 1
return r.family
return 1
}
r.family = 2
return r.family
return 2
}
// Do returns if the request has the DO (DNSSEC OK) bit set.
func (r *Request) Do() bool {
if r.do != nil {
return *r.do
if r.size != 0 {
return r.do
}
r.do = new(bool)
if o := r.Req.IsEdns0(); o != nil {
*r.do = o.Do()
return *r.do
}
*r.do = false
return false
r.Size()
return r.do
}
// Len returns the length in bytes in the request.
@@ -174,33 +161,29 @@ func (r *Request) Len() int { return r.Req.Len() }
// Or when the request was over TCP, we return the maximum allowed size of 64K.
func (r *Request) Size() int {
if r.size != 0 {
return r.size
return int(r.size)
}
size := 0
size := uint16(0)
if o := r.Req.IsEdns0(); o != nil {
if r.do == nil {
r.do = new(bool)
}
*r.do = o.Do()
size = int(o.UDPSize())
r.do = o.Do()
size = o.UDPSize()
}
// normalize size
size = edns.Size(r.Proto(), size)
r.size = size
return size
return int(size)
}
// SizeAndDo adds an OPT record that reflects the intent from the request.
// The returned bool indicated if an record was found and normalised.
// The returned bool indicates if a record was found and normalised.
func (r *Request) SizeAndDo(m *dns.Msg) bool {
o := r.Req.IsEdns0() // TODO(miek): speed this up
o := r.Req.IsEdns0()
if o == nil {
return false
}
odo := o.Do()
if mo := m.IsEdns0(); mo != nil {
mo.Hdr.Name = "."
mo.Hdr.Rrtype = dns.TypeOPT
@@ -208,130 +191,59 @@ func (r *Request) SizeAndDo(m *dns.Msg) bool {
mo.SetUDPSize(o.UDPSize())
mo.Hdr.Ttl &= 0xff00 // clear flags
if odo {
// Assume if the message m has options set, they are OK and represent what an upstream can do.
if o.Do() {
mo.SetDo()
}
return true
}
// Reuse the request's OPT record and tack it to m.
o.Hdr.Name = "."
o.Hdr.Rrtype = dns.TypeOPT
o.SetVersion(0)
o.Hdr.Ttl &= 0xff00 // clear flags
if odo {
o.SetDo()
if len(o.Option) > 0 {
o.Option = supportedOptions(o.Option)
}
m.Extra = append(m.Extra, o)
return true
}
// Result is the result of Scrub.
type Result int
const (
// ScrubIgnored is returned when Scrub did nothing to the message.
ScrubIgnored Result = iota
// ScrubExtra is returned when the reply has been scrubbed by removing RRs from the additional section.
ScrubExtra
// ScrubAnswer is returned when the reply has been scrubbed by removing RRs from the answer section.
ScrubAnswer
)
// Scrub scrubs the reply message so that it will fit the client's buffer. It will first
// check if the reply fits without compression and then *with* compression.
// Scrub will then use binary search to find a save cut off point in the additional section.
// If even *without* the additional section the reply still doesn't fit we
// repeat this process for the answer section. If we scrub the answer section
// we set the TC bit on the reply; indicating the client should retry over TCP.
// Note, the TC bit will be set regardless of protocol, even TCP message will
// get the bit, the client should then retry with pigeons.
func (r *Request) Scrub(reply *dns.Msg) (*dns.Msg, Result) {
size := r.Size()
func (r *Request) Scrub(reply *dns.Msg) *dns.Msg {
reply.Truncate(r.Size())
reply.Compress = false
rl := reply.Len()
if size >= rl {
return reply, ScrubIgnored
if reply.Compress {
return reply
}
reply.Compress = true
rl = reply.Len()
if size >= rl {
return reply, ScrubIgnored
}
// Account for the OPT record that gets added in SizeAndDo(), subtract that length.
sub := 0
if r.Do() {
sub = optLen
}
origExtra := reply.Extra
re := len(reply.Extra) - sub
l, m := 0, 0
for l < re {
m = (l + re) / 2
reply.Extra = origExtra[:m]
rl = reply.Len()
if rl < size {
l = m + 1
continue
if r.Proto() == "udp" {
rl := reply.Len()
// Last ditch attempt to avoid fragmentation, if the size is bigger than the v4/v6 UDP fragmentation
// limit and sent via UDP compress it (in the hope we go under that limit). Limits taken from NSD:
//
// .., 1480 (EDNS/IPv4), 1220 (EDNS/IPv6), or the advertised EDNS buffer size if that is
// smaller than the EDNS default.
// See: https://open.nlnetlabs.nl/pipermail/nsd-users/2011-November/001278.html
if rl > 1480 && r.Family() == 1 {
reply.Compress = true
}
if rl > size {
re = m - 1
continue
}
if rl == size {
break
if rl > 1220 && r.Family() == 2 {
reply.Compress = true
}
}
// We may come out of this loop with one rotation too many, m makes it too large, but m-1 works.
if rl > size && m > 0 {
reply.Extra = origExtra[:m-1]
rl = reply.Len()
}
if rl < size {
r.SizeAndDo(reply)
return reply, ScrubExtra
}
origAnswer := reply.Answer
ra := len(reply.Answer)
l, m = 0, 0
for l < ra {
m = (l + ra) / 2
reply.Answer = origAnswer[:m]
rl = reply.Len()
if rl < size {
l = m + 1
continue
}
if rl > size {
ra = m - 1
continue
}
if rl == size {
break
}
}
// We may come out of this loop with one rotation too many, m makes it too large, but m-1 works.
if rl > size && m > 0 {
reply.Answer = origAnswer[:m-1]
// No need to recalc length, as we don't use it. We set truncated anyway. Doing
// this extra m-1 step does make it fit in the client's buffer however.
}
// It now fits, but Truncated. We can't call sizeAndDo() because that adds a new record (OPT)
// in the additional section.
reply.Truncated = true
return reply, ScrubAnswer
return reply
}
// Type returns the type of the question as a string. If the request is malformed
// the empty string is returned.
// Type returns the type of the question as a string. If the request is malformed the empty string is returned.
func (r *Request) Type() string {
if r.Req == nil {
return ""
@@ -418,14 +330,6 @@ func (r *Request) QClass() uint16 {
}
// ErrorMessage returns an error message suitable for sending
// back to the client.
func (r *Request) ErrorMessage(rcode int) *dns.Msg {
m := new(dns.Msg)
m.SetRcode(r.Req, rcode)
return m
}
// Clear clears all caching from Request s.
func (r *Request) Clear() {
r.name = ""
@@ -443,7 +347,7 @@ func (r *Request) Match(reply *dns.Msg) bool {
return false
}
if reply.Response == false {
if !reply.Response {
return false
}
@@ -457,5 +361,3 @@ func (r *Request) Match(reply *dns.Msg) bool {
return true
}
const optLen = 12 // OPT record length.
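Scrub now delegates to dns.Msg.Truncate and only keeps the last-ditch compression heuristic for UDP, so the usual reply path in a plugin becomes a sketch like this (hypothetical helper, answer section elided):

package example

import (
	"github.com/coredns/coredns/request"
	"github.com/miekg/dns"
)

// reply shows the typical SizeAndDo/Scrub sequence before writing a response.
func reply(w dns.ResponseWriter, r *dns.Msg) {
	state := request.Request{W: w, Req: r}

	m := new(dns.Msg)
	m.SetReply(r)
	m.Authoritative = true
	// ... fill in m.Answer ...

	state.SizeAndDo(m) // mirror the request's OPT record (size, DO bit, supported options)
	m = state.Scrub(m) // fit the reply to the client's advertised buffer
	w.WriteMsg(m)
}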

vendor/github.com/coredns/coredns/request/writer.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
package request
import "github.com/miekg/dns"
// ScrubWriter will, when writing the message, call scrub to make it fit the client's buffer.
type ScrubWriter struct {
dns.ResponseWriter
req *dns.Msg // original request
}
// NewScrubWriter returns a new and initialized ScrubWriter.
func NewScrubWriter(req *dns.Msg, w dns.ResponseWriter) *ScrubWriter { return &ScrubWriter{w, req} }
// WriteMsg overrides the default implementation of the underlying dns.ResponseWriter and calls
// scrub on the message m and will then write it to the client.
func (s *ScrubWriter) WriteMsg(m *dns.Msg) error {
state := Request{Req: s.req, W: s.ResponseWriter}
state.SizeAndDo(m)
state.Scrub(m)
return s.ResponseWriter.WriteMsg(m)
}
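A sketch of wrapping the ResponseWriter with the new ScrubWriter in a hypothetical chained plugin, so whatever the rest of the chain writes is scrubbed to the client's buffer:

package example

import (
	"context"

	"github.com/coredns/coredns/plugin"
	"github.com/coredns/coredns/request"
	"github.com/miekg/dns"
)

// Scrubbed is a hypothetical plugin that only swaps in a ScrubWriter.
type Scrubbed struct{ Next plugin.Handler }

func (s Scrubbed) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	sw := request.NewScrubWriter(r, w)
	return plugin.NextOrFailure(s.Name(), s.Next, ctx, sw, r)
}

func (s Scrubbed) Name() string { return "scrubbed" }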