TUN-528: Move cloudflared into a separate repo

Areg Harutyunyan
2018-05-01 18:45:06 -05:00
parent e8c621a648
commit d06fc520c7
4726 changed files with 1763680 additions and 0 deletions

8
vendor/github.com/coredns/coredns/.codecov.yml generated vendored Normal file

@@ -0,0 +1,8 @@
coverage:
status:
project:
default:
target: 50%
threshold: null
patch: false
changes: false

12
vendor/github.com/coredns/coredns/.dockerignore generated vendored Normal file

@@ -0,0 +1,12 @@
.git/*
core/*
coremain/*
hooks/*
man/*
pb/*
plugin/*
request/*
test/*
vendor/*
build/*
release/*


@@ -0,0 +1,10 @@
<!--
Thank you for contributing to CoreDNS!
Please provide the following information to help us make the most of your pull request:
-->
### 1. Why is this pull request needed and what does it do?
### 2. Which issues (if any) are related?
### 3. Which documentation changes (if any) need to be made?

19
vendor/github.com/coredns/coredns/.github/stale.yml generated vendored Normal file

@@ -0,0 +1,19 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 100
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 21
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
- later
- bug
# Label to use when marking an issue as stale
staleLabel: wontfix-stalebot
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

12
vendor/github.com/coredns/coredns/.gitignore generated vendored Normal file

@@ -0,0 +1,12 @@
query.log
Corefile
*.swp
coredns
coredns.exe
coredns.exe~
debug
debug.test
kubectl
go-test-tmpfile*
coverage.txt
.idea

8
vendor/github.com/coredns/coredns/.presubmit/context generated vendored Executable file

@@ -0,0 +1,8 @@
#!/bin/bash
echo "** presubmit/$(basename $0)"
if grep -lr "golang.org/x/net/context" "$@"; then
echo "** presubmit/$(basename $0): please use std lib's 'context' instead"
exit 1
fi
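For illustration, a minimal hypothetical snippet that satisfies this check by importing the standard library's `context` package instead of `golang.org/x/net/context`:

```go
package example

// Hypothetical snippet: import the standard library's context package,
// which is what the presubmit check above enforces.
import "context"

func do(ctx context.Context) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	return ctx.Err()
}
```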


@@ -0,0 +1,9 @@
#!/bin/bash
echo "** presubmit/$(basename $0)"
for dir in "$@"; do
if find $dir | grep '-'; then
echo "** presubmit/$(basename $0): please use an underscore in filenames instead of a hyphen"
fi
done

19
vendor/github.com/coredns/coredns/.presubmit/test-lowercase generated vendored Executable file

@@ -0,0 +1,19 @@
#!/bin/bash
echo "** presubmit/$(basename $0)"
# Get the tests that call t.* without capitalizing the first char - seems we standardized on that.
if egrep -r '\bt\.Fatal.?\("[a-z]' "$@"; then
echo "** presubmit/$(basename $0): please start with an upper case letter when using t.Fatal*()"
exit 1
fi
if egrep -r '\bt\.Error.?\("[a-z]' "$@"; then
echo "** presubmit/$(basename $0): please start with an upper case letter when using t.Error*()"
exit 1
fi
if egrep -r '\bt\.Log.?\("[a-z]' "$@"; then
echo "** presubmit/$(basename $0): please start with an upper case letter when using t.Log*()"
exit 1
fi
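As a hypothetical illustration of the convention these checks enforce, test messages should start with an upper-case letter:

```go
package example

import "testing"

func TestAddition(t *testing.T) {
	if got := 1 + 1; got != 2 {
		// Message starts with an upper-case letter, as the presubmit check expects.
		t.Fatalf("Expected 2, got %d", got)
	}
}
```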

44
vendor/github.com/coredns/coredns/.travis.yml generated vendored Normal file

@@ -0,0 +1,44 @@
sudo: required
# Trusty distribution is much faster when sudo is required
dist: trusty
services:
- docker
language: go
go:
- "1.10.x"
go_import_path: github.com/coredns/coredns
git:
depth: 3
env:
- TEST_TYPE=coverage ETCD_VERSION=2.3.1
- TEST_TYPE=integration ETCD_VERSION=2.3.1
- TEST_TYPE=core ETCD_VERSION=2.3.1
- TEST_TYPE=plugin ETCD_VERSION=2.3.1
# In the Travis VM-based build environment, IPv6 networking is not
# enabled by default. The sysctl operations below enable IPv6.
# IPv6 is needed by some of the CoreDNS test cases. The VM environment
# is needed to have access to sudo in the test environment. Sudo is
# needed to have docker in the test environment.
before_install:
- cat /proc/net/if_inet6
- uname -a
- sudo bash -c 'if [ `cat /proc/net/if_inet6 | wc -l` = "0" ]; then echo "Enabling IPv6" ; sysctl net.ipv6.conf.all.disable_ipv6=0 ; sysctl net.ipv6.conf.default.disable_ipv6=0 ; sysctl net.ipv6.conf.lo.disable_ipv6=0 ; fi'
- cat /proc/net/if_inet6
- env
before_script:
- docker run -d --net=host --name=etcd quay.io/coreos/etcd:v$ETCD_VERSION
script:
- make TEST_TYPE=$TEST_TYPE travis
after_success:
- bash <(curl -s https://codecov.io/bash)

9
vendor/github.com/coredns/coredns/ADOPTERS.md generated vendored Normal file

@@ -0,0 +1,9 @@
* [Infoblox](https://www.infoblox.com) uses CoreDNS in its Active Trust Cloud SaaS service, as well as for Kubernetes cluster DNS.
* [Sky Betting & Gaming](http://engineering.skybettingandgaming.com) uses CoreDNS for Kubernetes cluster DNS.
* [Kismia](https://kismia.com) uses CoreDNS for Kubernetes cluster DNS.
* [Admiral](https://getadmiral.com) uses CoreDNS to handle geographic DNS requests for our public-facing microservices.
* [Qunar](https://qunar.com) uses CoreDNS for service discovery of its GPU machine learning cloud with TensorFlow and Kubernetes.
* [seansean2](https://web.mit.edu) uses CoreDNS in production at MIT for DNS.
* [Tradeshift](https://tradeshift.com/) uses CoreDNS to look up company identifiers across multiple shards/regions/zones.
* [SoundCloud](https://soundcloud.com/) uses CoreDNS as internal cache+proxy in Kubernetes clusters to handle hundreds of thousands of DNS service discovery requests per second.
* [Z Lab](https://zlab.co.jp) uses CoreDNS in production in combination with Consul and Kubernetes clusters.

43
vendor/github.com/coredns/coredns/CODE-OF-CONDUCT.md generated vendored Normal file

@@ -0,0 +1,43 @@
CNCF Community Code of Conduct
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a CNCF project maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 1.2.0, available at
http://contributor-covenant.org/version/1/2/0/
### CNCF Events Code of Conduct
CNCF events are governed by the Linux Foundation [Code of Conduct](http://events.linuxfoundation.org/events/cloudnativecon/attend/code-of-conduct) available on the event page. This is designed to be compatible with the above policy and also includes more details on responding to incidents.

116
vendor/github.com/coredns/coredns/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,116 @@
# Contributing to CoreDNS
Welcome! Our community focuses on helping others and making CoreDNS the best it
can be. We gladly accept contributions and encourage you to get involved!
## Bug reports
First, please [search this repository](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93)
with a variety of keywords to ensure your bug is not already reported.
If not, [open an issue](https://github.com/coredns/coredns/issues) and answer the
questions so we can understand and reproduce the problematic behavior.
The burden is on you to convince us that it is actually a bug in CoreDNS. This is
easiest to do when you write clear, concise instructions so we can reproduce
the behavior (even if it seems obvious). The more detailed and specific you are,
the faster we will be able to help you. Check out
[How to Report Bugs Effectively](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html).
Please be kind. :smile: Remember that CoreDNS comes at no cost to you, and you're
getting free help.
We are using the [Stale bot](https://github.com/probot/stale) to close inactive issues. If you feel an
issue should not be closed, you can add the "pinned" label to it or just update the bug every now and
then.
## Minor improvements and new tests
Submit [pull requests](https://github.com/coredns/coredns/pulls) at any time. Make
sure to write tests to assert your change is working properly and is thoroughly
covered.
## Proposals, suggestions, ideas, new features
First, please [search](https://github.com/coredns/coredns/search?q=&type=Issues&utf8=%E2%9C%93)
with a variety of keywords to ensure your suggestion/proposal is new.
If so, you may open either an issue or a pull request for discussion and
feedback.
The advantage of issues is that you don't have to spend time actually
implementing your idea, but you should still describe it thoroughly. The
advantage of a pull request is that we can immediately see the impact the change
will have on the project, what the code will look like, and how to improve it.
The disadvantage of pull requests is that they are unlikely to get accepted
without significant changes, or may be rejected entirely. Don't worry, that
won't happen without an open discussion first.
If you are going to spend significant time implementing code for a pull request,
it is best to open an issue first to "claim" it and get feedback before you invest
a lot of time.
## Vulnerabilities
If you've found a vulnerability that is serious, please email: <team@coredns.io>.
If it's not a big deal, a pull request will probably be faster.
## Git Hook
We use `golint` and `go vet` as tools to warn us about things (note that golint is sometimes obnoxious,
but still helpful). Add the following script as a git `post-commit` hook in `.git/hooks/post-commit` and
make it executable.
~~~ sh
#!/bin/bash
# <https://git-scm.com/docs/githooks>:
# The script takes no parameters and its exit status does not affect the commit in any way. You can
# use git rev-parse HEAD to get the new commit's SHA1 hash, or you can use git log -1 HEAD to get
# all of its information.
for d in *; do
if [[ "$d" == "vendor" ]]; then
continue
fi
if [[ "$d" == "logo" ]]; then
continue
fi
if [[ ! -d "$d" ]]; then
continue
fi
golint "$d"/...
done
~~~
## Updating Dependencies
We use Golang's [`dep`](https://github.com/golang/dep) as the tool to manage vendor dependencies.
The tool can be obtained with:
```sh
$ go get -u github.com/golang/dep/cmd/dep
```
Use the following to update the locked versions of all dependencies
```sh
$ dep ensure -update
```
To add a dependency to the project, you might run
```sh
$ dep ensure github.com/pkg/errors
```
After the dependencies have been updated or added, you might run the following to
prune vendored packages:
```sh
$ dep prune
```
Please refer to Golang's [`dep`](https://github.com/golang/dep) for more details.
# Thank you
Thanks for your help! CoreDNS would not be what it is today without your contributions.

12
vendor/github.com/coredns/coredns/Dockerfile generated vendored Normal file

@@ -0,0 +1,12 @@
FROM debian:stable-slim
RUN apt-get update && apt-get -uy upgrade
RUN apt-get -y install ca-certificates && update-ca-certificates
FROM scratch
COPY --from=0 /etc/ssl/certs /etc/ssl/certs
ADD coredns /coredns
EXPOSE 53 53/udp
ENTRYPOINT ["/coredns"]

565
vendor/github.com/coredns/coredns/Gopkg.lock generated vendored Normal file

@@ -0,0 +1,565 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/DataDog/dd-trace-go"
packages = [
"opentracing",
"tracer",
"tracer/ext"
]
revision = "27617015d45e6cd550b9a7ac7715c37cc2f7d020"
version = "v0.6.1"
[[projects]]
name = "github.com/Shopify/sarama"
packages = ["."]
revision = "f7be6aa2bc7b2e38edf816b08b582782194a1c02"
version = "v1.16.0"
[[projects]]
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
revision = "e59b73d3c2bf1c328ccb78e683c0462fa1a473c7"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = [
"aws",
"aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/sdkio",
"internal/sdkrand",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/route53",
"service/route53/route53iface",
"service/sts"
]
revision = "ee7b4b1162937cba700de23bd90acb742982e626"
version = "v1.13.50"
[[projects]]
name = "github.com/coreos/etcd"
packages = [
"client",
"pkg/pathutil",
"pkg/srv",
"pkg/types",
"version"
]
revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af"
version = "v3.3.5"
[[projects]]
name = "github.com/coreos/go-semver"
packages = ["semver"]
revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
version = "v0.2.0"
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/dnstap/golang-dnstap"
packages = ["."]
revision = "2cf77a2b5e11ac8d0ba3892772ac8e1f7b528344"
[[projects]]
name = "github.com/eapache/go-resiliency"
packages = ["breaker"]
revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/eapache/go-xerial-snappy"
packages = ["."]
revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
[[projects]]
name = "github.com/eapache/queue"
packages = ["."]
revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/farsightsec/golang-framestream"
packages = ["."]
revision = "c06a5734334d9629b3db143d74b47eb94ea68612"
[[projects]]
name = "github.com/ghodss/yaml"
packages = ["."]
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
version = "v1.0.0"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "6529cf7c58879c08d927016dde4477f18a0634cb"
version = "v1.36.0"
[[projects]]
name = "github.com/go-logfmt/logfmt"
packages = ["."]
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0"
[[projects]]
name = "github.com/gogo/protobuf"
packages = [
"proto",
"sortkeys"
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0"
[[projects]]
branch = "master"
name = "github.com/golang/glog"
packages = ["."]
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]]
name = "github.com/golang/protobuf"
packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/golang/snappy"
packages = ["."]
revision = "553a641470496b2327abcac10b36396bd98e45c9"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
name = "github.com/googleapis/gnostic"
packages = [
"OpenAPIv2",
"compiler",
"extensions"
]
revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0"
[[projects]]
branch = "master"
name = "github.com/grpc-ecosystem/grpc-opentracing"
packages = ["go/otgrpc"]
revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746"
[[projects]]
branch = "master"
name = "github.com/hashicorp/golang-lru"
packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]]
branch = "master"
name = "github.com/howeyc/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
name = "github.com/imdario/mergo"
packages = ["."]
revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c"
version = "v0.3.4"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
version = "1.1.3"
[[projects]]
branch = "master"
name = "github.com/kr/logfmt"
packages = ["."]
revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"
[[projects]]
name = "github.com/matttproud/golang_protobuf_extensions"
packages = ["pbutil"]
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0"
[[projects]]
name = "github.com/modern-go/concurrent"
packages = ["."]
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
version = "1.0.3"
[[projects]]
name = "github.com/modern-go/reflect2"
packages = ["."]
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
version = "1.0.0"
[[projects]]
branch = "master"
name = "github.com/opentracing-contrib/go-observer"
packages = ["."]
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"
[[projects]]
name = "github.com/opentracing/opentracing-go"
packages = [
".",
"ext",
"log"
]
revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38"
version = "v1.0.2"
[[projects]]
name = "github.com/openzipkin/zipkin-go-opentracing"
packages = [
".",
"flag",
"thrift/gen-go/scribe",
"thrift/gen-go/zipkincore",
"types",
"wire"
]
revision = "26cf9707480e6b90e5eff22cf0bbf05319154232"
version = "v0.3.4"
[[projects]]
name = "github.com/pierrec/lz4"
packages = ["."]
revision = "2fcda4cb7018ce05a25959d2fe08c83e3329f169"
version = "v1.1"
[[projects]]
name = "github.com/pierrec/xxHash"
packages = ["xxHash32"]
revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
version = "v0.1.1"
[[projects]]
branch = "master"
name = "github.com/prometheus/client_model"
packages = ["go"]
revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c"
[[projects]]
branch = "master"
name = "github.com/prometheus/common"
packages = [
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c"
[[projects]]
branch = "master"
name = "github.com/rcrowley/go-metrics"
packages = ["."]
revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
[[projects]]
name = "github.com/spf13/pflag"
packages = ["."]
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
version = "v1.0.1"
[[projects]]
name = "github.com/ugorji/go"
packages = ["codec"]
revision = "f3cacc17c85ecb7f1b6a9e373ee85d1480919868"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
revision = "1a580b3eff7814fc9b40602fd35256c63b50f491"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = [
"context",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace"
]
revision = "2491c5de3490fced2f6cff376127c667efeed857"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = [
"unix",
"windows"
]
revision = "7c87d13f8e835d2fb3a70a2912c811ed0c1d241b"
[[projects]]
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/time"
packages = ["rate"]
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "7bb2a897381c9c5ab2aeb8614f758d7766af68ff"
[[projects]]
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"channelz",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "41344da2231b913fa3d983840a57a6b1b7b631a1"
version = "v1.12.0"
[[projects]]
name = "gopkg.in/inf.v0"
packages = ["."]
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
version = "v0.9.1"
[[projects]]
name = "gopkg.in/yaml.v2"
packages = ["."]
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
version = "v2.2.1"
[[projects]]
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "73d903622b7391f3312dcbac6483fed484e185f8"
[[projects]]
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/reflect"
]
revision = "302974c03f7e50f16561ba237db776ab93594ef6"
[[projects]]
name = "k8s.io/client-go"
packages = [
"discovery",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/version",
"plugin/pkg/client/auth/exec",
"rest",
"rest/watch",
"tools/auth",
"tools/cache",
"tools/clientcmd",
"tools/clientcmd/api",
"tools/clientcmd/api/latest",
"tools/clientcmd/api/v1",
"tools/metrics",
"tools/pager",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/flowcontrol",
"util/homedir",
"util/integer",
"util/retry"
]
revision = "23781f4d6632d88e869066eaebb743857aa1ef9b"
version = "v7.0.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "dbbdcbcd4c0e11f040230e43a145f113ed7e67ff2c52b2a5830e117c16a23630"
solver-name = "gps-cdcl"
solver-version = 1

28
vendor/github.com/coredns/coredns/Gopkg.toml generated vendored Normal file

@@ -0,0 +1,28 @@
ignored = [
"github.com/mholt/caddy",
"github.com/mholt/caddy/caddyfile",
"github.com/mholt/caddy/startupshutdown",
"github.com/mholt/caddy/onevent",
"github.com/miekg/dns",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
]
# client-go 6.0.0 uses apimachinery 180eddb345a5be3a157cea1c624700ad5bd27b8f
# and api 11147472b7c934c474a2c484af3c0c5210b7a3af (see Godep.json). go dep
# is unable to match Godep.json automatically, so we have to specify the revisions here.
[[constraint]]
name = "k8s.io/client-go"
version = "v7.0.0"
[[override]]
name = "k8s.io/apimachinery"
revision = "302974c03f7e50f16561ba237db776ab93594ef6"
[[override]]
name = "k8s.io/api"
revision = "73d903622b7391f3312dcbac6483fed484e185f8"
[[override]]
name = "github.com/apache/thrift"
revision = "e59b73d3c2bf1c328ccb78e683c0462fa1a473c7"
[[override]]
name = "github.com/ugorji/go"
revision = "f3cacc17c85ecb7f1b6a9e373ee85d1480919868"

201
vendor/github.com/coredns/coredns/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

100
vendor/github.com/coredns/coredns/Makefile generated vendored Normal file

@@ -0,0 +1,100 @@
# Makefile for building CoreDNS
GITCOMMIT:=$(shell git describe --dirty --always)
BINARY:=coredns
SYSTEM:=
CHECKS:=check godeps
VERBOSE:=-v
GOPATH?=$(HOME)/go
PRESUBMIT:=core coremain plugin test request
all: coredns
.PHONY: coredns
coredns: $(CHECKS)
CGO_ENABLED=0 $(SYSTEM) go build $(VERBOSE) -ldflags="-s -w -X github.com/coredns/coredns/coremain.GitCommit=$(GITCOMMIT)" -o $(BINARY)
.PHONY: check
check: presubmit goimports core/zplugin.go core/dnsserver/zdirectives.go godeps linter
.PHONY: test
test: check
go test -race $(VERBOSE) ./test ./plugin/...
.PHONY: testk8s
testk8s: check
go test -race $(VERBOSE) -tags=k8s -run 'TestKubernetes' ./test ./plugin/kubernetes/...
.PHONY: godeps
godeps:
# Not vendored so external plugins compile, avoiding:
# cannot use c (type *"github.com/mholt/caddy".Controller) as type
# *"github.com/coredns/coredns/vendor/github.com/mholt/caddy".Controller like errors.
(cd $(GOPATH)/src/github.com/mholt/caddy 2>/dev/null && git checkout -q master 2>/dev/null || true)
(cd $(GOPATH)/src/github.com/miekg/dns 2>/dev/null && git checkout -q master 2>/dev/null || true)
(cd $(GOPATH)/src/github.com/prometheus/client_golang 2>/dev/null && git checkout -q master 2>/dev/null || true)
go get -u github.com/mholt/caddy
go get -u github.com/miekg/dns
go get -u github.com/prometheus/client_golang/prometheus/promhttp
go get -u github.com/prometheus/client_golang/prometheus
(cd $(GOPATH)/src/github.com/mholt/caddy && git checkout -q v0.10.11)
(cd $(GOPATH)/src/github.com/miekg/dns && git checkout -q v1.0.8)
(cd $(GOPATH)/src/github.com/prometheus/client_golang && git checkout -q v0.8.0)
.PHONY: travis
travis: check
ifeq ($(TEST_TYPE),core)
( cd request ; go test -v -tags 'etcd' -race ./... )
( cd core ; go test -v -tags 'etcd' -race ./... )
( cd coremain ; go test -v -tags 'etcd' -race ./... )
endif
ifeq ($(TEST_TYPE),integration)
( cd test ; go test -v -tags 'etcd' -race ./... )
endif
ifeq ($(TEST_TYPE),plugin)
( cd plugin ; go test -v -tags 'etcd' -race ./... )
endif
ifeq ($(TEST_TYPE),coverage)
for d in `go list ./... | grep -v vendor`; do \
t=$$(date +%s); \
go test -i -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
go test -v -tags 'etcd' -coverprofile=cover.out -covermode=atomic $$d || exit 1; \
echo "Coverage test $$d took $$(($$(date +%s)-t)) seconds"; \
if [ -f cover.out ]; then \
cat cover.out >> coverage.txt; \
rm cover.out; \
fi; \
done
endif
core/zplugin.go core/dnsserver/zdirectives.go: plugin.cfg
go generate coredns.go
.PHONY: gen
gen:
go generate coredns.go
.PHONY: pb
pb:
$(MAKE) -C pb
.PHONY: linter
linter:
go get -u github.com/alecthomas/gometalinter
gometalinter --install golint
gometalinter --deadline=2m --disable-all --enable=gofmt --enable=golint --enable=vet --vendor --exclude=^pb/ ./...
.PHONY: goimports
goimports:
go get -u github.com/alecthomas/gometalinter
gometalinter --install goimports
( gometalinter --deadline=2m --disable-all --enable=goimports --vendor --exclude=^pb/ ./... || true )
# Presubmit runs all scripts in .presubmit; any non-zero exit code will fail the build.
.PHONY: presubmit
presubmit:
@for pre in $(PWD)/.presubmit/* ; do "$$pre" $(PRESUBMIT); done
.PHONY: clean
clean:
go clean
rm -f coredns

41
vendor/github.com/coredns/coredns/Makefile.doc generated vendored Normal file

@@ -0,0 +1,41 @@
ORG:=organization=CoreDNS
RONN:=ronn -r
PLUGINS:=$(wildcard plugin/*/README.md)
READMES:=$(subst plugin/,,$(PLUGINS))
READMES:=$(subst /README.md,,$(READMES))
PLUGINS:=$(subst plugin/,coredns-,$(PLUGINS))
PLUGINS:=$(subst /README.md,(7),$(PLUGINS))
ifeq (, $(shell which ronn))
$(warning "No ronn in $$PATH, exiting")
all:
@echo "noop"
else
all: man/coredns.1 man/corefile.5 plugins
endif
man/coredns.1: coredns.1.md
sed -e 's/^\(#.*\)/\U\1/' $< > $@.md
sed -i -e "s/@@PLUGINS@@/$(PLUGINS)/" $@.md
$(RONN) --$(ORG) --manual='CoreDNS' $@.md
rm $@.md
man/corefile.5: corefile.5.md
sed -e 's/^\(#.*\)/\U\1/' $< > $@.md
$(RONN) --$(ORG) --manual='CoreDNS' $@.md
rm $@.md
.PHONY: plugins
plugins:
for README in $(READMES); do \
$(MAKE) -f Makefile.doc man/coredns-$$README.7; \
done
man/coredns-%.7: plugin/%/README.md
sed -e 's/^\(#.*\)/\U\1/' $< > $@.md
$(RONN) --$(ORG) --manual='CoreDNS plugins' $@.md
rm $@.md
.PHONY: clean
clean:
rm -f man/*

33
vendor/github.com/coredns/coredns/Makefile.fuzz generated vendored Normal file

@@ -0,0 +1,33 @@
# Makefile for fuzzing
#
# Uses go-fuzz and needs the tools installed. For each fuzz.go in a plugin's directory
# you can start the fuzzing with: make -f Makefile.fuzz <plugin>
# e.g.
#
# make -f Makefile.fuzz proxy
#
# Each plugin that wants to join the fuzzing fray only needs to add a fuzz.go that calls
# the plugin's ServeDNS and uses plugin/pkg/fuzz for the Do function.
#
# Installing go-fuzz
#$ go get github.com/dvyukov/go-fuzz/go-fuzz
#$ go get github.com/dvyukov/go-fuzz/go-fuzz-build
REPO:="github.com/coredns/coredns/plugin"
FUZZ:=$(dir $(wildcard plugin/*/fuzz.go)) # plugin/cache/
PLUGINS:=$(foreach f,$(FUZZ),$(subst plugin, ,$(f:/=))) # > /cache
PLUGINS:=$(foreach f,$(PLUGINS),$(subst /, ,$(f))) # > cache
.PHONY: echo
echo:
@echo fuzz targets: $(PLUGINS)
.PHONY: $(PLUGINS)
$(PLUGINS): echo
go-fuzz-build -tags fuzz $(REPO)/$(@)
go-fuzz -bin=./$(@)-fuzz.zip -workdir=fuzz/$(@)
.PHONY: clean
clean:
rm *-fuzz.zip

151
vendor/github.com/coredns/coredns/Makefile.release generated vendored Normal file

@@ -0,0 +1,151 @@
# Makefile for releasing CoreDNS
#
# The release is controlled from coremain/version.go. The version found there is
# used to tag the git repo and to build the assets that are uploaded to github
# (after some sanity checks).
#
# The release should be accompanied by release notes published on coredns.io.
# For example: https://coredns.io/2016/09/18/coredns-001-release/ Also send an
# email to coredns-discuss@ to announce the new version.
#
# We use https://github.com/progrium/gh-release to automate GitHub stuff; be sure
# to have that binary in your path.
#
# Get a list of authors for this release with:
#
# git log --pretty=format:'%an' v001..master | sort -u (where v001 is the
# previous release, obviously you'll need to adjust this)
#
# Steps:
#
# * Get an access token: https://help.github.com/articles/creating-an-access-token-for-command-line-use/
# * export GITHUB_ACCESS_TOKEN=<token>
# * Up the version in coremain/version.go
# * Run: make DOCKER=coredns -f Makefile.release release
# * runs make -f Makefile.doc
# * this is a noop if ronn can't be found
# * perform a go generate
# * will *commit* your change(s) with 'Release $VERSION'
# * push to github
# * build the release and do all that fluff.
#
# Steps for docker:
#
# * Login into docker: docker login (should have push creds for coredns registry)
# * We use the manifest-tool from https://github.com/estesp/manifest-tool to build the manifest list
# * Make sure you have the binary in your path.
#
# * Run: make DOCKER=coredns -f Makefile.release docker
#
# Docker push should happen after you make the new release and upload it to GitHub.
#
# If you want to push to a personal registry, point DOCKER to something else, i.e.
#
# * make DOCKER=miek -f Makefile.release docker
EMPTY:=
SPACE:=$(EMPTY) $(EMPTY)
COMMA:=$(EMPTY),$(EMPTY)
ifeq (, $(shell which gh-release))
$(error "No gh-release in $$PATH, install with: go get github.com/progrium/gh-release")
endif
ifeq (, $(shell which manifest-tool))
$(error "No manifest-tool in $$PATH, install with: go get github.com/estesp/manifest-tool")
endif
DOCKER:=
NAME:=coredns
VERSION:=$(shell grep 'CoreVersion' coremain/version.go | awk '{ print $$3 }' | tr -d '"')
GITHUB:=coredns
DOCKER_IMAGE_NAME:=$(DOCKER)/$(NAME)
GITCOMMIT:=$(shell git describe --dirty --always)
LINUX_ARCH:=amd64 arm arm64 ppc64le s390x
PLATFORMS:=$(subst $(SPACE),$(COMMA),$(foreach arch,$(LINUX_ARCH),linux/$(arch)))
ifeq ($(DOCKER),)
$(error "Please specify Docker registry to use. Use DOCKER=coredns for releases")
endif
all:
@echo Use the 'release' target to start a release
release: pre commit push build tar upload
docker: docker-build docker-push
.PHONY: pre
pre:
go generate
$(MAKE) -f Makefile.doc
.PHONY: push
push:
@echo Pushing release to master
git push
.PHONY: commit
commit:
@echo Committing
git commit -am"Release $(VERSION)"
.PHONY: build
build:
@echo Cleaning old builds
@rm -rf build && mkdir build
@echo Building: darwin $(VERSION)
mkdir -p build/darwin/amd64 && $(MAKE) coredns BINARY=build/darwin/amd64/$(NAME) SYSTEM="GOOS=darwin GOARCH=amd64" CHECKS="godeps" VERBOSE=""
@echo Building: windows $(VERSION)
mkdir -p build/windows/amd64 && $(MAKE) coredns BINARY=build/windows/amd64/$(NAME) SYSTEM="GOOS=windows GOARCH=amd64" CHECKS="godeps" VERBOSE=""
@echo Building: linux/$(LINUX_ARCH) $(VERSION) ;\
for arch in $(LINUX_ARCH); do \
mkdir -p build/linux/$$arch && $(MAKE) coredns BINARY=build/linux/$$arch/$(NAME) SYSTEM="GOOS=linux GOARCH=$$arch" CHECKS="godeps" VERBOSE="" ;\
done
.PHONY: tar
tar:
@echo Cleaning old releases
@rm -rf release && mkdir release
tar -zcf release/$(NAME)_$(VERSION)_darwin_amd64.tgz -C build/darwin/amd64 $(NAME)
tar -zcf release/$(NAME)_$(VERSION)_windows_amd64.tgz -C build/windows/amd64 $(NAME)
for arch in $(LINUX_ARCH); do \
tar -zcf release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C build/linux/$$arch $(NAME) ;\
done
.PHONY: upload
upload:
@echo Releasing: $(VERSION)
gh-release create $(GITHUB)/$(NAME) $(VERSION)
.PHONY: docker-build
docker-build: tar
# Steps:
# 1. Copy appropriate coredns binary to build/docker/linux/<arch>
# 2. Copy Dockerfile to build/docker/linux/<arch>
# 3. Replace base image from alpine:latest to <arch>/alpine:latest
# 4. Comment RUN in Dockerfile
# <arch>:
# arm: arm32v6
# arm64: arm64v8
rm -rf build/docker
for arch in $(LINUX_ARCH); do \
mkdir -p build/docker/linux/$$arch ;\
tar -xzf release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C build/docker/linux/$$arch ;\
cp Dockerfile build/docker/linux/$$arch ;\
docker build -t coredns build/docker/linux/$$arch ;\
docker tag coredns $(DOCKER_IMAGE_NAME):coredns-$$arch ;\
done
.PHONY: docker-push
docker-push:
@echo Pushing: $(VERSION) to $(DOCKER_IMAGE_NAME)
for arch in $(LINUX_ARCH); do \
docker push $(DOCKER_IMAGE_NAME):coredns-$$arch ;\
done
manifest-tool push from-args --platforms $(PLATFORMS) --template $(DOCKER_IMAGE_NAME):coredns-ARCH --target $(DOCKER_IMAGE_NAME):$(VERSION)
manifest-tool push from-args --platforms $(PLATFORMS) --template $(DOCKER_IMAGE_NAME):coredns-ARCH --target $(DOCKER_IMAGE_NAME):latest
.PHONY: clean
clean:
rm -rf release
rm -rf build

33
vendor/github.com/coredns/coredns/OWNERS generated vendored Normal file

@@ -0,0 +1,33 @@
reviewers:
- bradbeam
- chrisohaver
- fastest963
- fturib
- greenpau
- grobie
- isolus
- johnbelamaric
- miekg
- pmoroney
- rajansandeep
- stp-ip
- superq
- varyoo
- yongtang
approvers:
- chrisohaver
- johnbelamaric
- miekg
- yongtang
features:
- comments
- reviewers
- aliases
- branches
aliases:
- |
/plugin: (.*) -> /label add: plugin/$1

200
vendor/github.com/coredns/coredns/README.md generated vendored Normal file

@@ -0,0 +1,200 @@
[![CoreDNS](https://coredns.io/images/CoreDNS_Colour_Horizontal.png)](https://coredns.io)
[![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/coredns/coredns)
[![Build Status](https://img.shields.io/travis/coredns/coredns/master.svg?label=build)](https://travis-ci.org/coredns/coredns)
[![Code Coverage](https://img.shields.io/codecov/c/github/coredns/coredns/master.svg)](https://codecov.io/github/coredns/coredns?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/coredns/coredns.svg)](https://hub.docker.com/r/coredns/coredns)
[![Go Report Card](https://goreportcard.com/badge/github.com/coredns/coredns)](https://goreportcard.com/report/coredns/coredns)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcoredns%2Fcoredns.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcoredns%2Fcoredns?ref=badge_shield)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1250/badge)](https://bestpractices.coreinfrastructure.org/projects/1250)
CoreDNS (written in Go) chains [plugins](https://coredns.io/plugins). Each plugin performs a DNS
function.
CoreDNS is a [Cloud Native Computing Foundation](https://cncf.io) incubating level project.
CoreDNS is a fast and flexible DNS server. The keyword here is *flexible*: with CoreDNS you
are able to do what you want with your DNS data by utilizing plugins. If some functionality is not
provided out of the box you can add it by [writing a plugin](https://coredns.io/explugins).
CoreDNS can listen for DNS requests coming in over UDP/TCP (good ol' DNS), TLS ([RFC
7858](https://tools.ietf.org/html/rfc7858)) and [gRPC](https://grpc.io) (not a standard).
Currently CoreDNS is able to:
* Serve zone data from a file; both DNSSEC (NSEC only) and DNS are supported (*file*).
* Retrieve zone data from primaries, i.e., act as a secondary server (AXFR only) (*secondary*).
* Sign zone data on-the-fly (*dnssec*).
* Load balancing of responses (*loadbalance*).
* Allow for zone transfers, i.e., act as a primary server (*file*).
* Automatically load zone files from disk (*auto*).
* Caching (*cache*).
* Use etcd as a backend (replace [SkyDNS](https://github.com/skynetservices/skydns)) (*etcd*).
* Use k8s (kubernetes) as a backend (*kubernetes*).
* Serve as a proxy to forward queries to some other (recursive) nameserver (*proxy*, and *forward*).
* Provide metrics (by using Prometheus) (*metrics*).
* Provide query (*log*) and error (*error*) logging.
* Support the CH class: `version.bind` and friends (*chaos*).
* Support the RFC 5001 DNS name server identifier (NSID) option (*nsid*).
* Profiling support (*pprof*).
* Rewrite queries (qtype, qclass and qname) (*rewrite* and *template*).
And more. Each of the plugins is documented. See [coredns.io/plugins](https://coredns.io/plugins)
for all in-tree plugins, and [coredns.io/explugins](https://coredns.io/explugins) for all
out-of-tree plugins.
## Compilation from Source
Check out the project and do dependency resolution with:
~~~
% go get github.com/coredns/coredns
~~~
Some of the dependencies require Go version 1.9 or later.
We vendor most (not all!) packages. Building from scratch is easiest, by just using `make`:
~~~
% make
~~~
This should yield a `coredns` binary.
## Compilation with Docker
CoreDNS requires Go to compile. However, if you already have Docker installed and prefer not to set up
a Go environment, you can build CoreDNS easily:
```
$ docker run --rm -i -t -v $PWD:/go/src/github.com/coredns/coredns \
-w /go/src/github.com/coredns/coredns golang:1.10 make
```
The above command alone will generate the `coredns` binary.
## Examples
When starting CoreDNS without any configuration, it loads the
[*whoami*](https://coredns.io/plugins/whoami) plugin, starts listening on port 53 (override with
`-dns.port`), and should show the following:
~~~ txt
.:53
2016/09/18 09:20:50 [INFO] CoreDNS-001
CoreDNS-001
~~~
Any query sent to port 53 should return some information: your sending address, port and protocol
used.
If you have a Corefile without a port number specified it will, by default, use port 53, but you
can override the port with the `-dns.port` flag:
`./coredns -dns.port 1053`, runs the server on port 1053.
Start a simple proxy. You'll need to be root to start listening on port 53.
`Corefile` contains:
~~~ corefile
.:53 {
forward . 8.8.8.8:53
log
}
~~~
Just start CoreDNS: `./coredns`. Then query on that port (53). The query should be forwarded to
8.8.8.8 and the response will be returned. Each query should also show up in the log, which is
printed on standard output.
Serve the (NSEC) DNSSEC-signed `example.org` on port 1053, with errors and logging sent to standard
output. Allow zone transfers to everybody, but specifically mention 1 IP address so that CoreDNS can
send notifies to it.
~~~ txt
example.org:1053 {
file /var/lib/coredns/example.org.signed {
transfer to *
transfer to 2001:500:8f::53
}
errors
log
}
~~~
Serve `example.org` on port 1053, but forward everything that does *not* match `example.org` to a recursive
nameserver *and* rewrite ANY queries to HINFO.
~~~ txt
.:1053 {
rewrite ANY HINFO
forward . 8.8.8.8:53
file /var/lib/coredns/example.org.signed example.org {
transfer to *
transfer to 2001:500:8f::53
}
errors
log
}
~~~
IP addresses are also allowed. They are automatically converted to reverse zones:
~~~ corefile
10.0.0.0/24 {
whoami
}
~~~
This means you are authoritative for `0.0.10.in-addr.arpa.`.
This also works for IPv6 addresses. If for some reason you want to serve a zone named `10.0.0.0/24`,
add the closing dot: `10.0.0.0/24.`, as this also stops the conversion.
This even works for CIDR (see RFC 1518 and 1519) addressing, e.g. `10.0.0.0/25`; CoreDNS will then
check whether the `in-addr` request falls in the correct range.
Listening on TLS and gRPC? Use:
~~~ corefile
tls://example.org grpc://example.org {
whoami
}
~~~
Specifying ports works in the same way:
~~~ txt
grpc://example.org:1443 {
# ...
}
~~~
When no transport protocol is specified the default `dns://` is assumed.
## Community
We're most active on Slack (and Github):
- Slack: #coredns on <https://slack.cncf.io>
- Github: <https://github.com/coredns/coredns>
More resources can be found:
- Website: <https://coredns.io>
- Blog: <https://blog.coredns.io>
- Twitter: [@corednsio](https://twitter.com/corednsio)
- Mailing list/group: <coredns-discuss@googlegroups.com>
## Deployment
Examples for deployment via systemd and other use cases can be found in the
[deployment repository](https://github.com/coredns/deployment).
## Security
If you find a security vulnerability or any security-related issue,
please DO NOT file a public issue; instead, send your report privately to
`security@coredns.io`. Security reports are greatly appreciated and we
will publicly thank you for them.

7
vendor/github.com/coredns/coredns/core/coredns.go generated vendored Normal file

@@ -0,0 +1,7 @@
// Package core registers the server and all plugins we support.
package core
import (
// plug in the server
_ "github.com/coredns/coredns/core/dnsserver"
)


@@ -0,0 +1,146 @@
package dnsserver
import (
"fmt"
"net"
"strings"
"github.com/coredns/coredns/plugin"
"github.com/miekg/dns"
)
type zoneAddr struct {
Zone string
Port string
Transport string // dns, tls or grpc
IPNet *net.IPNet // if reverse zone this holds the IPNet
Address string // used for bound zoneAddr - validation of overlapping
}
// String returns the string representation of z.
func (z zoneAddr) String() string {
s := z.Transport + "://" + z.Zone + ":" + z.Port
if z.Address != "" {
s += " on " + z.Address
}
return s
}
// Transport returns the protocol of the string s
func Transport(s string) string {
switch {
case strings.HasPrefix(s, TransportTLS+"://"):
return TransportTLS
case strings.HasPrefix(s, TransportDNS+"://"):
return TransportDNS
case strings.HasPrefix(s, TransportGRPC+"://"):
return TransportGRPC
case strings.HasPrefix(s, TransportHTTPS+"://"):
return TransportHTTPS
}
return TransportDNS
}
// normalizeZone parses a zone string into a structured format with separate
// host and port portions, as well as the original input string.
func normalizeZone(str string) (zoneAddr, error) {
var err error
// Default to DNS if there isn't a transport protocol prefix.
trans := TransportDNS
switch {
case strings.HasPrefix(str, TransportTLS+"://"):
trans = TransportTLS
str = str[len(TransportTLS+"://"):]
case strings.HasPrefix(str, TransportDNS+"://"):
trans = TransportDNS
str = str[len(TransportDNS+"://"):]
case strings.HasPrefix(str, TransportGRPC+"://"):
trans = TransportGRPC
str = str[len(TransportGRPC+"://"):]
case strings.HasPrefix(str, TransportHTTPS+"://"):
trans = TransportHTTPS
str = str[len(TransportHTTPS+"://"):]
}
host, port, ipnet, err := plugin.SplitHostPort(str)
if err != nil {
return zoneAddr{}, err
}
if port == "" {
if trans == TransportDNS {
port = Port
}
if trans == TransportTLS {
port = TLSPort
}
if trans == TransportGRPC {
port = GRPCPort
}
if trans == TransportHTTPS {
port = HTTPSPort
}
}
return zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet}, nil
}
// SplitProtocolHostPort splits a fully formed address like "dns://[::1]:53" into parts.
func SplitProtocolHostPort(address string) (protocol string, ip string, port string, err error) {
parts := strings.Split(address, "://")
switch len(parts) {
case 1:
ip, port, err := net.SplitHostPort(parts[0])
return "", ip, port, err
case 2:
ip, port, err := net.SplitHostPort(parts[1])
return parts[0], ip, port, err
default:
return "", "", "", fmt.Errorf("provided value is not in an address format : %s", address)
}
}
// Supported transports.
const (
TransportDNS = "dns"
TransportTLS = "tls"
TransportGRPC = "grpc"
TransportHTTPS = "https"
)
type zoneOverlap struct {
registeredAddr map[zoneAddr]zoneAddr // each zoneAddr is registered once by its key
unboundOverlap map[zoneAddr]zoneAddr // the "no bind" equivalent zoneAddr is registered by its original key
}
func newOverlapZone() *zoneOverlap {
return &zoneOverlap{registeredAddr: make(map[zoneAddr]zoneAddr), unboundOverlap: make(map[zoneAddr]zoneAddr)}
}
// registerAndCheck adds a new zoneAddr for validation. It returns the already registered zoneAddr if z is an exact duplicate,
// or the zoneAddr it overlaps with. An unbound address is considered to overlap all bound addresses for the same zone and port.
func (zo *zoneOverlap) registerAndCheck(z zoneAddr) (existingZone *zoneAddr, overlappingZone *zoneAddr) {
if exist, ok := zo.registeredAddr[z]; ok {
// exact same zone already registered
return &exist, nil
}
uz := zoneAddr{Zone: z.Zone, Address: "", Port: z.Port, Transport: z.Transport}
if already, ok := zo.unboundOverlap[uz]; ok {
if z.Address == "" {
// current is not bound to an address, but there is already another zone with a bind address registered
return nil, &already
}
if _, ok := zo.registeredAddr[uz]; ok {
// current zone is bound to an address, but there is already an overlapping zone+port with no bind address
return nil, &uz
}
}
// there is no overlap, keep the current zoneAddr for future checks
zo.registeredAddr[z] = z
zo.unboundOverlap[uz] = z
return nil, nil
}
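As a sketch of the default-port handling above, a hypothetical test-style example in package `dnsserver` (not part of this commit); it assumes the default port variables keep their values of 53, 853, 443 and 443:
~~~ go
package dnsserver

import "testing"

// Hypothetical sketch: normalizeZone falls back to the per-transport
// default port when the zone spec carries none.
func TestNormalizeZoneDefaultPorts(t *testing.T) {
	for input, want := range map[string]string{
		"example.org":         "dns://example.org.:53",
		"tls://example.org":   "tls://example.org.:853",
		"grpc://example.org":  "grpc://example.org.:443",
		"https://example.org": "https://example.org.:443",
	} {
		z, err := normalizeZone(input)
		if err != nil {
			t.Fatalf("Expected no error for %q, but got: %v", input, err)
		}
		if got := z.String(); got != want {
			t.Errorf("Expected %s for %q, but got %s", want, input, got)
		}
	}
}
~~~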

View File

@@ -0,0 +1,179 @@
package dnsserver
import "testing"
func TestNormalizeZone(t *testing.T) {
for i, test := range []struct {
input string
expected string
shouldErr bool
}{
{".", "dns://.:53", false},
{".:54", "dns://.:54", false},
{"..", "://:", true},
{"..", "://:", true},
{".:", "://:", true},
} {
addr, err := normalizeZone(test.input)
actual := addr.String()
if test.shouldErr && err == nil {
t.Errorf("Test %d: Expected error, but there wasn't any", i)
}
if !test.shouldErr && err != nil {
t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
}
if actual != test.expected {
t.Errorf("Test %d: Expected %s but got %s", i, test.expected, actual)
}
}
}
func TestNormalizeZoneReverse(t *testing.T) {
for i, test := range []struct {
input string
expected string
shouldErr bool
}{
{"2003::1/64", "dns://0.0.0.0.0.0.0.0.0.0.0.0.3.0.0.2.ip6.arpa.:53", false},
{"2003::1/64.", "dns://2003::1/64.:53", false}, // OK, with closing dot the parse will fail.
{"2003::1/64:53", "dns://0.0.0.0.0.0.0.0.0.0.0.0.3.0.0.2.ip6.arpa.:53", false},
{"2003::1/64.:53", "dns://2003::1/64.:53", false},
{"10.0.0.0/24", "dns://0.0.10.in-addr.arpa.:53", false},
{"10.0.0.0/24.", "dns://10.0.0.0/24.:53", false},
{"10.0.0.0/24:53", "dns://0.0.10.in-addr.arpa.:53", false},
{"10.0.0.0/24.:53", "dns://10.0.0.0/24.:53", false},
// non %8==0 netmasks
{"2003::53/67", "dns://0.0.0.0.0.0.0.0.0.0.0.0.0.3.0.0.2.ip6.arpa.:53", false},
{"10.0.0.0/25.", "dns://10.0.0.0/25.:53", false}, // has dot
{"10.0.0.0/25", "dns://0.0.10.in-addr.arpa.:53", false},
{"fd00:77:30::0/110", "dns://0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.3.0.0.7.7.0.0.0.0.d.f.ip6.arpa.:53", false},
} {
addr, err := normalizeZone(test.input)
actual := addr.String()
if test.shouldErr && err == nil {
t.Errorf("Test %d: Expected error, but there wasn't any", i)
}
if !test.shouldErr && err != nil {
t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
}
if actual != test.expected {
t.Errorf("Test %d: Expected %s but got %s", i, test.expected, actual)
}
}
}
func TestSplitProtocolHostPort(t *testing.T) {
for i, test := range []struct {
input string
proto string
ip string
port string
shouldErr bool
}{
{"dns://:53", "dns", "", "53", false},
{"dns://127.0.0.1:4005", "dns", "127.0.0.1", "4005", false},
{"[ffe0:34ab:1]:4005", "", "ffe0:34ab:1", "4005", false},
// port part is mandatory
{"dns://", "dns", "", "", true},
{"dns://127.0.0.1", "dns", "127.0.0.1", "", true},
// cannot be empty
{"", "", "", "", true},
// invalid format with twice ://
{"dns://127.0.0.1://53", "", "", "", true},
} {
proto, ip, port, err := SplitProtocolHostPort(test.input)
if test.shouldErr && err == nil {
t.Errorf("Test %d: (address = %s) expected error, but there wasn't any", i, test.input)
continue
}
if !test.shouldErr && err != nil {
t.Errorf("Test %d: (address = %s) expected no error, but there was one: %v", i, test.input, err)
continue
}
if err != nil || test.shouldErr {
continue
}
if proto != test.proto {
t.Errorf("Test %d: (address = %s) expected protocol with value %s but got %s", i, test.input, test.proto, proto)
}
if ip != test.ip {
t.Errorf("Test %d: (address = %s) expected ip with value %s but got %s", i, test.input, test.ip, ip)
}
if port != test.port {
t.Errorf("Test %d: (address = %s) expected port with value %s but got %s", i, test.input, test.port, port)
}
}
}
type checkCall struct {
zone zoneAddr
same bool
overlap bool
overlapKey string
}
type checkTest struct {
sequence []checkCall
}
func TestOverlapAddressChecker(t *testing.T) {
for i, test := range []checkTest{
{sequence: []checkCall{
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "53"}, true, false, ""},
},
},
{sequence: []checkCall{
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "54"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "127.0.0.1", Port: "53"}, false, true, "dns://.:53"},
},
},
{sequence: []checkCall{
{zoneAddr{Transport: "dns", Zone: ".", Address: "127.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "54"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "127.0.0.1", Port: "53"}, true, false, ""},
},
},
{sequence: []checkCall{
{zoneAddr{Transport: "dns", Zone: ".", Address: "127.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "54"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "128.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "129.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: ".", Address: "", Port: "53"}, false, true, "dns://.:53 on 129.0.0.1"},
},
},
{sequence: []checkCall{
{zoneAddr{Transport: "dns", Zone: ".", Address: "127.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: "com.", Address: "127.0.0.1", Port: "53"}, false, false, ""},
{zoneAddr{Transport: "dns", Zone: "com.", Address: "", Port: "53"}, false, true, "dns://com.:53 on 127.0.0.1"},
},
},
} {
checker := newOverlapZone()
for _, call := range test.sequence {
same, overlap := checker.registerAndCheck(call.zone)
sZone := call.zone.String()
if (same != nil) != call.same {
t.Errorf("Test %d: error, for zone %s, 'same' (%v) has not the expected value (%v)", i, sZone, same != nil, call.same)
}
if same == nil {
if (overlap != nil) != call.overlap {
t.Errorf("Test %d: error, for zone %s, 'overlap' (%v) has not the expected value (%v)", i, sZone, overlap != nil, call.overlap)
}
if overlap != nil {
if overlap.String() != call.overlapKey {
t.Errorf("Test %d: error, for zone %s, 'overlap Key' (%v) has not the expected value (%v)", i, sZone, overlap.String(), call.overlapKey)
}
}
}
}
}
}

View File

@@ -0,0 +1,73 @@
package dnsserver
import (
"crypto/tls"
"fmt"
"github.com/coredns/coredns/plugin"
"github.com/mholt/caddy"
)
// Config holds the configuration for a single server.
type Config struct {
// The zone of the site.
Zone string
// One or several hostnames to bind the server to.
// Defaults to a single empty string that denotes the wildcard address.
ListenHosts []string
// The port to listen on.
Port string
// Root points to a base directory where we find user-defined "things".
// The first consumer is the file plugin, which looks for zone files in this place.
Root string
// Debug controls the panic/recover mechanism that is enabled by default.
Debug bool
// The transport we implement, normally just "dns" over TCP/UDP, but could be
// DNS-over-TLS or DNS-over-gRPC.
Transport string
// If this function is not nil it will be used to further filter access
// to this handler. The primary use is to limit access to a reverse zone
// on a non-octet boundary, i.e. /17
FilterFunc func(string) bool
// TLSConfig when listening for encrypted connections (gRPC, DNS-over-TLS).
TLSConfig *tls.Config
// Plugin stack.
Plugin []plugin.Plugin
// Compiled plugin stack.
pluginChain plugin.Handler
// Plugins interested in announcing that they exist, so other plugins can call methods
// on them, should register themselves here. The name should be the name as returned by the
// Handler's Name method.
registry map[string]plugin.Handler
}
// keyForConfig builds a key for identifying the configs during setup time.
func keyForConfig(blocIndex int, blocKeyIndex int) string {
return fmt.Sprintf("%d:%d", blocIndex, blocKeyIndex)
}
// GetConfig gets the Config that corresponds to c.
// If none exists, nil is returned.
func GetConfig(c *caddy.Controller) *Config {
ctx := c.Context().(*dnsContext)
key := keyForConfig(c.ServerBlockIndex, c.ServerBlockKeyIndex)
if cfg, ok := ctx.keysToConfigs[key]; ok {
return cfg
}
// we should only get here during tests because directive
// actions typically skip the server blocks where we make
// the configs.
ctx.saveConfig(key, &Config{ListenHosts: []string{""}})
return GetConfig(c)
}
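For context, a hedged sketch of how an external plugin's setup code might use `GetConfig` and `AddPlugin`. The `example` directive, the `examplePlugin` type and the wiring are illustrative only, and the registration assumes the standard `caddy.RegisterPlugin` API from github.com/mholt/caddy:
~~~ go
// Package example is a hypothetical external plugin used only for illustration.
package example

import (
	"context"

	"github.com/coredns/coredns/core/dnsserver"
	"github.com/coredns/coredns/plugin"

	"github.com/mholt/caddy"
	"github.com/miekg/dns"
)

func init() {
	// Register the "example" directive for the dns server type; setup runs
	// once per server block that mentions it.
	caddy.RegisterPlugin("example", caddy.Plugin{
		ServerType: "dns",
		Action:     setup,
	})
}

func setup(c *caddy.Controller) error {
	// GetConfig resolves the Config for the current server block and
	// AddPlugin appends a constructor to its plugin chain.
	dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
		return examplePlugin{Next: next}
	})
	return nil
}

type examplePlugin struct{ Next plugin.Handler }

// Name implements plugin.Handler.
func (e examplePlugin) Name() string { return "example" }

// ServeDNS implements plugin.Handler and simply hands the query to the next
// plugin in the chain.
func (e examplePlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}
~~~
For such a directive to be ordered it would also have to appear in the `Directives` list (see the generated directives file later in this commit).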

View File

@@ -0,0 +1,74 @@
package dnsserver
import (
"encoding/base64"
"fmt"
"io/ioutil"
"net"
"net/http"
"github.com/coredns/coredns/plugin/pkg/nonwriter"
"github.com/miekg/dns"
)
// mimeTypeDOH is the DoH mimetype that should be used.
const mimeTypeDOH = "application/dns-message"
// pathDOH is the URL path that should be used.
const pathDOH = "/dns-query"
// postRequestToMsg extracts the dns message from the request body.
func postRequestToMsg(req *http.Request) (*dns.Msg, error) {
defer req.Body.Close()
buf, err := ioutil.ReadAll(req.Body)
if err != nil {
return nil, err
}
m := new(dns.Msg)
err = m.Unpack(buf)
return m, err
}
// getRequestToMsg extracts the DNS message from the GET request.
func getRequestToMsg(req *http.Request) (*dns.Msg, error) {
values := req.URL.Query()
b64, ok := values["dns"]
if !ok {
return nil, fmt.Errorf("no 'dns' query parameter found")
}
if len(b64) != 1 {
return nil, fmt.Errorf("multiple 'dns' query values found")
}
return base64ToMsg(b64[0])
}
func base64ToMsg(b64 string) (*dns.Msg, error) {
buf, err := b64Enc.DecodeString(b64)
if err != nil {
return nil, err
}
m := new(dns.Msg)
err = m.Unpack(buf)
return m, err
}
var b64Enc = base64.RawURLEncoding
// DoHWriter is a nonwriter.Writer that adds more specific LocalAddr and RemoteAddr methods.
type DoHWriter struct {
nonwriter.Writer
// raddr is the remote's address. This can be optionally set.
raddr net.Addr
// laddr is our address. This can be optionally set.
laddr net.Addr
}
// RemoteAddr returns the remote address.
func (d *DoHWriter) RemoteAddr() net.Addr { return d.raddr }
// LocalAddr returns the local address.
func (d *DoHWriter) LocalAddr() net.Addr { return d.laddr }

View File

@@ -0,0 +1,66 @@
package dnsserver
import (
"bytes"
"encoding/base64"
"net/http"
"testing"
"github.com/miekg/dns"
)
func TestPostRequest(t *testing.T) {
const ex = "example.org."
m := new(dns.Msg)
m.SetQuestion(ex, dns.TypeDNSKEY)
out, _ := m.Pack()
req, err := http.NewRequest(http.MethodPost, "https://"+ex+pathDOH+"?bla=foo:443", bytes.NewReader(out))
if err != nil {
t.Errorf("Failure to make request: %s", err)
}
req.Header.Set("content-type", mimeTypeDOH)
req.Header.Set("accept", mimeTypeDOH)
m, err = postRequestToMsg(req)
if err != nil {
t.Fatalf("Failure to get message from request: %s", err)
}
if x := m.Question[0].Name; x != ex {
t.Errorf("Qname expected %s, got %s", ex, x)
}
if x := m.Question[0].Qtype; x != dns.TypeDNSKEY {
t.Errorf("Qname expected %d, got %d", x, dns.TypeDNSKEY)
}
}
func TestGetRequest(t *testing.T) {
const ex = "example.org."
m := new(dns.Msg)
m.SetQuestion(ex, dns.TypeDNSKEY)
out, _ := m.Pack()
b64 := base64.RawURLEncoding.EncodeToString(out)
req, err := http.NewRequest(http.MethodGet, "https://"+ex+pathDOH+"?dns="+b64, nil)
if err != nil {
t.Errorf("Failure to make request: %s", err)
}
req.Header.Set("content-type", mimeTypeDOH)
req.Header.Set("accept", mimeTypeDOH)
m, err = getRequestToMsg(req)
if err != nil {
t.Fatalf("Failure to get message from request: %s", err)
}
if x := m.Question[0].Name; x != ex {
t.Errorf("Qname expected %s, got %s", ex, x)
}
if x := m.Question[0].Qtype; x != dns.TypeDNSKEY {
t.Errorf("Qname expected %d, got %d", x, dns.TypeDNSKEY)
}
}

View File

@@ -0,0 +1,29 @@
package dnsserver
import "fmt"
// startUpZones creates the text that we show when starting up:
// grpc://example.com.:1055
// example.com.:1053 on 127.0.0.1
func startUpZones(protocol, addr string, zones map[string]*Config) string {
s := ""
for zone := range zones {
// split addr into protocol, IP and Port
_, ip, port, err := SplitProtocolHostPort(addr)
if err != nil {
// this should not happen, but we need to take care of it anyway
s += fmt.Sprintln(protocol + zone + ":" + addr)
continue
}
if ip == "" {
s += fmt.Sprintln(protocol + zone + ":" + port)
continue
}
// if the server is listening on a specific address let's make it visible in the log,
// so one can differentiate between all active listeners
s += fmt.Sprintln(protocol + zone + ":" + port + " on " + ip)
}
return s
}

View File

@@ -0,0 +1,258 @@
package dnsserver
import (
"flag"
"fmt"
"net"
"strings"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyfile"
)
const serverType = "dns"
// Any flags defined here need to be namespaced to the serverType, otherwise
// they potentially clash with flags from other server types.
func init() {
flag.StringVar(&Port, serverType+".port", DefaultPort, "Default port")
caddy.RegisterServerType(serverType, caddy.ServerType{
Directives: func() []string { return Directives },
DefaultInput: func() caddy.Input {
return caddy.CaddyfileInput{
Filepath: "Corefile",
Contents: []byte(".:" + Port + " {\nwhoami\n}\n"),
ServerTypeName: serverType,
}
},
NewContext: newContext,
})
}
func newContext(i *caddy.Instance) caddy.Context {
return &dnsContext{keysToConfigs: make(map[string]*Config)}
}
type dnsContext struct {
keysToConfigs map[string]*Config
// configs is the master list of all site configs.
configs []*Config
}
func (h *dnsContext) saveConfig(key string, cfg *Config) {
h.configs = append(h.configs, cfg)
h.keysToConfigs[key] = cfg
}
// InspectServerBlocks makes sure that everything checks out before
// executing directives and otherwise prepares the directives to
// be parsed and executed.
func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddyfile.ServerBlock) ([]caddyfile.ServerBlock, error) {
// Normalize and check all the zone names and check for duplicates
for ib, s := range serverBlocks {
for ik, k := range s.Keys {
za, err := normalizeZone(k)
if err != nil {
return nil, err
}
s.Keys[ik] = za.String()
// Save the config to our master list, and key it for lookups.
cfg := &Config{
Zone: za.Zone,
ListenHosts: []string{""},
Port: za.Port,
Transport: za.Transport,
}
keyConfig := keyForConfig(ib, ik)
if za.IPNet == nil {
h.saveConfig(keyConfig, cfg)
continue
}
ones, bits := za.IPNet.Mask.Size()
if (bits-ones)%8 != 0 { // only do this for non-octet boundaries
cfg.FilterFunc = func(s string) bool {
// TODO(miek): strings.ToLower! Slow and allocates new string.
addr := dnsutil.ExtractAddressFromReverse(strings.ToLower(s))
if addr == "" {
return true
}
return za.IPNet.Contains(net.ParseIP(addr))
}
}
h.saveConfig(keyConfig, cfg)
}
}
return serverBlocks, nil
}
// MakeServers uses the newly-created siteConfigs to create and return a list of server instances.
func (h *dnsContext) MakeServers() ([]caddy.Server, error) {
// Now that all Keys and Directives are parsed and initialized,
// let's verify that there is no overlap on the zones and addresses to listen for.
errValid := h.validateZonesAndListeningAddresses()
if errValid != nil {
return nil, errValid
}
// we must map (group) each config to a bind address
groups, err := groupConfigsByListenAddr(h.configs)
if err != nil {
return nil, err
}
// then we create a server for each group
var servers []caddy.Server
for addr, group := range groups {
// switch on addr
switch Transport(addr) {
case TransportDNS:
s, err := NewServer(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportTLS:
s, err := NewServerTLS(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportGRPC:
s, err := NewServergRPC(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
case TransportHTTPS:
s, err := NewServerHTTPS(addr, group)
if err != nil {
return nil, err
}
servers = append(servers, s)
}
}
return servers, nil
}
// AddPlugin adds a plugin to a site's plugin stack.
func (c *Config) AddPlugin(m plugin.Plugin) {
c.Plugin = append(c.Plugin, m)
}
// registerHandler adds a handler to a site's handler registration. Handlers
// use this to announce that they exist to other plugins.
func (c *Config) registerHandler(h plugin.Handler) {
if c.registry == nil {
c.registry = make(map[string]plugin.Handler)
}
// Just overwrite...
c.registry[h.Name()] = h
}
// Handler returns the plugin handler that has been added to the config under its name.
// This is useful to inspect if a certain plugin is active in this server.
// Note that this is order-dependent and the order is defined in directives.go, i.e. if your plugin
// comes before the plugin you are checking, it will not be there (yet).
func (c *Config) Handler(name string) plugin.Handler {
if c.registry == nil {
return nil
}
if h, ok := c.registry[name]; ok {
return h
}
return nil
}
// Handlers returns a slice of plugins that have been registered. This can be used to
// inspect and interact with registered plugins but cannot be used to remove or add plugins.
// Note that this is order-dependent and the order is defined in directives.go, i.e. if your plugin
// comes before the plugin you are checking, it will not be there (yet).
func (c *Config) Handlers() []plugin.Handler {
if c.registry == nil {
return nil
}
hs := make([]plugin.Handler, 0, len(c.registry))
for k := range c.registry {
hs = append(hs, c.registry[k])
}
return hs
}
func (h *dnsContext) validateZonesAndListeningAddresses() error {
// Validate zones and addresses.
checker := newOverlapZone()
for _, conf := range h.configs {
for _, h := range conf.ListenHosts {
// Validate the overlapping of ZoneAddr
akey := zoneAddr{Transport: conf.Transport, Zone: conf.Zone, Address: h, Port: conf.Port}
existZone, overlapZone := checker.registerAndCheck(akey)
if existZone != nil {
return fmt.Errorf("cannot serve %s - it is already defined", akey.String())
}
if overlapZone != nil {
return fmt.Errorf("cannot serve %s - zone overlap listener capacity with %v", akey.String(), overlapZone.String())
}
}
}
return nil
}
// groupConfigsByListenAddr groups site configs by their listen
// (bind) address, so sites that use the same listener can be served
// on the same server instance. The return value maps the listen
// address (what you pass into net.Listen) to the list of site configs.
// This function does NOT vet the configs to ensure they are compatible.
func groupConfigsByListenAddr(configs []*Config) (map[string][]*Config, error) {
groups := make(map[string][]*Config)
for _, conf := range configs {
for _, h := range conf.ListenHosts {
addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(h, conf.Port))
if err != nil {
return nil, err
}
addrstr := conf.Transport + "://" + addr.String()
groups[addrstr] = append(groups[addrstr], conf)
}
}
return groups, nil
}
const (
// DefaultPort is the default port.
DefaultPort = "53"
// TLSPort is the default port for DNS-over-TLS.
TLSPort = "853"
// GRPCPort is the default port for DNS-over-gRPC.
GRPCPort = "443"
// HTTPSPort is the default port for DNS-over-HTTPS.
HTTPSPort = "443"
)
// These "soft defaults" are configurable by
// command line flags, etc.
var (
// Port is the port we listen on by default.
Port = DefaultPort
// GracefulTimeout is the maximum duration of a graceful shutdown.
GracefulTimeout time.Duration
)
var _ caddy.GracefulServer = new(Server)

View File

@@ -0,0 +1,121 @@
package dnsserver
import (
"testing"
)
func TestHandler(t *testing.T) {
tp := testPlugin{}
c := testConfig("dns", tp)
if _, err := NewServer("127.0.0.1:53", []*Config{c}); err != nil {
t.Errorf("Expected no error for NewServer, got %s", err)
}
if h := c.Handler("testplugin"); h != tp {
t.Errorf("Expected testPlugin from Handler, got %T", h)
}
if h := c.Handler("nothing"); h != nil {
t.Errorf("Expected nil from Handler, got %T", h)
}
}
func TestHandlers(t *testing.T) {
tp := testPlugin{}
c := testConfig("dns", tp)
if _, err := NewServer("127.0.0.1:53", []*Config{c}); err != nil {
t.Errorf("Expected no error for NewServer, got %s", err)
}
hs := c.Handlers()
if len(hs) != 1 || hs[0] != tp {
t.Errorf("Expected [testPlugin] from Handlers, got %v", hs)
}
}
func TestGroupingServers(t *testing.T) {
for i, test := range []struct {
configs []*Config
expectedGroups []string
failing bool
}{
// single config -> one group
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{""}},
},
expectedGroups: []string{"dns://:53"},
failing: false},
// 2 configs on different port -> 2 groups
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{""}},
{Transport: "dns", Zone: ".", Port: "54", ListenHosts: []string{""}},
},
expectedGroups: []string{"dns://:53", "dns://:54"},
failing: false},
// 2 configs on same port, both not using bind, diff zones -> 1 group
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{""}},
{Transport: "dns", Zone: "com.", Port: "53", ListenHosts: []string{""}},
},
expectedGroups: []string{"dns://:53"},
failing: false},
// 2 configs on different ports, one bound to an address and one not, same zone -> 2 groups
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{"127.0.0.1"}},
{Transport: "dns", Zone: ".", Port: "54", ListenHosts: []string{""}},
},
expectedGroups: []string{"dns://127.0.0.1:53", "dns://:54"},
failing: false},
// 2 configs on different ports, 3 different addresses in total -> 3 groups
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{"127.0.0.1", "::1"}},
{Transport: "dns", Zone: ".", Port: "54", ListenHosts: []string{""}}},
expectedGroups: []string{"dns://127.0.0.1:53", "dns://[::1]:53", "dns://:54"},
failing: false},
// 2 configs on same port, same address, diff zones -> 1 group
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{"127.0.0.1", "::1"}},
{Transport: "dns", Zone: "com.", Port: "53", ListenHosts: []string{"127.0.0.1", "::1"}},
},
expectedGroups: []string{"dns://127.0.0.1:53", "dns://[::1]:53"},
failing: false},
// 2 configs on same port, total 2 diff addresses, diff zones -> 2 groups
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{"127.0.0.1"}},
{Transport: "dns", Zone: "com.", Port: "53", ListenHosts: []string{"::1"}},
},
expectedGroups: []string{"dns://127.0.0.1:53", "dns://[::1]:53"},
failing: false},
// 2 configs on same port, total 3 diff addresses, diff zones -> 3 groups
{configs: []*Config{
{Transport: "dns", Zone: ".", Port: "53", ListenHosts: []string{"127.0.0.1", "::1"}},
{Transport: "dns", Zone: "com.", Port: "53", ListenHosts: []string{""}}},
expectedGroups: []string{"dns://127.0.0.1:53", "dns://[::1]:53", "dns://:53"},
failing: false},
} {
groups, err := groupConfigsByListenAddr(test.configs)
if err != nil {
if !test.failing {
t.Fatalf("Test %d, expected no errors, but got: %v", i, err)
}
continue
}
if test.failing {
t.Fatalf("Test %d, expected to failed but did not, returned values", i)
}
if len(groups) != len(test.expectedGroups) {
t.Errorf("Test %d : expected the group's size to be %d, was %d", i, len(test.expectedGroups), len(groups))
continue
}
for _, v := range test.expectedGroups {
if _, ok := groups[v]; !ok {
t.Errorf("Test %d : expected value %v to be in the group, was not", i, v)
}
}
}
}

View File

@@ -0,0 +1,389 @@
// Package dnsserver implements all the interfaces from Caddy, so that CoreDNS can be a servertype plugin.
package dnsserver
import (
"context"
"fmt"
"net"
"runtime"
"sync"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics/vars"
"github.com/coredns/coredns/plugin/pkg/edns"
"github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/plugin/pkg/rcode"
"github.com/coredns/coredns/plugin/pkg/trace"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
ot "github.com/opentracing/opentracing-go"
)
// Server represents an instance of a server, which serves
// DNS requests at a particular address (host and port). A
// server is capable of serving numerous zones on
// the same address and the listener may be stopped for
// graceful termination (POSIX only).
type Server struct {
Addr string // Address we listen on
server [2]*dns.Server // 0 is a net.Listener, 1 is a net.PacketConn (a *UDPConn) in our case.
m sync.Mutex // protects the servers
zones map[string]*Config // zones keyed by their address
dnsWg sync.WaitGroup // used to wait on outstanding connections
connTimeout time.Duration // the maximum duration of a graceful shutdown
trace trace.Trace // the trace plugin for the server
debug bool // disable recover()
classChaos bool // allow non-INET class queries
}
// NewServer returns a new CoreDNS server and compiles all plugins into it. By default CH class
// queries are blocked unless plugins listed in enableChaos are loaded.
func NewServer(addr string, group []*Config) (*Server, error) {
s := &Server{
Addr: addr,
zones: make(map[string]*Config),
connTimeout: 5 * time.Second, // TODO(miek): was configurable
}
// We have to bound our wg with one increment
// to prevent a "race condition" that is hard-coded
// into sync.WaitGroup.Wait() - basically, an add
// with a positive delta must be guaranteed to
// occur before Wait() is called on the wg.
// In a way, this kind of acts as a safety barrier.
s.dnsWg.Add(1)
for _, site := range group {
if site.Debug {
s.debug = true
log.D = true
}
// set the config per zone
s.zones[site.Zone] = site
// compile custom plugin for everything
if site.registry != nil {
// this config is already computed with the chain of plugins
// set classChaos in accordance with previously registered plugins
for name := range enableChaos {
if _, ok := site.registry[name]; ok {
s.classChaos = true
break
}
}
// set trace handler in accordance with previously registered "trace" plugin
if handler, ok := site.registry["trace"]; ok {
s.trace = handler.(trace.Trace)
}
continue
}
var stack plugin.Handler
for i := len(site.Plugin) - 1; i >= 0; i-- {
stack = site.Plugin[i](stack)
// register the *handler* also
site.registerHandler(stack)
if s.trace == nil && stack.Name() == "trace" {
// we have to stash away the plugin, not the
// Tracer object, because the Tracer won't be initialized yet
if t, ok := stack.(trace.Trace); ok {
s.trace = t
}
}
// Unblock CH class queries when any of these plugins are loaded.
if _, ok := enableChaos[stack.Name()]; ok {
s.classChaos = true
}
}
site.pluginChain = stack
}
return s, nil
}
// Serve starts the server with an existing listener. It blocks until the server stops.
// This implements caddy.TCPServer interface.
func (s *Server) Serve(l net.Listener) error {
s.m.Lock()
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
ctx := context.WithValue(context.Background(), Key{}, s)
s.ServeDNS(ctx, w, r)
})}
s.m.Unlock()
return s.server[tcp].ActivateAndServe()
}
// ServePacket starts the server with an existing packetconn. It blocks until the server stops.
// This implements caddy.UDPServer interface.
func (s *Server) ServePacket(p net.PacketConn) error {
s.m.Lock()
s.server[udp] = &dns.Server{PacketConn: p, Net: "udp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
ctx := context.WithValue(context.Background(), Key{}, s)
s.ServeDNS(ctx, w, r)
})}
s.m.Unlock()
return s.server[udp].ActivateAndServe()
}
// Listen implements caddy.TCPServer interface.
func (s *Server) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportDNS+"://"):])
if err != nil {
return nil, err
}
return l, nil
}
// ListenPacket implements caddy.UDPServer interface.
func (s *Server) ListenPacket() (net.PacketConn, error) {
p, err := net.ListenPacket("udp", s.Addr[len(TransportDNS+"://"):])
if err != nil {
return nil, err
}
return p, nil
}
// Stop stops the server. It blocks until the server is
// totally stopped. On POSIX systems, it will wait for
// connections to close (up to a max timeout of a few
// seconds); on Windows it will close the listener
// immediately.
// This implements Caddy.Stopper interface.
func (s *Server) Stop() (err error) {
if runtime.GOOS != "windows" {
// force connections to close after timeout
done := make(chan struct{})
go func() {
s.dnsWg.Done() // decrement our initial increment used as a barrier
s.dnsWg.Wait()
close(done)
}()
// Wait for remaining connections to finish or
// force them all to close after timeout
select {
case <-time.After(s.connTimeout):
case <-done:
}
}
// Close the listener now; this stops the server without delay
s.m.Lock()
for _, s1 := range s.server {
// We might not have started and initialized the full set of servers
if s1 != nil {
err = s1.Shutdown()
}
}
s.m.Unlock()
return
}
// Address together with Stop() implement caddy.GracefulServer.
func (s *Server) Address() string { return s.Addr }
// ServeDNS is the entry point for every request to the address that s
// is bound to. It acts as a multiplexer for the request's zone name as
// defined in the request so that the correct zone
// (configuration and plugin stack) will handle the request.
func (s *Server) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) {
// The default dns.Mux checks the question section size, but we have our
// own mux here. Check if we have a question section; if not, drop the request here.
if r == nil || len(r.Question) == 0 {
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
return
}
if !s.debug {
defer func() {
// In case the user doesn't enable error plugin, we still
// need to make sure that we stay alive up here
if rec := recover(); rec != nil {
vars.Panic.Inc()
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
}
}()
}
if !s.classChaos && r.Question[0].Qclass != dns.ClassINET {
DefaultErrorFunc(ctx, w, r, dns.RcodeRefused)
return
}
if m, err := edns.Version(r); err != nil { // Wrong EDNS version, return at once.
w.WriteMsg(m)
return
}
ctx, err := incrementDepthAndCheck(ctx)
if err != nil {
DefaultErrorFunc(ctx, w, r, dns.RcodeServerFailure)
return
}
q := r.Question[0].Name
b := make([]byte, len(q))
var off int
var end bool
var dshandler *Config
for {
l := len(q[off:])
for i := 0; i < l; i++ {
b[i] = q[off+i]
// normalize the name for the lookup
if b[i] >= 'A' && b[i] <= 'Z' {
b[i] |= ('a' - 'A')
}
}
if h, ok := s.zones[string(b[:l])]; ok {
// Set the server's address in the context so plugins can reference back to this server.
// This makes those metrics unique.
ctx = context.WithValue(ctx, plugin.ServerCtx{}, s.Addr)
if r.Question[0].Qtype != dns.TypeDS {
if h.FilterFunc == nil {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
}
return
}
// FilterFunc is set, call it to see if we should use this handler.
// It is given the full query name.
if h.FilterFunc(q) {
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
}
return
}
}
// The type is DS, keep the handler, but keep on searching as maybe we are serving
// the parent as well and the DS should be routed to it - this will probably *misroute* DS
// queries to a possible grandparent, but there is no way for us to know at this point
// if there is an actual delegation from grandparent -> parent -> zone.
// In all fairness: direct DS queries should not be needed.
dshandler = h
}
off, end = dns.NextLabel(q, off)
if end {
break
}
}
if r.Question[0].Qtype == dns.TypeDS && dshandler != nil && dshandler.pluginChain != nil {
// DS request, and we found a zone, use the handler for the query.
rcode, _ := dshandler.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
}
return
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := s.zones["."]; ok && h.pluginChain != nil {
// See comment above.
ctx = context.WithValue(ctx, plugin.ServerCtx{}, s.Addr)
rcode, _ := h.pluginChain.ServeDNS(ctx, w, r)
if !plugin.ClientWrite(rcode) {
DefaultErrorFunc(ctx, w, r, rcode)
}
return
}
// Still here? Error out with REFUSED.
DefaultErrorFunc(ctx, w, r, dns.RcodeRefused)
}
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming Quiet is false.
func (s *Server) OnStartupComplete() {
if Quiet {
return
}
out := startUpZones("", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}
// Tracer returns the tracer in the server if defined.
func (s *Server) Tracer() ot.Tracer {
if s.trace == nil {
return nil
}
return s.trace.Tracer()
}
// DefaultErrorFunc responds to a DNS request with an error.
func DefaultErrorFunc(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, rc int) {
state := request.Request{W: w, Req: r}
answer := new(dns.Msg)
answer.SetRcode(r, rc)
state.SizeAndDo(answer)
vars.Report(ctx, state, vars.Dropped, rcode.ToString(rc), answer.Len(), time.Now())
w.WriteMsg(answer)
}
// incrementDepthAndCheck increments the loop counter in the context, and returns an error if
// the counter exceeds the max number of re-entries
func incrementDepthAndCheck(ctx context.Context) (context.Context, error) {
// Loop counter for self directed lookups
loop := ctx.Value(loopKey{})
if loop == nil {
ctx = context.WithValue(ctx, loopKey{}, 0)
return ctx, nil
}
iloop := loop.(int) + 1
if iloop > maxreentries {
return ctx, fmt.Errorf("too deep")
}
ctx = context.WithValue(ctx, loopKey{}, iloop)
return ctx, nil
}
const (
tcp = 0
udp = 1
maxreentries = 10
)
type (
// Key is the context key for the current server
Key struct{}
loopKey struct{} // loopKey is the context key for counting self loops
)
// enableChaos is a map with plugin names for which we should open CH class queries as
// we block these by default.
var enableChaos = map[string]bool{
"chaos": true,
"forward": true,
"proxy": true,
}
// Quiet mode will not show any informative output on initialization.
var Quiet bool

View File

@@ -0,0 +1,170 @@
package dnsserver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"net"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
"github.com/miekg/dns"
"github.com/opentracing/opentracing-go"
"google.golang.org/grpc"
"google.golang.org/grpc/peer"
"github.com/coredns/coredns/pb"
)
// ServergRPC represents an instance of a DNS-over-gRPC server.
type ServergRPC struct {
*Server
grpcServer *grpc.Server
listenAddr net.Addr
tlsConfig *tls.Config
}
// NewServergRPC returns a new CoreDNS gRPC server and compiles all plugins into it.
func NewServergRPC(addr string, group []*Config) (*ServergRPC, error) {
s, err := NewServer(addr, group)
if err != nil {
return nil, err
}
// The *tls* plugin must make sure that multiple conflicting
// TLS configurations return an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
return &ServergRPC{Server: s, tlsConfig: tlsConfig}, nil
}
// Serve implements caddy.TCPServer interface.
func (s *ServergRPC) Serve(l net.Listener) error {
s.m.Lock()
s.listenAddr = l.Addr()
s.m.Unlock()
if s.Tracer() != nil {
onlyIfParent := func(parentSpanCtx opentracing.SpanContext, method string, req, resp interface{}) bool {
return parentSpanCtx != nil
}
intercept := otgrpc.OpenTracingServerInterceptor(s.Tracer(), otgrpc.IncludingSpans(onlyIfParent))
s.grpcServer = grpc.NewServer(grpc.UnaryInterceptor(intercept))
} else {
s.grpcServer = grpc.NewServer()
}
pb.RegisterDnsServiceServer(s.grpcServer, s)
if s.tlsConfig != nil {
l = tls.NewListener(l, s.tlsConfig)
}
return s.grpcServer.Serve(l)
}
// ServePacket implements caddy.UDPServer interface.
func (s *ServergRPC) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServergRPC) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportGRPC+"://"):])
if err != nil {
return nil, err
}
return l, nil
}
// ListenPacket implements caddy.UDPServer interface.
func (s *ServergRPC) ListenPacket() (net.PacketConn, error) { return nil, nil }
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming Quiet is false.
func (s *ServergRPC) OnStartupComplete() {
if Quiet {
return
}
out := startUpZones(TransportGRPC+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}
// Stop stops the server. It blocks until the server is
// totally stopped.
func (s *ServergRPC) Stop() (err error) {
s.m.Lock()
defer s.m.Unlock()
if s.grpcServer != nil {
s.grpcServer.GracefulStop()
}
return
}
// Query is the main entry-point into the gRPC server. From here we call ServeDNS like
// any normal server. We use a custom responseWriter to pick up the bytes we need to write
// back to the client as a protobuf.
func (s *ServergRPC) Query(ctx context.Context, in *pb.DnsPacket) (*pb.DnsPacket, error) {
msg := new(dns.Msg)
err := msg.Unpack(in.Msg)
if err != nil {
return nil, err
}
p, ok := peer.FromContext(ctx)
if !ok {
return nil, errors.New("no peer in gRPC context")
}
a, ok := p.Addr.(*net.TCPAddr)
if !ok {
return nil, fmt.Errorf("no TCP peer in gRPC context: %v", p.Addr)
}
w := &gRPCresponse{localAddr: s.listenAddr, remoteAddr: a, Msg: msg}
s.ServeDNS(ctx, w, msg)
packed, err := w.Msg.Pack()
if err != nil {
return nil, err
}
return &pb.DnsPacket{Msg: packed}, nil
}
// Shutdown stops the server (non gracefully).
func (s *ServergRPC) Shutdown() error {
if s.grpcServer != nil {
s.grpcServer.Stop()
}
return nil
}
type gRPCresponse struct {
localAddr net.Addr
remoteAddr net.Addr
Msg *dns.Msg
}
// Write is the hack that makes this work. It does not actually write the message
// but unpacks the bytes into r.Msg. We can then pick this up in Query
// and write a proper protobuf back to the client.
func (r *gRPCresponse) Write(b []byte) (int, error) {
r.Msg = new(dns.Msg)
return len(b), r.Msg.Unpack(b)
}
// These methods implement the dns.ResponseWriter interface from Go DNS.
func (r *gRPCresponse) Close() error { return nil }
func (r *gRPCresponse) TsigStatus() error { return nil }
func (r *gRPCresponse) TsigTimersOnly(b bool) { return }
func (r *gRPCresponse) Hijack() { return }
func (r *gRPCresponse) LocalAddr() net.Addr { return r.localAddr }
func (r *gRPCresponse) RemoteAddr() net.Addr { return r.remoteAddr }
func (r *gRPCresponse) WriteMsg(m *dns.Msg) error { r.Msg = m; return nil }
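A hedged client-side sketch of querying this DNS-over-gRPC endpoint. It assumes the generated `pb` package exposes `NewDnsServiceClient`, as is standard for protoc-gen-go, and uses an illustrative plaintext listener on 127.0.0.1:1443:
~~~ go
package main

import (
	"context"
	"log"

	"github.com/coredns/coredns/pb"
	"github.com/miekg/dns"
	"google.golang.org/grpc"
)

func main() {
	// Dial the gRPC listener; WithInsecure assumes the server block has no tls directive.
	conn, err := grpc.Dial("127.0.0.1:1443", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Pack a normal DNS query into wire format.
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	packed, err := m.Pack()
	if err != nil {
		log.Fatal(err)
	}

	// The Query RPC mirrors ServergRPC.Query above: raw DNS bytes in, raw DNS bytes out.
	client := pb.NewDnsServiceClient(conn) // assumed protoc-generated constructor
	reply, err := client.Query(context.Background(), &pb.DnsPacket{Msg: packed})
	if err != nil {
		log.Fatal(err)
	}

	resp := new(dns.Msg)
	if err := resp.Unpack(reply.Msg); err != nil {
		log.Fatal(err)
	}
	log.Println(resp)
}
~~~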

View File

@@ -0,0 +1,146 @@
package dnsserver
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"strconv"
"github.com/miekg/dns"
)
// ServerHTTPS represents an instance of a DNS-over-HTTPS server.
type ServerHTTPS struct {
*Server
httpsServer *http.Server
listenAddr net.Addr
tlsConfig *tls.Config
}
// NewServerHTTPS returns a new CoreDNS HTTPS server and compiles all plugins into it.
func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) {
s, err := NewServer(addr, group)
if err != nil {
return nil, err
}
// The *tls* plugin must make sure that multiple conflicting
// TLS configurations return an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
sh := &ServerHTTPS{Server: s, tlsConfig: tlsConfig, httpsServer: new(http.Server)}
sh.httpsServer.Handler = sh
return sh, nil
}
// Serve implements caddy.TCPServer interface.
func (s *ServerHTTPS) Serve(l net.Listener) error {
s.m.Lock()
s.listenAddr = l.Addr()
s.m.Unlock()
if s.tlsConfig != nil {
l = tls.NewListener(l, s.tlsConfig)
}
return s.httpsServer.Serve(l)
}
// ServePacket implements caddy.UDPServer interface.
func (s *ServerHTTPS) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServerHTTPS) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportHTTPS+"://"):])
if err != nil {
return nil, err
}
return l, nil
}
// ListenPacket implements caddy.UDPServer interface.
func (s *ServerHTTPS) ListenPacket() (net.PacketConn, error) { return nil, nil }
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming Quiet is false.
func (s *ServerHTTPS) OnStartupComplete() {
if Quiet {
return
}
out := startUpZones(TransportHTTPS+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}
// Stop stops the server. It blocks until the server is totally stopped.
func (s *ServerHTTPS) Stop() error {
s.m.Lock()
defer s.m.Unlock()
if s.httpsServer != nil {
s.httpsServer.Shutdown(context.Background())
}
return nil
}
// ServeHTTP is the handler that gets the HTTP request, converts it to the DNS format, calls the plugin
// chain, converts the reply back and writes it to the client.
func (s *ServerHTTPS) ServeHTTP(w http.ResponseWriter, r *http.Request) {
msg := new(dns.Msg)
var err error
if r.URL.Path != pathDOH {
http.Error(w, "", http.StatusNotFound)
return
}
switch r.Method {
case http.MethodPost:
msg, err = postRequestToMsg(r)
case http.MethodGet:
msg, err = getRequestToMsg(r)
default:
http.Error(w, "", http.StatusMethodNotAllowed)
return
}
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Create a DoHWriter with the correct addresses in it.
h, p, _ := net.SplitHostPort(r.RemoteAddr)
port, _ := strconv.Atoi(p)
dw := &DoHWriter{laddr: s.listenAddr, raddr: &net.TCPAddr{IP: net.ParseIP(h), Port: port}}
// We just call the normal chain handler - all error handling is done there.
// We should expect a packet to be returned that we can send to the client.
s.ServeDNS(context.Background(), dw, msg)
buf, _ := dw.Msg.Pack()
w.Header().Set("Content-Type", mimeTypeDOH)
w.Header().Set("Cache-Control", "max-age=128") // TODO(issues/1823): implement proper fix.
w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
w.WriteHeader(http.StatusOK)
w.Write(buf)
}
// Shutdown stops the server (non gracefully).
func (s *ServerHTTPS) Shutdown() error {
if s.httpsServer != nil {
s.httpsServer.Shutdown(context.Background())
}
return nil
}
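A hedged client-side sketch of the GET form of DoH that `ServeHTTP` accepts: the wire-format query is base64url-encoded (unpadded) into the `dns` parameter of `/dns-query`. The hostname is illustrative and TLS setup is left to the default client:
~~~ go
package main

import (
	"encoding/base64"
	"io/ioutil"
	"log"
	"net/http"

	"github.com/miekg/dns"
)

func main() {
	// Pack a normal DNS query into wire format.
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	packed, err := m.Pack()
	if err != nil {
		log.Fatal(err)
	}

	// GET form: base64url (no padding) in the "dns" parameter of /dns-query.
	url := "https://dns.example.com/dns-query?dns=" + base64.RawURLEncoding.EncodeToString(packed)
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("accept", "application/dns-message")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The response body is again a wire-format DNS message.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	answer := new(dns.Msg)
	if err := answer.Unpack(body); err != nil {
		log.Fatal(err)
	}
	log.Println(answer)
}
~~~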

View File

@@ -0,0 +1,82 @@
package dnsserver
import (
"context"
"testing"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/test"
"github.com/miekg/dns"
)
type testPlugin struct{}
func (tp testPlugin) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
return 0, nil
}
func (tp testPlugin) Name() string { return "testplugin" }
func testConfig(transport string, p plugin.Handler) *Config {
c := &Config{
Zone: "example.com.",
Transport: transport,
ListenHosts: []string{"127.0.0.1"},
Port: "53",
Debug: false,
}
c.AddPlugin(func(next plugin.Handler) plugin.Handler { return p })
return c
}
func TestNewServer(t *testing.T) {
_, err := NewServer("127.0.0.1:53", []*Config{testConfig("dns", testPlugin{})})
if err != nil {
t.Errorf("Expected no error for NewServer, got %s", err)
}
_, err = NewServergRPC("127.0.0.1:53", []*Config{testConfig("grpc", testPlugin{})})
if err != nil {
t.Errorf("Expected no error for NewServergRPC, got %s", err)
}
_, err = NewServerTLS("127.0.0.1:53", []*Config{testConfig("tls", testPlugin{})})
if err != nil {
t.Errorf("Expected no error for NewServerTLS, got %s", err)
}
}
func TestIncrementDepthAndCheck(t *testing.T) {
ctx := context.Background()
var err error
for i := 0; i <= maxreentries; i++ {
ctx, err = incrementDepthAndCheck(ctx)
if err != nil {
t.Errorf("Expected no error for depthCheck (i=%v), got %s", i, err)
}
}
_, err = incrementDepthAndCheck(ctx)
if err == nil {
t.Errorf("Expected error for depthCheck (i=%v)", maxreentries+1)
}
}
func BenchmarkCoreServeDNS(b *testing.B) {
s, err := NewServer("127.0.0.1:53", []*Config{testConfig("dns", testPlugin{})})
if err != nil {
b.Errorf("Expected no error for NewServer, got %s", err)
}
ctx := context.TODO()
w := &test.ResponseWriter{}
m := new(dns.Msg)
m.SetQuestion("aaa.example.com.", dns.TypeTXT)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.ServeDNS(ctx, w, m)
}
}

View File

@@ -0,0 +1,80 @@
package dnsserver
import (
"context"
"crypto/tls"
"fmt"
"net"
"github.com/miekg/dns"
)
// ServerTLS represents an instance of a DNS-over-TLS server.
type ServerTLS struct {
*Server
tlsConfig *tls.Config
}
// NewServerTLS returns a new CoreDNS TLS server and compiles all plugins into it.
func NewServerTLS(addr string, group []*Config) (*ServerTLS, error) {
s, err := NewServer(addr, group)
if err != nil {
return nil, err
}
// The *tls* plugin must make sure that multiple conflicting
// TLS configurations return an error: it can only be specified once.
var tlsConfig *tls.Config
for _, conf := range s.zones {
// Should we error if some configs *don't* have TLS?
tlsConfig = conf.TLSConfig
}
return &ServerTLS{Server: s, tlsConfig: tlsConfig}, nil
}
// Serve implements caddy.TCPServer interface.
func (s *ServerTLS) Serve(l net.Listener) error {
s.m.Lock()
if s.tlsConfig != nil {
l = tls.NewListener(l, s.tlsConfig)
}
// Only fill out the TCP server for this one.
s.server[tcp] = &dns.Server{Listener: l, Net: "tcp-tls", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
ctx := context.Background()
s.ServeDNS(ctx, w, r)
})}
s.m.Unlock()
return s.server[tcp].ActivateAndServe()
}
// ServePacket implements caddy.UDPServer interface.
func (s *ServerTLS) ServePacket(p net.PacketConn) error { return nil }
// Listen implements caddy.TCPServer interface.
func (s *ServerTLS) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", s.Addr[len(TransportTLS+"://"):])
if err != nil {
return nil, err
}
return l, nil
}
// ListenPacket implements caddy.UDPServer interface.
func (s *ServerTLS) ListenPacket() (net.PacketConn, error) { return nil, nil }
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming Quiet is false.
func (s *ServerTLS) OnStartupComplete() {
if Quiet {
return
}
out := startUpZones(TransportTLS+"://", s.Addr, s.zones)
if out != "" {
fmt.Print(out)
}
return
}

View File

@@ -0,0 +1,47 @@
// generated by directives_generate.go; DO NOT EDIT
package dnsserver
// Directives are registered in the order they should be
// executed.
//
// Ordering is VERY important. Every plugin will
// feel the effects of all other plugins below
// (after) them during a request, but they must not
// care what plugins above them are doing.
var Directives = []string{
"tls",
"reload",
"nsid",
"root",
"bind",
"debug",
"trace",
"health",
"pprof",
"prometheus",
"errors",
"log",
"dnstap",
"chaos",
"loadbalance",
"cache",
"rewrite",
"dnssec",
"autopath",
"reverse",
"template",
"hosts",
"route53",
"federation",
"kubernetes",
"file",
"auto",
"secondary",
"etcd",
"forward",
"proxy",
"erratic",
"whoami",
"on",
}

View File

@@ -0,0 +1,41 @@
// generated by directives_generate.go; DO NOT EDIT
package plugin
import (
// Include all plugins.
_ "github.com/coredns/coredns/plugin/auto"
_ "github.com/coredns/coredns/plugin/autopath"
_ "github.com/coredns/coredns/plugin/bind"
_ "github.com/coredns/coredns/plugin/cache"
_ "github.com/coredns/coredns/plugin/chaos"
_ "github.com/coredns/coredns/plugin/debug"
_ "github.com/coredns/coredns/plugin/deprecated"
_ "github.com/coredns/coredns/plugin/dnssec"
_ "github.com/coredns/coredns/plugin/dnstap"
_ "github.com/coredns/coredns/plugin/erratic"
_ "github.com/coredns/coredns/plugin/errors"
_ "github.com/coredns/coredns/plugin/etcd"
_ "github.com/coredns/coredns/plugin/federation"
_ "github.com/coredns/coredns/plugin/file"
_ "github.com/coredns/coredns/plugin/forward"
_ "github.com/coredns/coredns/plugin/health"
_ "github.com/coredns/coredns/plugin/hosts"
_ "github.com/coredns/coredns/plugin/kubernetes"
_ "github.com/coredns/coredns/plugin/loadbalance"
_ "github.com/coredns/coredns/plugin/log"
_ "github.com/coredns/coredns/plugin/metrics"
_ "github.com/coredns/coredns/plugin/nsid"
_ "github.com/coredns/coredns/plugin/pprof"
_ "github.com/coredns/coredns/plugin/proxy"
_ "github.com/coredns/coredns/plugin/reload"
_ "github.com/coredns/coredns/plugin/rewrite"
_ "github.com/coredns/coredns/plugin/root"
_ "github.com/coredns/coredns/plugin/route53"
_ "github.com/coredns/coredns/plugin/secondary"
_ "github.com/coredns/coredns/plugin/template"
_ "github.com/coredns/coredns/plugin/tls"
_ "github.com/coredns/coredns/plugin/trace"
_ "github.com/coredns/coredns/plugin/whoami"
_ "github.com/mholt/caddy/onevent"
)

54
vendor/github.com/coredns/coredns/coredns.1.md generated vendored Normal file
View File

@@ -0,0 +1,54 @@
## Name
*coredns* - pluggable DNS nameserver optimized for service discovery and flexibility.
## Synopsis
*coredns* *[OPTION]*...
## Description
CoreDNS is a DNS server that chains plugins. Each plugin handles a DNS feature, like rewriting
queries, Kubernetes service discovery or just exporting metrics. There are many other plugins,
each described on <https://coredns.io/plugins> and their respective manual pages. Plugins not
bundled in CoreDNS are listed on <https://coredns.io/explugins>.
When started with no options CoreDNS will look for a file named `Corefile` in the current
directory; if found, it will parse its contents and start up accordingly. If no `Corefile` is found,
it will start with the *whoami* plugin (coredns-whoami(7)) and start listening on port 53 (unless
overridden with `-dns.port`).
Available options:
**-conf** **FILE**
: specify the Corefile to load.
**-cpu** **CAP**
: specify maximum CPU capacity in percent.
**-dns.port** **PORT**
: override default port (53) to listen on.
**-pidfile** **FILE**
: write PID to **FILE**.
**-plugins**
: list all plugins and quit.
**-quiet**
: don't print any version and port information on startup.
**-version**
: show version and quit.
## Authors
CoreDNS Authors.
## Copyright
Apache License 2.0
## See Also
Corefile(5) @@PLUGINS@@.

14
vendor/github.com/coredns/coredns/coredns.go generated vendored Normal file
View File

@@ -0,0 +1,14 @@
package main
//go:generate go run directives_generate.go
import (
"github.com/coredns/coredns/coremain"
// Plug in CoreDNS
_ "github.com/coredns/coredns/core/plugin"
)
func main() {
coremain.Run()
}

115
vendor/github.com/coredns/coredns/corefile.5.md generated vendored Normal file
View File

@@ -0,0 +1,115 @@
## Name
*corefile* - configuration file for CoreDNS
## Description
A *corefile* specifies the (internal) servers CoreDNS should run and what plugins each of these
should chain. The syntax is as follows:
~~~ txt
[SCHEME://]ZONE [[SCHEME://]ZONE]...[:PORT] {
[PLUGIN]...
}
~~~
The **ZONE** defines for which names this server should be called; multiple zones are allowed and
should be *white space* separated. You can use a "reverse" syntax to specify a reverse zone (i.e.
ip6.arpa and in-addr.arpa) by using an IP address in CIDR notation. The optional **SCHEME**
defaults to `dns://`, but can also be `tls://` (DNS over TLS) or `grpc://` (DNS over gRPC).
Specifying a **ZONE** *and* **PORT** combination multiple times for *different* servers will lead to
an error on startup.
When a query comes in, it is matched against all zones of all servers; the server with the longest
match on the query name will receive the query. For example, with servers for `example.org` and
`org`, a query for `www.example.org` is handled by the `example.org` server.
The optional **PORT** controls the port the server will bind to; this defaults to 53. If you use
a port number here, you *can't* override it with `-dns.port` (coredns(1)).
**PLUGIN** defines the plugin(s) we want to load into this server. This is optional as well, but a
server with no plugins will just return SERVFAIL for all queries. Each plugin can have a number of
properties that can take arguments; see the documentation for each plugin.
Comments begin with an unquoted hash `#` and continue to the end of the line. Comments may be
started anywhere on a line.
Environment variables are supported and either the Unix or Windows form may be used: `{$ENV_VAR_1}`
or `{%ENV_VAR_2%}`.
You can use the `import` "plugin" to include parts of other files, see <https://coredns.io/explugins/import>.
If CoreDNS can't find a Corefile to load, it loads the following built-in one:
~~~ Corefile
. {
whoami
}
~~~
## Examples
The **ZONE** is the root zone `.`, the **PLUGIN** is chaos. The chaos plugin takes an argument:
`CoreDNS-001`. This text is returned on a CH class query: `dig CH txt version.bind @localhost`.
~~~ Corefile
. {
chaos CoreDNS-001
}
~~~
When defining a new zone, you either create a new server, or add it to an existing one. Here we
define two zones, each with its own server block, that can potentially chain different plugins:
~~~ Corefile
example.org {
whoami
}
org {
whoami
}
~~~
Is identical to:
~~~ Corefile
example.org org {
whoami
}
~~~
Reverse zones can be specified as domain names:
~~~ Corefile
0.0.10.in-addr.arpa {
whoami
}
~~~
or by just using the CIDR notation:
~~~ Corefile
10.0.0.0/24 {
whoami
}
~~~
This also works on a non-octet boundary:
~~~ Corefile
10.0.0.0/27 {
whoami
}
~~~
## Authors
CoreDNS Authors.
## Copyright
Apache License 2.0
## See Also
The manual page for CoreDNS: coredns(1) and more documentation on <https://coredns.io>.

271
vendor/github.com/coredns/coredns/coremain/run.go generated vendored Normal file
View File

@@ -0,0 +1,271 @@
// Package coremain contains the functions for starting CoreDNS.
package coremain
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"runtime"
"strconv"
"strings"
"github.com/coredns/coredns/core/dnsserver"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/mholt/caddy"
)
func init() {
caddy.DefaultConfigFile = "Corefile"
caddy.Quiet = true // don't show init stuff from caddy
setVersion()
flag.StringVar(&conf, "conf", "", "Corefile to load (default \""+caddy.DefaultConfigFile+"\")")
flag.StringVar(&cpu, "cpu", "100%", "CPU cap")
flag.BoolVar(&plugins, "plugins", false, "List installed plugins")
flag.StringVar(&caddy.PidFile, "pidfile", "", "Path to write pid file")
flag.BoolVar(&version, "version", false, "Show version")
flag.BoolVar(&dnsserver.Quiet, "quiet", false, "Quiet mode (no initialization output)")
flag.BoolVar(&logfile, "log", false, "Log to standard output") // noop for 1.1.4; drop in 1.2.0.
caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader))
caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader))
caddy.AppName = coreName
caddy.AppVersion = CoreVersion
}
// Run is CoreDNS's main() function.
func Run() {
caddy.TrapSignals()
// Reset flag.CommandLine to get rid of unwanted flags, for instance from glog (used in kubernetes).
// And re-add the ones we want to keep.
flag.VisitAll(func(f *flag.Flag) {
if _, ok := flagsBlacklist[f.Name]; ok {
return
}
flagsToKeep = append(flagsToKeep, f)
})
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
for _, f := range flagsToKeep {
flag.Var(f.Value, f.Name, f.Usage)
}
flag.Parse()
if len(flag.Args()) > 0 {
mustLogFatal(fmt.Errorf("extra command line arguments: %s", flag.Args()))
}
log.SetOutput(os.Stdout)
log.SetFlags(log.LstdFlags)
if version {
showVersion()
os.Exit(0)
}
if plugins {
fmt.Println(caddy.DescribePlugins())
os.Exit(0)
}
// Set CPU cap
if err := setCPU(cpu); err != nil {
mustLogFatal(err)
}
// Get Corefile input
corefile, err := caddy.LoadCaddyfile(serverType)
if err != nil {
mustLogFatal(err)
}
// Start your engines
instance, err := caddy.Start(corefile)
if err != nil {
mustLogFatal(err)
}
logVersion()
if !dnsserver.Quiet {
showVersion()
}
// Execute instantiation events
caddy.EmitEvent(caddy.InstanceStartupEvent, instance)
// Twiddle your thumbs
instance.Wait()
}
// mustLogFatal wraps log.Fatal() in a way that ensures the
// output is always printed to stderr so the user can see it
// if the user is still there, even if the process log was not
// enabled. If this process is an upgrade, however, and the user
// might not be there anymore, this just logs to the process
// log and exits.
func mustLogFatal(args ...interface{}) {
if !caddy.IsUpgrade() {
log.SetOutput(os.Stderr)
}
log.Fatal(args...)
}
// confLoader loads the Caddyfile using the -conf flag.
func confLoader(serverType string) (caddy.Input, error) {
if conf == "" {
return nil, nil
}
if conf == "stdin" {
return caddy.CaddyfileFromPipe(os.Stdin, serverType)
}
contents, err := ioutil.ReadFile(conf)
if err != nil {
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: conf,
ServerTypeName: serverType,
}, nil
}
// defaultLoader loads the Corefile from the current working directory.
func defaultLoader(serverType string) (caddy.Input, error) {
contents, err := ioutil.ReadFile(caddy.DefaultConfigFile)
if err != nil {
if os.IsNotExist(err) {
return nil, nil
}
return nil, err
}
return caddy.CaddyfileInput{
Contents: contents,
Filepath: caddy.DefaultConfigFile,
ServerTypeName: serverType,
}, nil
}
// logVersion logs the version that is starting.
func logVersion() {
clog.Info(versionString())
clog.Info(releaseString())
}
// showVersion prints the version that is starting.
func showVersion() {
fmt.Print(versionString())
fmt.Print(releaseString())
if devBuild && gitShortStat != "" {
fmt.Printf("%s\n%s\n", gitShortStat, gitFilesModified)
}
}
// versionString returns the CoreDNS version as a string.
func versionString() string {
return fmt.Sprintf("%s-%s\n", caddy.AppName, caddy.AppVersion)
}
// releaseString returns the release information related to CoreDNS version:
// <OS>/<ARCH>, <go version>, <commit>
// e.g.,
// linux/amd64, go1.8.3, a6d2d7b5
func releaseString() string {
return fmt.Sprintf("%s/%s, %s, %s\n", runtime.GOOS, runtime.GOARCH, runtime.Version(), GitCommit)
}
// setVersion figures out the version information
// based on variables set by -ldflags.
func setVersion() {
// A development build is one that's not at a tag or has uncommitted changes
devBuild = gitTag == "" || gitShortStat != ""
// Only set the appVersion if -ldflags was used
if gitNearestTag != "" || gitTag != "" {
if devBuild && gitNearestTag != "" {
appVersion = fmt.Sprintf("%s (+%s %s)",
strings.TrimPrefix(gitNearestTag, "v"), GitCommit, buildDate)
} else if gitTag != "" {
appVersion = strings.TrimPrefix(gitTag, "v")
}
}
}
// setCPU parses string cpu and sets GOMAXPROCS
// according to its value. It accepts either
// a number (e.g. 3) or a percent (e.g. 50%).
func setCPU(cpu string) error {
var numCPU int
availCPU := runtime.NumCPU()
if strings.HasSuffix(cpu, "%") {
// Percent
var percent float32
pctStr := cpu[:len(cpu)-1]
pctInt, err := strconv.Atoi(pctStr)
if err != nil || pctInt < 1 || pctInt > 100 {
return errors.New("invalid CPU value: percentage must be between 1-100")
}
percent = float32(pctInt) / 100
numCPU = int(float32(availCPU) * percent)
} else {
// Number
num, err := strconv.Atoi(cpu)
if err != nil || num < 1 {
return errors.New("invalid CPU value: provide a number or percent greater than 0")
}
numCPU = num
}
if numCPU > availCPU {
numCPU = availCPU
}
runtime.GOMAXPROCS(numCPU)
return nil
}
// Flags that control program flow or startup
var (
conf string
cpu string
logfile bool
version bool
plugins bool
)
// Build information obtained with the help of -ldflags
var (
appVersion = "(untracked dev build)" // inferred at startup
devBuild = true // inferred at startup
buildDate string // date -u
gitTag string // git describe --exact-match HEAD 2> /dev/null
gitNearestTag string // git describe --abbrev=0 --tags HEAD
gitShortStat string // git diff-index --shortstat
gitFilesModified string // git diff-index --name-only HEAD
// GitCommit contains the commit that CoreDNS was built from.
GitCommit string
)
// flagsBlacklist removes flags with these names from our flagset.
var flagsBlacklist = map[string]bool{
"logtostderr": true,
"alsologtostderr": true,
"v": true,
"stderrthreshold": true,
"vmodule": true,
"log_backtrace_at": true,
"log_dir": true,
}
var flagsToKeep []*flag.Flag

44
vendor/github.com/coredns/coredns/coremain/run_test.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package coremain
import (
"runtime"
"testing"
)
func TestSetCPU(t *testing.T) {
currentCPU := runtime.GOMAXPROCS(-1)
maxCPU := runtime.NumCPU()
halfCPU := int(0.5 * float32(maxCPU))
if halfCPU < 1 {
halfCPU = 1
}
for i, test := range []struct {
input string
output int
shouldErr bool
}{
{"1", 1, false},
{"-1", currentCPU, true},
{"0", currentCPU, true},
{"100%", maxCPU, false},
{"50%", halfCPU, false},
{"110%", currentCPU, true},
{"-10%", currentCPU, true},
{"invalid input", currentCPU, true},
{"invalid input%", currentCPU, true},
{"9999", maxCPU, false}, // over available CPU
} {
err := setCPU(test.input)
if test.shouldErr && err == nil {
t.Errorf("Test %d: Expected error, but there wasn't any", i)
}
if !test.shouldErr && err != nil {
t.Errorf("Test %d: Expected no error, but there was one: %v", i, err)
}
if actual, expected := runtime.GOMAXPROCS(-1), test.output; actual != expected {
t.Errorf("Test %d: GOMAXPROCS was %d but expected %d", i, actual, expected)
}
// teardown
runtime.GOMAXPROCS(currentCPU)
}
}

View File

@@ -0,0 +1,8 @@
package coremain
// Various CoreDNS constants.
const (
CoreVersion = "1.1.4"
coreName = "CoreDNS"
serverType = "dns"
)

View File

@@ -0,0 +1,115 @@
//+build ignore
package main
import (
"bufio"
"go/format"
"io/ioutil"
"log"
"os"
"strings"
)
func main() {
mi := make(map[string]string, 0)
md := []string{}
file, err := os.Open(pluginFile)
if err != nil {
log.Fatalf("Failed to open %s: %q", pluginFile, err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
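	// Each non-comment line of plugin.cfg is expected to be of the form "name:repo";
	// lines starting with "#" and lines that do not contain exactly one ":" are skipped.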
for scanner.Scan() {
line := scanner.Text()
if strings.HasPrefix(line, "#") {
continue
}
items := strings.Split(line, ":")
if len(items) != 2 {
// ignore empty lines
continue
}
name, repo := items[0], items[1]
if _, ok := mi[name]; ok {
log.Fatalf("Duplicate entry %q", name)
}
md = append(md, name)
mi[name] = pluginPath + repo // Default, unless overridden by 3rd arg
if _, err := os.Stat(pluginFSPath + repo); err != nil { // External package has been given
mi[name] = repo
}
}
genImports("core/plugin/zplugin.go", "plugin", mi)
genDirectives("core/dnsserver/zdirectives.go", "dnsserver", md)
}
func genImports(file, pack string, mi map[string]string) {
outs := header + "package " + pack + "\n\n" + "import ("
if len(mi) > 0 {
outs += "\n"
}
outs += "// Include all plugins.\n"
for _, v := range mi {
outs += `_ "` + v + `"` + "\n"
}
outs += ")\n"
if err := formatAndWrite(file, outs); err != nil {
log.Fatalf("Failed to format and write: %q", err)
}
}
func genDirectives(file, pack string, md []string) {
outs := header + "package " + pack + "\n\n"
outs += `
// Directives are registered in the order they should be
// executed.
//
// Ordering is VERY important. Every plugin will
// feel the effects of all other plugins below
// (after) them during a request, but they must not
// care what the plugins above them are doing.
var Directives = []string{
`
for i := range md {
outs += `"` + md[i] + `",` + "\n"
}
outs += "}\n"
if err := formatAndWrite(file, outs); err != nil {
log.Fatalf("Failed to format and write: %q", err)
}
}
func formatAndWrite(file string, data string) error {
res, err := format.Source([]byte(data))
if err != nil {
return err
}
if err = ioutil.WriteFile(file, res, 0644); err != nil {
return err
}
return nil
}
const (
pluginPath = "github.com/coredns/coredns/plugin/"
pluginFile = "plugin.cfg"
pluginFSPath = "plugin/" // Where the plugins are located on the file system
header = "// generated by directives_generate.go; DO NOT EDIT\n\n"
)

93
vendor/github.com/coredns/coredns/man/coredns-auto.7 generated vendored Normal file
View File

@@ -0,0 +1,93 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-AUTO" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIauto\fR \- enables serving zone data from an RFC 1035\-style master file, which is automatically picked up from disk\.
.
.SH "DESCRIPTION"
The \fIauto\fR plugin is used for an "old\-style" DNS server\. It serves from a preloaded file that exists on disk\. If the zone file contains signatures (i\.e\. is signed, i\.e\. using DNSSEC) correct DNSSEC answers are returned\. Only NSEC is supported! If you use this setup \fIyou\fR are responsible for re\-signing the zonefile\. New or changed zones are automatically picked up from disk\.
.
.SH "SYNTAX"
.
.nf
auto [ZONES\.\.\.] {
directory DIR [REGEXP ORIGIN_TEMPLATE [TIMEOUT]]
no_reload
upstream [ADDRESS\.\.\.]
}
.
.fi
.
.P
\fBZONES\fR zones it should be authoritative for\. If empty, the zones from the configuration block are used\.
.
.IP "\(bu" 4
\fBdirectory\fR loads zones from the specified \fBDIR\fR\. If a file name matches \fBREGEXP\fR it will be used to extract the origin\. \fBORIGIN_TEMPLATE\fR will be used as a template for the origin\. Strings like \fB{<number>}\fR are replaced with the respective matches in the file name, e\.g\. \fB{1}\fR is the first match, \fB{2}\fR is the second\. The default is: \fBdb\e\.(\.*) {1}\fR i\.e\. from a file with the name \fBdb\.example\.com\fR, the extracted origin will be \fBexample\.com\fR\. \fBTIMEOUT\fR specifies how often CoreDNS should scan the directory; the default is every 60 seconds\. This value is in seconds\. The minimum value is 1 second\.
.
.IP "\(bu" 4
\fBno_reload\fR by default CoreDNS will try to reload a zone every minute and reloads if the SOA\'s serial has changed\. This option disables that behavior\.
.
.IP "\(bu" 4
\fBupstream\fR defines upstream resolvers to be used to resolve external names (think CNAMEs) pointing to external names\. \fBADDRESS\fR can be an IP address, an IP:port or a string pointing to a file that is structured as /etc/resolv\.conf\. If no \fBADDRESS\fR is given, CoreDNS will resolve CNAMEs against itself\.
.
.IP "" 0
.
.P
All directives from the \fIfile\fR plugin are supported\. Note that \fIauto\fR will load all zones found, even though the directive might only receive queries for a specific zone\. I\.e:
.
.IP "" 4
.
.nf
\&\. {
auto example\.org {
directory /etc/coredns/zones
}
}
.
.fi
.
.IP "" 0
.
.P
Will happily pick up a zone for \fBexample\.COM\fR, except it will never be queried, because the \fIauto\fR directive is only authoritative for \fBexample\.ORG\fR\.
.
.SH "EXAMPLES"
Load \fBorg\fR domains from \fB/etc/coredns/zones/org\fR and allow transfers to the internet, but send notifies to 10\.240\.1\.1
.
.IP "" 4
.
.nf
\&\. {
auto org {
directory /etc/coredns/zones/org
transfer to *
transfer to 10\.240\.1\.1
}
}
.
.fi
.
.IP "" 0
.
.P
Load \fBorg\fR domains from \fB/etc/coredns/zones/org\fR and look for file names such as \fBwww\.db\.example\.org\fR, where \fBexample\.org\fR is the origin\. Scan every 45 seconds\.
.
.IP "" 4
.
.nf
org {
auto {
directory /etc/coredns/zones/org www\e\.db\e\.(\.*) {1} 45
}
}
.
.fi
.
.IP "" 0

View File

@@ -0,0 +1,64 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-AUTOPATH" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIautopath\fR \- allows for server\-side search path completion\.
.
.SH "DESCRIPTION"
If it sees a query that matches the first element of the configured search path, \fIautopath\fR will follow the chain of search path elements and return the first reply that is not NXDOMAIN\. On any failures, the original reply is returned\. Because \fIautopath\fR returns a reply for a name that wasn\'t the original question it will add a CNAME that points from the original name (with the search path element in it) to the name of this answer\.
.
.SH "SYNTAX"
.
.nf
autopath [ZONE\.\.\.] RESOLV\-CONF
.
.fi
.
.IP "\(bu" 4
\fBZONES\fR zones \fIautopath\fR should be authoritative for\.
.
.IP "\(bu" 4
\fBRESOLV\-CONF\fR points to a \fBresolv\.conf\fR like file or uses a special syntax to point to another plugin\. For instance \fB@kubernetes\fR, will call out to the kubernetes plugin (for each query) to retrieve the search list it should use\.
.
.IP "" 0
.
.P
If a plugin implements the \fBAutoPather\fR interface then it can be used\.
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metric is exported:
.
.IP "\(bu" 4
\fBcoredns_autopath_success_count_total{server}\fR \- counter of successfully autopath\-ed queries\.
.
.IP "" 0
.
.P
The \fBserver\fR label is explained in the \fImetrics\fR plugin documentation\.
.
.SH "EXAMPLES"
.
.nf
autopath my\-resolv\.conf
.
.fi
.
.P
Use \fBmy\-resolv\.conf\fR as the file to get the search path from\. This file only needs to have one line: \fBsearch domain1 domain2 \.\.\.\fR
.
.IP "" 4
.
.nf
autopath @kubernetes
.
.fi
.
.IP "" 0
.
.P
Use the search path dynamically retrieved from the \fIkubernetes\fR plugin\.
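.
.P
As a fuller sketch (not part of the upstream examples), the two plugins can be combined in one server block; this assumes the \fIkubernetes\fR plugin serves the same zone and has \fBpods verified\fR enabled, which the Kubernetes\-backed search list typically requires:
.
.IP "" 4
.
.nf
cluster\.local {
    kubernetes {
        pods verified
    }
    autopath @kubernetes
}
.
.fi
.
.IP "" 0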

74
vendor/github.com/coredns/coredns/man/coredns-bind.7 generated vendored Normal file
View File

@@ -0,0 +1,74 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-BIND" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIbind\fR \- overrides the host to which the server should bind\.
.
.SH "DESCRIPTION"
Normally, the listener binds to the wildcard host\. However, you may want the listener to bind to another IP instead\.
.
.P
If several addresses are provided, a listener will be opened on each of the IPs provided\.
.
.P
Each address has to be an IP of one of the interfaces of the host\.
.
.SH "SYNTAX"
.
.nf
bind ADDRESS \.\.\.
.
.fi
.
.P
\fBADDRESS\fR is an IP address to bind to\. When several addresses are provided a listener will be opened on each of the addresses\.
.
.SH "EXAMPLES"
To make your socket accessible only to that machine, bind to IP 127\.0\.0\.1 (localhost):
.
.IP "" 4
.
.nf
\&\. {
bind 127\.0\.0\.1
}
.
.fi
.
.IP "" 0
.
.P
To allow processing DNS requests only from the local host, on both the IPv4 and IPv6 stacks, use the syntax:
.
.IP "" 4
.
.nf
\&\. {
bind 127\.0\.0\.1 ::1
}
.
.fi
.
.IP "" 0
.
.P
If the configuration contains several \fIbind\fR directives, all the addresses are consolidated together\. The following sample is equivalent to the preceding one:
.
.IP "" 4
.
.nf
\&\. {
bind 127\.0\.0\.1
bind ::1
}
.
.fi
.
.IP "" 0

122
vendor/github.com/coredns/coredns/man/coredns-cache.7 generated vendored Normal file
View File

@@ -0,0 +1,122 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-CACHE" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIcache\fR \- enables a frontend cache\.
.
.SH "DESCRIPTION"
With \fIcache\fR enabled, all records except zone transfers and metadata records will be cached for up to 3600s\. Caching is mostly useful in a scenario when fetching data from the backend (upstream, database, etc\.) is expensive\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
cache [TTL] [ZONES\.\.\.]
.
.fi
.
.IP "\(bu" 4
\fBTTL\fR max TTL in seconds\. If not specified, the maximum TTL will be used, which is 3600 for noerror responses and 1800 for denial of existence ones\. Setting a TTL of 300: \fBcache 300\fR would cache records up to 300 seconds\.
.
.IP "\(bu" 4
\fBZONES\fR zones it should cache for\. If empty, the zones from the configuration block are used\.
.
.IP "" 0
.
.P
Each element in the cache is cached according to its TTL (with \fBTTL\fR as the max)\. For the negative cache, the SOA\'s MinTTL value is used\. A TTL of zero is not allowed\. A cache is divided into 256 shards, each holding up to 512 items by default \- for a total size of 256 * 512 = 131,072 items\.
.
.P
If you want more control:
.
.IP "" 4
.
.nf
cache [TTL] [ZONES\.\.\.] {
success CAPACITY [TTL]
denial CAPACITY [TTL]
prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBTTL\fR and \fBZONES\fR as above\.
.
.IP "\(bu" 4
\fBsuccess\fR, override the settings for caching successful responses\. \fBCAPACITY\fR indicates the maximum number of packets we cache before we start evicting (\fIrandomly\fR)\. \fBTTL\fR overrides the cache maximum TTL\.
.
.IP "\(bu" 4
\fBdenial\fR, override the settings for caching denial of existence responses\. \fBCAPACITY\fR indicates the maximum number of packets we cache before we start evicting (LRU)\. \fBTTL\fR overrides the cache maximum TTL\. There is a third category (\fBerror\fR) but those responses are never cached\.
.
.IP "\(bu" 4
\fBprefetch\fR will prefetch popular items when they are about to be expunged from the cache\. Popular means \fBAMOUNT\fR queries have been seen with no gaps of \fBDURATION\fR or more between them\. \fBDURATION\fR defaults to 1m\. Prefetching will happen when the TTL drops below \fBPERCENTAGE\fR, which defaults to \fB10%\fR, or at the latest 1 second before TTL expiration\. Values should be in the range \fB[10%, 90%]\fR\. Note the percent sign is mandatory\. \fBPERCENTAGE\fR is treated as an \fBint\fR\.
.
.IP "" 0
.
.SH "CAPACITY AND EVICTION"
When specifying \fBCAPACITY\fR, the minimum cache capacity is 131,072\. Specifying a lower value will be ignored\. Specifying a \fBCAPACITY\fR of zero does not disable the cache\.
.
.P
Eviction is done per shard \- i\.e\. when a shard reaches capacity, items are evicted from that shard\. Since shards don\'t fill up perfectly evenly, evictions will occur before the entire cache reaches full capacity\. Each shard capacity is equal to the total cache size / number of shards (256)\.
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metrics are exported:
.
.IP "\(bu" 4
\fBcoredns_cache_size{server, type}\fR \- Total elements in the cache by cache type\.
.
.IP "\(bu" 4
\fBcoredns_cache_hits_total{server, type}\fR \- Counter of cache hits by cache type\.
.
.IP "\(bu" 4
\fBcoredns_cache_misses_total{server}\fR \- Counter of cache misses\.
.
.IP "\(bu" 4
\fBcoredns_cache_drops_total{server}\fR \- Counter of dropped messages\.
.
.IP "" 0
.
.P
Cache types are either "denial" or "success"\. \fBServer\fR is the server handling the request, see the metrics plugin for documentation\.
.
.SH "EXAMPLES"
Enable caching for all zones, but cap everything to a TTL of 10 seconds:
.
.IP "" 4
.
.nf
\&\. {
cache 10
whoami
}
.
.fi
.
.IP "" 0
.
.P
Proxy to Google Public DNS and only cache responses for example\.org (or below)\.
.
.IP "" 4
.
.nf
\&\. {
proxy \. 8\.8\.8\.8:53
cache example\.org
}
.
.fi
.
.IP "" 0

77
vendor/github.com/coredns/coredns/man/coredns-chaos.7 generated vendored Normal file
View File

@@ -0,0 +1,77 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-CHAOS" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIchaos\fR \- allows for responding to TXT queries in the CH class\.
.
.SH "DESCRIPTION"
This is useful for retrieving version or author information from the server by querying a TXT record for a special domainname in the CH class\.
.
.SH "SYNTAX"
.
.nf
chaos [VERSION] [AUTHORS\.\.\.]
.
.fi
.
.IP "\(bu" 4
\fBVERSION\fR is the version to return\. Defaults to \fBCoreDNS\-<version>\fR, if not set\.
.
.IP "\(bu" 4
\fBAUTHORS\fR is what authors to return\. No default\.
.
.IP "" 0
.
.P
Note that you have to make sure that this plugin will get actual queries for the following zones: \fBversion\.bind\fR, \fBversion\.server\fR, \fBauthors\.bind\fR, \fBhostname\.bind\fR and \fBid\.server\fR\.
.
.SH "EXAMPLES"
Specify all the zones in full\.
.
.IP "" 4
.
.nf
version\.bind version\.server authors\.bind hostname\.bind id\.server {
chaos CoreDNS\-001 info@coredns\.io
}
.
.fi
.
.IP "" 0
.
.P
Or just default to \fB\.\fR:
.
.IP "" 4
.
.nf
\&\. {
chaos CoreDNS\-001 info@coredns\.io
}
.
.fi
.
.IP "" 0
.
.P
And test with \fBdig\fR:
.
.IP "" 4
.
.nf
% dig @localhost CH TXT version\.bind
\.\.\.
;; ANSWER SECTION:
version\.bind\. 0 CH TXT "CoreDNS\-001"
\.\.\.
.
.fi
.
.IP "" 0

37
vendor/github.com/coredns/coredns/man/coredns-debug.7 generated vendored Normal file
View File

@@ -0,0 +1,37 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-DEBUG" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIdebug\fR \- disables the automatic recovery upon a crash so that you\'ll get a nice stack trace\.
.
.SH "DESCRIPTION"
Normally CoreDNS will recover from panics; using \fIdebug\fR inhibits this\. The main use of \fIdebug\fR is to help with testing\. A side effect of using \fIdebug\fR is that \fBlog\.Debug\fR and \fBlog\.Debugf\fR output will be printed to standard output\.
.
.P
Note that the \fIerrors\fR plugin (if loaded) will also set a \fBrecover\fR negating this setting\.
.
.SH "SYNTAX"
.
.nf
debug
.
.fi
.
.SH "EXAMPLES"
Disable the ability to recover from crashes and show debug logging:
.
.IP "" 4
.
.nf
\&\. {
debug
}
.
.fi
.
.IP "" 0

107
vendor/github.com/coredns/coredns/man/coredns-dnssec.7 generated vendored Normal file
View File

@@ -0,0 +1,107 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-DNSSEC" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIdnssec\fR \- enable on\-the\-fly DNSSEC signing of served data\.
.
.SH "DESCRIPTION"
With \fIdnssec\fR any reply that doesn\'t (or can\'t) do DNSSEC will get signed on the fly\. Authenticated denial of existence is implemented with NSEC black lies\. Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA)\. NSEC3 is \fInot\fR supported\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
dnssec [ZONES\.\.\. ] {
key file KEY\.\.\.
cache_capacity CAPACITY
}
.
.fi
.
.P
The specified key is used for all signing operations\. The DNSSEC signing will treat this key as a CSK (common signing key), forgoing the ZSK/KSK split\. All signing operations are done online\. Authenticated denial of existence is implemented with NSEC black lies\. Using ECDSA as an algorithm is preferred as this leads to smaller signatures (compared to RSA)\. NSEC3 is \fInot\fR supported\.
.
.P
If multiple \fIdnssec\fR plugins are specified in the same zone, the last one specified will be used (See \fIbugs\fR)\.
.
.IP "\(bu" 4
\fBZONES\fR zones that should be signed\. If empty, the zones from the configuration block are used\.
.
.IP "\(bu" 4
\fBkey file\fR indicates that \fBKEY\fR file(s) should be read from disk\. When multiple keys are specified, RRsets will be signed with all keys\. Generating a key can be done with \fBdnssec\-keygen\fR: \fBdnssec\-keygen \-a ECDSAP256SHA256 <zonename>\fR\. A key created for zone \fIA\fR can be safely used for zone \fIB\fR\. The name of the key file can be specified in one of the following formats
.
.IP "\(bu" 4
basename of the generated key \fBKexample\.org+013+45330\fR
.
.IP "\(bu" 4
generated public key \fBKexample\.org+013+45330\.key\fR
.
.IP "\(bu" 4
generated private key \fBKexample\.org+013+45330\.private\fR
.
.IP "" 0
.
.IP "\(bu" 4
\fBcache_capacity\fR indicates the capacity of the cache\. The dnssec plugin uses a cache to store RRSIGs\. The default for \fBCAPACITY\fR is 10000\.
.
.IP "" 0
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metrics are exported:
.
.IP "\(bu" 4
\fBcoredns_dnssec_cache_size{server, type}\fR \- total elements in the cache, type is "signature"\.
.
.IP "\(bu" 4
\fBcoredns_dnssec_cache_hits_total{server}\fR \- Counter of cache hits\.
.
.IP "\(bu" 4
\fBcoredns_dnssec_cache_misses_total{server}\fR \- Counter of cache misses\.
.
.IP "" 0
.
.P
The label \fBserver\fR indicates the server handling the request; see the \fImetrics\fR plugin for details\.
.
.SH "EXAMPLES"
Sign responses for \fBexample\.org\fR with the key "Kexample\.org\.+013+45330\.key"\.
.
.IP "" 4
.
.nf
example\.org {
dnssec {
key file Kexample\.org\.+013+45330
}
whoami
}
.
.fi
.
.IP "" 0
.
.P
Sign responses for a kubernetes zone with the key "Kcluster\.local+013+45129\.key"\.
.
.IP "" 4
.
.nf
cluster\.local {
kubernetes
dnssec {
key file Kcluster\.local+013+45129
}
}
.
.fi
.
.IP "" 0

139
vendor/github.com/coredns/coredns/man/coredns-dnstap.7 generated vendored Normal file
View File

@@ -0,0 +1,139 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-DNSTAP" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIdnstap\fR \- enable logging to dnstap
.
.SH "DESCRIPTION"
dnstap is a flexible, structured binary log format for DNS software: http://dnstap\.info\. With this plugin you make CoreDNS output dnstap logging\.
.
.P
Note that there is an internal buffer, so expect at least 13 requests before the server sends its dnstap messages to the socket\.
.
.SH "SYNTAX"
.
.nf
dnstap SOCKET [full]
.
.fi
.
.IP "\(bu" 4
\fBSOCKET\fR is the socket path supplied to the dnstap command line tool\.
.
.IP "\(bu" 4
\fBfull\fR to include the wire\-format DNS message\.
.
.IP "" 0
.
.SH "EXAMPLES"
Log information about client requests and responses to \fI/tmp/dnstap\.sock\fR\.
.
.IP "" 4
.
.nf
dnstap /tmp/dnstap\.sock
.
.fi
.
.IP "" 0
.
.P
Log information including the wire\-format DNS message about client requests and responses to \fI/tmp/dnstap\.sock\fR\.
.
.IP "" 4
.
.nf
dnstap unix:///tmp/dnstap\.sock full
.
.fi
.
.IP "" 0
.
.P
Log to a remote endpoint\.
.
.IP "" 4
.
.nf
dnstap tcp://127\.0\.0\.1:6000 full
.
.fi
.
.IP "" 0
.
.SH "COMMAND LINE TOOL"
Dnstap has a command line tool that can be used to inspect the logging\. The tool can be found on GitHub: \fIhttps://github\.com/dnstap/golang\-dnstap\fR\. It\'s written in Go\.
.
.P
The following command listens on the given socket and decodes messages to stdout\.
.
.IP "" 4
.
.nf
$ dnstap \-u /tmp/dnstap\.sock
.
.fi
.
.IP "" 0
.
.P
The following command listens on the given socket and saves message payloads to a binary dnstap\-format log file\.
.
.IP "" 4
.
.nf
$ dnstap \-u /tmp/dnstap\.sock \-w /tmp/test\.dnstap
.
.fi
.
.IP "" 0
.
.P
Listen for dnstap messages on port 6000\.
.
.IP "" 4
.
.nf
$ dnstap \-l 127\.0\.0\.1:6000
.
.fi
.
.IP "" 0
.
.SH "USING DNSTAP IN YOUR PLUGIN"
.
.nf
import (
"github\.com/coredns/coredns/plugin/dnstap"
"github\.com/coredns/coredns/plugin/dnstap/msg"
)
func (h Dnstap) ServeDNS(ctx context\.Context, w dns\.ResponseWriter, r *dns\.Msg) (int, error) {
// log client query to Dnstap
if t := dnstap\.TapperFromContext(ctx); t != nil {
b := msg\.New()\.Time(time\.Now())\.Addr(w\.RemoteAddr())
if t\.Pack() {
b\.Msg(r)
}
if m, err := b\.ToClientQuery(); err == nil {
t\.TapMessage(m)
}
}
// \.\.\.
}
.
.fi
.
.SH "SEE ALSO"
dnstap\.info \fIhttp://dnstap\.info\fR\.

122
vendor/github.com/coredns/coredns/man/coredns-erratic.7 generated vendored Normal file
View File

@@ -0,0 +1,122 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-ERRATIC" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIerratic\fR \- a plugin useful for testing client behavior\.
.
.SH "DESCRIPTION"
\fIerratic\fR returns a static response to all queries, but the responses can be delayed, dropped or truncated\. The \fIerratic\fR plugin will respond to every A or AAAA query\. For any other type it will return a SERVFAIL response\. The reply for A will return 192\.0\.2\.53 (see RFC 5737 \fIhttps://tools\.ietf\.org/html/rfc5737\fR); for AAAA it returns 2001:DB8::53 (see RFC 3849 \fIhttps://tools\.ietf\.org/html/rfc3849\fR)\.
.
.P
\fIerratic\fR can also be used in conjunction with the \fIautopath\fR plugin\. This is mostly to aid in testing\.
.
.SH "SYNTAX"
.
.nf
erratic {
drop [AMOUNT]
truncate [AMOUNT]
delay [AMOUNT [DURATION]]
}
.
.fi
.
.IP "\(bu" 4
\fBdrop\fR: drop 1 per \fBAMOUNT\fR of queries, the default is 2\.
.
.IP "\(bu" 4
\fBtruncate\fR: truncate 1 per \fBAMOUNT\fR of queries, the default is 2\.
.
.IP "\(bu" 4
\fBdelay\fR: delay 1 per \fBAMOUNT\fR of queries for \fBDURATION\fR, the default for \fBAMOUNT\fR is 2 and the default for \fBDURATION\fR is 100ms\.
.
.IP "" 0
.
.SH "HEALTH"
This plugin implements dynamic health checking\. For every dropped query it turns unhealthy\.
.
.SH "EXAMPLES"
.
.nf
\&\. {
erratic {
drop 3
}
}
.
.fi
.
.P
Or even shorter, if the defaults suit you\. Note this only drops queries; it does not delay them\.
.
.IP "" 4
.
.nf
\&\. {
erratic
}
.
.fi
.
.IP "" 0
.
.P
Delay 1 in 3 queries for 50ms
.
.IP "" 4
.
.nf
\&\. {
erratic {
delay 3 50ms
}
}
.
.fi
.
.IP "" 0
.
.P
Delay 1 in 3 and truncate 1 in 5\.
.
.IP "" 4
.
.nf
\&\. {
erratic {
delay 3 5ms
truncate 5
}
}
.
.fi
.
.IP "" 0
.
.P
Drop every second query\.
.
.IP "" 4
.
.nf
\&\. {
erratic {
drop 2
truncate 2
}
}
.
.fi
.
.IP "" 0
.
.SH "ALSO SEE"
RFC 3849 \fIhttps://tools\.ietf\.org/html/rfc3849\fR and RFC 5737 \fIhttps://tools\.ietf\.org/html/rfc5737\fR\.

38
vendor/github.com/coredns/coredns/man/coredns-errors.7 generated vendored Normal file
View File

@@ -0,0 +1,38 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-ERRORS" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIerrors\fR \- enable error logging\.
.
.SH "DESCRIPTION"
Any errors encountered during the query processing will be printed to standard output\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
errors
.
.fi
.
.SH "EXAMPLES"
Use the \fIwhoami\fR plugin to respond to queries and log errors to standard output\.
.
.IP "" 4
.
.nf
\&\. {
whoami
errors
}
.
.fi
.
.IP "" 0

240
vendor/github.com/coredns/coredns/man/coredns-etcd.7 generated vendored Normal file
View File

@@ -0,0 +1,240 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-ETCD" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIetcd\fR \- enables reading zone data from an etcd instance\.
.
.SH "DESCRIPTION"
The data in etcd has to be encoded as a message \fIhttps://github\.com/skynetservices/skydns/blob/2fcff74cdc9f9a7dd64189a447ef27ac354b725f/msg/service\.go#L26\fR like SkyDNS \fIhttps://github\.com/skynetservices/skydns\fR\. It should also work just like SkyDNS\.
.
.P
The etcd plugin makes extensive use of the proxy plugin to forward and query other servers in the network\.
.
.SH "SYNTAX"
.
.nf
etcd [ZONES\.\.\.]
.
.fi
.
.IP "\(bu" 4
\fBZONES\fR zones etcd should be authoritative for\.
.
.IP "" 0
.
.P
The path will default to \fB/skydns\fR and the endpoint to the local etcd proxy (http://localhost:2379)\. If no zones are specified, the block\'s zone will be used\.
.
.P
If you want to \fBround robin\fR A and AAAA responses look at the \fBloadbalance\fR plugin\.
.
.IP "" 4
.
.nf
etcd [ZONES\.\.\.] {
stubzones
fallthrough [ZONES\.\.\.]
path PATH
endpoint ENDPOINT\.\.\.
upstream [ADDRESS\.\.\.]
tls CERT KEY CACERT
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBstubzones\fR enables the stub zones feature\. The stubzone is \fIonly\fR done in the etcd tree located under the \fIfirst\fR zone specified\.
.
.IP "\(bu" 4
\fBfallthrough\fR If zone matches but no record can be generated, pass request to the next plugin\. If \fB[ZONES\.\.\.]\fR is omitted, then fallthrough happens for all zones for which the plugin is authoritative\. If specific zones are listed (for example \fBin\-addr\.arpa\fR and \fBip6\.arpa\fR), then only queries for those zones will be subject to fallthrough\.
.
.IP "\(bu" 4
\fBPATH\fR the path inside etcd\. Defaults to "/skydns"\.
.
.IP "\(bu" 4
\fBENDPOINT\fR the etcd endpoints\. Defaults to "http://localhost:2379"\.
.
.IP "\(bu" 4
\fBupstream\fR upstream resolvers to be used to resolve external names found in etcd (think CNAMEs) pointing to external names\. If you want CoreDNS to act as a proxy for clients, you\'ll need to add the proxy plugin\. If no \fBADDRESS\fR is given, CoreDNS will resolve CNAMEs against itself\. \fBADDRESS\fR can be an IP address, an IP:port or a string pointing to a file that is structured as /etc/resolv\.conf\.
.
.IP "\(bu" 4
\fBtls\fR followed by:
.
.IP "\(bu" 4
no arguments, if the server certificate is signed by a system\-installed CA and no client cert is needed
.
.IP "\(bu" 4
a single argument that is the CA PEM file, if the server cert is not signed by a system CA and no client cert is needed
.
.IP "\(bu" 4
two arguments \- path to cert PEM file, the path to private key PEM file \- if the server certificate is signed by a system\-installed CA and a client certificate is needed
.
.IP "\(bu" 4
three arguments \- path to cert PEM file, path to client private key PEM file, path to CA PEM file \- if the server certificate is not signed by a system\-installed CA and client certificate is needed\.
.
.IP "" 0
.
.IP "" 0
.
.SH "EXAMPLES"
This is the default SkyDNS setup, with everything specified in full:
.
.IP "" 4
.
.nf
\&\. {
etcd skydns\.local {
stubzones
path /skydns
endpoint http://localhost:2379
upstream 8\.8\.8\.8:53 8\.8\.4\.4:53
}
prometheus
cache 160 skydns\.local
loadbalance
proxy \. 8\.8\.8\.8:53 8\.8\.4\.4:53
}
.
.fi
.
.IP "" 0
.
.P
Or a setup where we use \fB/etc/resolv\.conf\fR as the basis for the proxy and the upstream when resolving external pointing CNAMEs\.
.
.IP "" 4
.
.nf
\&\. {
etcd skydns\.local {
path /skydns
upstream /etc/resolv\.conf
}
cache 160 skydns\.local
proxy \. /etc/resolv\.conf
}
.
.fi
.
.IP "" 0
.
.P
Multiple endpoints are supported as well\.
.
.IP "" 4
.
.nf
etcd skydns\.local {
endpoint http://localhost:2379 http://localhost:4001
\.\.\.
.
.fi
.
.IP "" 0
.
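.P
If the etcd endpoint requires TLS with client authentication, the \fBtls\fR option described above can be added\. A sketch with hypothetical endpoint and certificate paths:
.
.IP "" 4
.
.nf
\&\. {
    etcd skydns\.local {
        path /skydns
        endpoint https://etcd\-1:2379
        tls /etc/coredns/etcd\-client\.crt /etc/coredns/etcd\-client\.key /etc/coredns/etcd\-ca\.crt
    }
}
.
.fi
.
.IP "" 0
.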
.SS "REVERSE ZONES"
Reverse zones are supported\. You need to make CoreDNS aware of the fact that you are also authoritative for the reverse\. For instance if you want to add the reverse for 10\.0\.0\.0/24, you\'ll need to add the zone \fB0\.0\.10\.in\-addr\.arpa\fR to the list of zones\. Showing a snippet of a Corefile:
.
.IP "" 4
.
.nf
etcd skydns\.local 10\.0\.0\.0/24 {
stubzones
\.\.\.
.
.fi
.
.IP "" 0
.
.P
Next you\'ll need to populate the zone with reverse records, here we add a reverse for 10\.0\.0\.127 pointing to reverse\.skydns\.local\.
.
.IP "" 4
.
.nf
% curl \-XPUT http://127\.0\.0\.1:4001/v2/keys/skydns/arpa/in\-addr/10/0/0/127 \e
\-d value=\'{"host":"reverse\.skydns\.local\."}\'
.
.fi
.
.IP "" 0
.
.P
Querying with dig:
.
.IP "" 4
.
.nf
% dig @localhost \-x 10\.0\.0\.127 +short
reverse\.skydns\.local\.
.
.fi
.
.IP "" 0
.
.SS "ZONE NAME AS A RECORD"
The zone name itself can be used as an \fBA\fR record\. This behavior can be achieved by writing special entries to the etcd path of your zone\. If your zone is named \fBskydns\.local\fR, for example, you can create an \fBA\fR record for this zone as follows:
.
.IP "" 4
.
.nf
% curl \-XPUT http://127\.0\.0\.1:2379/v2/keys/skydns/local/skydns/dns/apex \-d value=\'{"host":"1\.1\.1\.1","ttl":"60"}\'
.
.fi
.
.IP "" 0
.
.P
If you query the zone name itself, you will receive the created \fBA\fR record:
.
.IP "" 4
.
.nf
% dig +short skydns\.local @localhost
1\.1\.1\.1
.
.fi
.
.IP "" 0
.
.P
If you would like to use DNS RR for the zone name, you can set the following:
.
.IP "" 4
.
.nf
% curl \-XPUT http://127\.0\.0\.1:2379/v2/keys/skydns/local/skydns/dns/apex/x1 \-d value=\'{"host":"1\.1\.1\.1","ttl":"60"}\'
% curl \-XPUT http://127\.0\.0\.1:2379/v2/keys/skydns/local/skydns/dns/apex/x2 \-d value=\'{"host":"1\.1\.1\.2","ttl":"60"}\'
.
.fi
.
.IP "" 0
.
.P
If you query the zone name now, you will get the following response:
.
.IP "" 4
.
.nf
dig +short skydns\.local @localhost
1\.1\.1\.1
1\.1\.1\.2
.
.fi
.
.IP "" 0
.
.P
If you would like to use \fBAAAA\fR records for the zone name too, you can set the following:
.
.IP "" 4
.
.nf
% curl \-XPUT http://127\.0\.0\.1:2379/v2/keys/skydns/local/skydns/dns/apex/x3 \-d value=\'{"host":"2003::8:1","ttl":"60"}\'
% curl \-XPUT http://127\.0\.0\.1:2379/v2/keys/skydns/local/skydns/dns/apex/x4 \-d value=\'{"host":"2003::8:2","ttl":"60"}\'
.
.fi
.
.IP "" 0
.
.P
If you query the zone name for \fBAAAA\fR now, you will get the following response:
.
.IP "" 4
.
.nf
dig +short skydns\.local AAAA @localhost
2003::8:1
2003::8:2
.
.fi
.
.IP "" 0
.
.SH "BUGS"
Only the etcdv2 protocol is supported\.

View File

@@ -0,0 +1,67 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-FEDERATION" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIfederation\fR \- enables federated queries to be resolved via the kubernetes plugin\.
.
.SH "DESCRIPTION"
Enabling this plugin allows Federated \fIhttps://kubernetes\.io/docs/tasks/federation/federation\-service\-discovery/\fR queries to be resolved via the kubernetes plugin\.
.
.P
Enabling \fIfederation\fR without also having \fIkubernetes\fR is a noop\.
.
.SH "SYNTAX"
.
.nf
federation [ZONES\.\.\.] {
NAME DOMAIN
}
.
.fi
.
.IP "\(bu" 4
Each \fBNAME\fR and \fBDOMAIN\fR defines federation membership\. One entry for each\. A duplicate \fBNAME\fR will silently overwrite any previous value\.
.
.IP "" 0
.
.SH "EXAMPLES"
Here we handle all service requests in the \fBprod\fR and \fBstaging\fR federations\.
.
.IP "" 4
.
.nf
\&\. {
kubernetes cluster\.local
federation cluster\.local {
prod prod\.feddomain\.com
staging staging\.feddomain\.com
}
}
.
.fi
.
.IP "" 0
.
.P
Or slightly shorter:
.
.IP "" 4
.
.nf
cluster\.local {
kubernetes
federation {
prod prod\.feddomain\.com
staging staging\.feddomain\.com
}
}
.
.fi
.
.IP "" 0

91
vendor/github.com/coredns/coredns/man/coredns-file.7 generated vendored Normal file
View File

@@ -0,0 +1,91 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-FILE" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIfile\fR \- enables serving zone data from an RFC 1035\-style master file\.
.
.SH "DESCRIPTION"
The file plugin is used for an "old\-style" DNS server\. It serves from a preloaded file that exists on disk\. If the zone file contains signatures (i\.e\. is signed, i\.e\. DNSSEC) correct DNSSEC answers are returned\. Only NSEC is supported! If you use this setup \fIyou\fR are responsible for re\-signing the zonefile\.
.
.SH "SYNTAX"
.
.nf
file DBFILE [ZONES\.\.\.]
.
.fi
.
.IP "\(bu" 4
\fBDBFILE\fR the database file to read and parse\. If the path is relative the path from the \fIroot\fR directive will be prepended to it\.
.
.IP "\(bu" 4
\fBZONES\fR zones it should be authoritative for\. If empty, the zones from the configuration block are used\.
.
.IP "" 0
.
.P
If you want to round robin A and AAAA responses look at the \fIloadbalance\fR plugin\.
.
.IP "" 4
.
.nf
file DBFILE [ZONES\.\.\. ] {
transfer to ADDRESS\.\.\.
no_reload
upstream [ADDRESS\.\.\.]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBtransfer\fR enables zone transfers\. It may be specified multiple times\. \fBTo\fR or \fBfrom\fR signals the direction\. \fBADDRESS\fR must be denoted in CIDR notation (127\.0\.0\.1/32 etc\.) or just as plain addresses\. The special wildcard \fB*\fR means: the entire internet (only valid for \'transfer to\')\. When an address is specified a notify message will be sent whenever the zone is reloaded\.
.
.IP "\(bu" 4
\fBno_reload\fR by default CoreDNS will try to reload a zone every minute and reloads if the SOA\'s serial has changed\. This option disables that behavior\.
.
.IP "\(bu" 4
\fBupstream\fR defines upstream resolvers to be used to resolve external names (think CNAMEs) pointing to external names\. This is only really useful when CoreDNS is configured as a proxy; for normal authoritative serving you don\'t need \fIor\fR want to use this\. \fBADDRESS\fR can be an IP address, an IP:port or a string pointing to a file that is structured as /etc/resolv\.conf\. If no \fBADDRESS\fR is given, CoreDNS will resolve CNAMEs against itself\.
.
.IP "" 0
.
.SH "EXAMPLES"
Load the \fBexample\.org\fR zone from \fBexample\.org\.signed\fR and allow transfers to the internet, but send notifies to 10\.240\.1\.1
.
.IP "" 4
.
.nf
example\.org {
file example\.org\.signed {
transfer to *
transfer to 10\.240\.1\.1
}
}
.
.fi
.
.IP "" 0
.
.P
Or use a single zone file for multiple zones:
.
.IP "" 4
.
.nf
\&\. {
file example\.org\.signed example\.org example\.net {
transfer to *
transfer to 10\.240\.1\.1
}
}
.
.fi
.
.IP "" 0

233
vendor/github.com/coredns/coredns/man/coredns-forward.7 generated vendored Normal file
View File

@@ -0,0 +1,233 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-FORWARD" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIforward\fR \- facilitates proxying DNS messages to upstream resolvers\.
.
.SH "DESCRIPTION"
The \fIforward\fR plugin re\-uses already opened sockets to the upstreams\. It supports UDP, TCP and DNS\-over\-TLS and uses in band health checking\.
.
.P
When it detects an error a health check is performed\. This check runs in a loop, every \fI0\.5s\fR, for as long as the upstream reports unhealthy\. Once healthy we stop health checking (until the next error)\. The health checks use a recursive DNS query (\fB\. IN NS\fR) to get upstream health\. Any response that is not a network error (REFUSED, NOTIMPL, SERVFAIL, etc) is taken as a healthy upstream\. The health check uses the same protocol as specified in \fBTO\fR\. If \fBmax_fails\fR is set to 0, no checking is performed and upstreams will always be considered healthy\.
.
.P
When \fIall\fR upstreams are down it assumes health checking as a mechanism has failed and will try to connect to a random upstream (which may or may not work)\.
.
.P
This plugin can only be used once per Server Block\.
.
.P
How does \fIforward\fR relate to \fIproxy\fR? This plugin is the "new" version of \fIproxy\fR and is faster because it re\-uses connections to the upstreams\. It also does in\-band health checks \- using DNS instead of HTTP\. Since it is newer it has a little less (production) mileage on it\.
.
.SH "SYNTAX"
In its most basic form, a simple forwarder uses this syntax:
.
.IP "" 4
.
.nf
forward FROM TO\.\.\.
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the base domain to match for the request to be forwarded\.
.
.IP "\(bu" 4
\fBTO\.\.\.\fR are the destination endpoints to forward to\. The \fBTO\fR syntax allows you to specify a protocol, \fBtls://9\.9\.9\.9\fR or \fBdns://\fR (or no protocol) for plain DNS\. The number of upstreams is limited to 15\.
.
.IP "" 0
.
.P
Multiple upstreams are randomized (see \fBpolicy\fR) on first use\. When a healthy proxy returns an error during the exchange the next upstream in the list is tried\.
.
.P
Extra knobs are available with an expanded syntax:
.
.IP "" 4
.
.nf
forward FROM TO\.\.\. {
except IGNORED_NAMES\.\.\.
force_tcp
expire DURATION
max_fails INTEGER
tls CERT KEY CA
tls_servername NAME
policy random|round_robin|sequential
health_check DURATION
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR and \fBTO\.\.\.\fR as above\.
.
.IP "\(bu" 4
\fBIGNORED_NAMES\fR in \fBexcept\fR is a space\-separated list of domains to exclude from forwarding\. Requests that match none of these names will be passed through\.
.
.IP "\(bu" 4
\fBforce_tcp\fR, use TCP even when the request comes in over UDP\.
.
.IP "\(bu" 4
\fBmax_fails\fR is the number of subsequent failed health checks that are needed before considering an upstream to be down\. If 0, the upstream will never be marked as down (nor health checked)\. Default is 2\.
.
.IP "\(bu" 4
\fBexpire\fR \fBDURATION\fR, expire (cached) connections after this time, the default is 10s\.
.
.IP "\(bu" 4
\fBtls\fR \fBCERT\fR \fBKEY\fR \fBCA\fR define the TLS properties for TLS connection\. From 0 to 3 arguments can be provided with the meaning as described below
.
.IP "\(bu" 4
\fBtls\fR \- no client authentication is used, and the system CAs are used to verify the server certificate
.
.IP "\(bu" 4
\fBtls\fR \fBCA\fR \- no client authentication is used, and the file CA is used to verify the server certificate
.
.IP "\(bu" 4
\fBtls\fR \fBCERT\fR \fBKEY\fR \- client authentication is used with the specified cert/key pair\. The server certificate is verified with the system CAs
.
.IP "\(bu" 4
\fBtls\fR \fBCERT\fR \fBKEY\fR \fBCA\fR \- client authentication is used with the specified cert/key pair\. The server certificate is verified using the specified CA file
.
.IP "" 0
.
.IP "\(bu" 4
\fBtls_servername\fR \fBNAME\fR allows you to set a server name in the TLS configuration; for instance 9\.9\.9\.9 needs this to be set to \fBdns\.quad9\.net\fR\.
.
.IP "\(bu" 4
\fBpolicy\fR specifies the policy to use for selecting upstream servers\. The default is \fBrandom\fR\.
.
.IP "\(bu" 4
\fBhealth_check\fR, use a different \fBDURATION\fR for health checking, the default duration is 0\.5s\.
.
.IP "" 0
.
.P
Also note the TLS config is "global" for the whole forwarding proxy; if you need a different \fBtls_servername\fR for different upstreams you\'re out of luck\.
.
.P
On each endpoint, the timeouts of the communication are set by default and automatically tuned depending on early results: dialTimeout is 30 seconds by default and can decrease automatically down to 100ms; readTimeout is 2 seconds by default and can decrease automatically down to 10ms\.
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metrics are exported:
.
.IP "\(bu" 4
\fBcoredns_forward_request_duration_seconds{to}\fR \- duration per upstream interaction\.
.
.IP "\(bu" 4
\fBcoredns_forward_request_count_total{to}\fR \- query count per upstream\.
.
.IP "\(bu" 4
\fBcoredns_forward_response_rcode_total{to, rcode}\fR \- count of RCODEs per upstream\.
.
.IP "\(bu" 4
\fBcoredns_forward_healthcheck_failure_count_total{to}\fR \- number of failed health checks per upstream\.
.
.IP "\(bu" 4
\fBcoredns_forward_healthcheck_broken_count_total{}\fR \- counter of when all upstreams are unhealthy, and we are randomly (this always uses the \fBrandom\fR policy) spraying to an upstream\.
.
.IP "\(bu" 4
\fBcoredns_forward_socket_count_total{to}\fR \- number of cached sockets per upstream\.
.
.IP "" 0
.
.P
Where \fBto\fR is one of the upstream servers (\fBTO\fR from the config), \fBproto\fR is the protocol used by the incoming query ("tcp" or "udp"), and family the transport family ("1" for IPv4, and "2" for IPv6)\.
.
.SH "EXAMPLES"
Proxy all requests within \fBexample\.org\.\fR to a nameserver running on a different port:
.
.IP "" 4
.
.nf
example\.org {
forward \. 127\.0\.0\.1:9005
}
.
.fi
.
.IP "" 0
.
.P
Load balance all requests between three resolvers, one of which has an IPv6 address\.
.
.IP "" 4
.
.nf
\&\. {
forward \. 10\.0\.0\.10:53 10\.0\.0\.11:1053 [2003::1]:53
}
.
.fi
.
.IP "" 0
.
.P
Forward everything except requests to \fBexample\.org\fR
.
.IP "" 4
.
.nf
\&\. {
forward \. 10\.0\.0\.10:1234 {
except example\.org
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy everything except \fBexample\.org\fR using the host\'s \fBresolv\.conf\fR\'s nameservers:
.
.IP "" 4
.
.nf
\&\. {
forward \. /etc/resolv\.conf {
except example\.org
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy all requests to 9\.9\.9\.9 using the DNS\-over\-TLS protocol, and cache every answer for up to 30 seconds\. Note the \fBtls_servername\fR is mandatory if you want a working setup, as 9\.9\.9\.9 can\'t be used in the TLS negotiation\. Also set the health check duration to 5s to not completely swamp the service with health checks\.
.
.IP "" 4
.
.nf
\&\. {
forward \. tls://9\.9\.9\.9 {
tls_servername dns\.quad9\.net
health_check 5s
}
cache 30
}
.
.fi
.
.IP "" 0
.
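.P
As an additional sketch (not part of the upstream examples), upstreams can be tried in order with a higher failure threshold by combining \fBpolicy\fR and \fBmax_fails\fR:
.
.IP "" 4
.
.nf
\&\. {
    forward \. 10\.0\.0\.10:53 10\.0\.0\.11:53 {
        policy sequential
        max_fails 3
    }
}
.
.fi
.
.IP "" 0
.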
.SH "BUGS"
The TLS config is global for the whole forwarding proxy; if you need a different \fBtls_servername\fR for different upstreams you\'re out of luck\.
.
.SH "ALSO SEE"
RFC 7858 \fIhttps://tools\.ietf\.org/html/rfc7858\fR for DNS over TLS\.

128
vendor/github.com/coredns/coredns/man/coredns-health.7 generated vendored Normal file
View File

@@ -0,0 +1,128 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-HEALTH" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIhealth\fR \- enables a health check endpoint\.
.
.SH "DESCRIPTION"
By enabling \fIhealth\fR, any plugin that implements the health\.Healther interface \fIhttps://godoc\.org/github\.com/coredns/coredns/plugin/health#Healther\fR will be queried for its health\. The combined health is exported, by default, on port 8080 at /health\.
.
.SH "SYNTAX"
.
.nf
health [ADDRESS]
.
.fi
.
.P
Optionally takes an address; the default is \fB:8080\fR\. The health path is fixed to \fB/health\fR\. The health endpoint returns a 200 response code and the word "OK" when this server is healthy, and a 503 when it is unhealthy\. \fIhealth\fR periodically (every 1s) polls plugins that export health information\. If any of the plugins signals that it is unhealthy, the server will go unhealthy too\. Each plugin that supports health checks has a section "Health" in its README\.
.
.P
More options can be set with this extended syntax:
.
.IP "" 4
.
.nf
health [ADDRESS] {
lameduck DURATION
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
Where \fBlameduck\fR will make the process unhealthy then \fIwait\fR for \fBDURATION\fR before the process shuts down\.
.
.IP "" 0
.
.P
If you have multiple Server Blocks and need to export health for each of the plugins, you must run health endpoints on different ports:
.
.IP "" 4
.
.nf
com {
whoami
health :8080
}
net {
erratic
health :8081
}
.
.fi
.
.IP "" 0
.
.P
Note that if you configure this in one Server Block you will get an error on startup, because the second server can\'t set up the health plugin on the same port:
.
.IP "" 4
.
.nf
com net {
whoami
erratic
health :8080
}
.
.fi
.
.IP "" 0
.
.SH "PLUGINS"
Any plugin that implements the Healther interface will be used to report health\.
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metric is exported:
.
.IP "\(bu" 4
\fBcoredns_health_request_duration_seconds{}\fR \- duration to process a /health query\. As this should be a local operation it should be fast\. A (large) increase in this duration indicates the CoreDNS process is having trouble keeping up with its query load\.
.
.IP "" 0
.
.P
Note that this metric \fIdoes not\fR have a \fBserver\fR label, because being overloaded is a symptom of the running process, \fInot\fR a specific server\.
.
.SH "EXAMPLES"
Run another health endpoint on http://localhost:8091\.
.
.IP "" 4
.
.nf
\&\. {
health localhost:8091
}
.
.fi
.
.IP "" 0
.
.P
Set a lameduck duration of 1 second:
.
.IP "" 4
.
.nf
\&\. {
health localhost:8092 {
lameduck 1s
}
}
.
.fi
.
.IP "" 0
.
.SH "BUGS"
When reloading, the Health handler is stopped before the new server instance is started\. If that new server fails to start, then the initial server instance is still available and DNS queries are still served, but the Health handler stays down\. Health will not reply to HTTP requests until a successful reload or a complete restart of CoreDNS\.

131
vendor/github.com/coredns/coredns/man/coredns-hosts.7 generated vendored Normal file
View File

@@ -0,0 +1,131 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-HOSTS" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIhosts\fR \- enables serving zone data from a \fB/etc/hosts\fR style file\.
.
.SH "DESCRIPTION"
The hosts plugin is useful for serving zones from a \fB/etc/hosts\fR file\. It serves from a preloaded file that exists on disk\. It checks the file for changes and updates the zones accordingly\. This plugin only supports A, AAAA, and PTR records\. The hosts plugin can be used with readily available hosts files that block access to advertising servers\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "THE HOSTS FILE"
Commonly the entries are of the form \fBIP_address canonical_hostname [aliases\.\.\.]\fR as explained by the hosts(5) man page\.
.
.P
Examples:
.
.IP "" 4
.
.nf
# THE FOLLOWING LINES ARE DESIRABLE FOR IPV4 CAPABLE HOSTS
127\.0\.0\.1 localhost
192\.168\.1\.10 example\.com example
# THE FOLLOWING LINES ARE DESIRABLE FOR IPV6 CAPABLE HOSTS
::1 localhost ip6\-localhost ip6\-loopback
fdfc:a744:27b5:3b0e::1 example\.com example
.
.fi
.
.IP "" 0
.
.SS "PTR RECORDS"
PTR records for reverse lookups are generated automatically by CoreDNS (based on the hosts file entries) and cannot be created manually\.
.
.SH "SYNTAX"
.
.nf
hosts [FILE [ZONES\.\.\.]] {
[INLINE]
fallthrough [ZONES\.\.\.]
}
.
.fi
.
.IP "\(bu" 4
\fBFILE\fR the hosts file to read and parse\. If the path is relative the path from the \fIroot\fR directive will be prepended to it\. Defaults to /etc/hosts if omitted\. We scan the file for changes every 5 seconds\.
.
.IP "\(bu" 4
\fBZONES\fR zones it should be authoritative for\. If empty, the zones from the configuration block are used\.
.
.IP "\(bu" 4
\fBINLINE\fR the hosts file contents inlined in the Corefile\. If there are any lines before \fBfallthrough\fR then all of them will be treated as additional content for the hosts file\. The specified hosts file path will still be read, but its entries will be overridden\.
.
.IP "\(bu" 4
\fBfallthrough\fR If zone matches and no record can be generated, pass request to the next plugin\. If \fB[ZONES\.\.\.]\fR is omitted, then fallthrough happens for all zones for which the plugin is authoritative\. If specific zones are listed (for example \fBin\-addr\.arpa\fR and \fBip6\.arpa\fR), then only queries for those zones will be subject to fallthrough\.
.
.IP "" 0
.
.SH "EXAMPLES"
Load \fB/etc/hosts\fR file\.
.
.IP "" 4
.
.nf
\&\. {
hosts
}
.
.fi
.
.IP "" 0
.
.P
Load \fBexample\.hosts\fR file in the current directory\.
.
.IP "" 4
.
.nf
\&\. {
hosts example\.hosts
}
.
.fi
.
.IP "" 0
.
.P
Load the example\.hosts file, serve only example\.org and example\.net from it, and fall through to the next plugin if the query doesn\'t match\.
.
.IP "" 4
.
.nf
\&\. {
hosts example\.hosts example\.org example\.net {
fallthrough
}
}
.
.fi
.
.IP "" 0
.
.P
Load hosts file inlined in Corefile\.
.
.IP "" 4
.
.nf
\&\. {
hosts example\.hosts example\.org {
10\.0\.0\.1 example\.org
fallthrough
}
}
.
.fi
.
.IP "" 0
.
.SH "SEE ALSO"
The form of the entries in the \fB/etc/hosts\fR file is based on IETF RFC 952 \fIhttps://tools\.ietf\.org/html/rfc952\fR, which was updated by IETF RFC 1123 \fIhttps://tools\.ietf\.org/html/rfc1123\fR\.

View File

@@ -0,0 +1,252 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-KUBERNETES" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIkubernetes\fR \- enables reading zone data from a Kubernetes cluster\.
.
.SH "DESCRIPTION"
It implements the Kubernetes DNS\-Based Service Discovery Specification \fIhttps://github\.com/kubernetes/dns/blob/master/docs/specification\.md\fR\.
.
.P
CoreDNS running the kubernetes plugin can be used as a replacement of kube\-dns in a kubernetes cluster\. See the deployment \fIhttps://github\.com/coredns/deployment\fR repository for details on how to deploy CoreDNS in Kubernetes \fIhttps://github\.com/coredns/deployment/tree/master/kubernetes\fR\.
.
.P
stubDomains and upstreamNameservers \fIhttp://blog\.kubernetes\.io/2017/04/configuring\-private\-dns\-zones\-upstream\-nameservers\-kubernetes\.html\fR are implemented via the \fIproxy\fR plugin and kubernetes \fIupstream\fR\. See example below\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
kubernetes [ZONES\.\.\.]
.
.fi
.
.P
With only the directive specified, the \fIkubernetes\fR plugin will default to the zone specified in the server\'s block\. It will handle all queries in that zone and connect to Kubernetes in\-cluster\. It will not provide PTR records for services, or A records for pods\. If \fBZONES\fR is used it specifies all the zones the plugin should be authoritative for\.
.
.IP "" 4
.
.nf
kubernetes [ZONES\.\.\.] {
resyncperiod DURATION
endpoint URL [URL\.\.\.]
tls CERT KEY CACERT
namespaces NAMESPACE\.\.\.
labels EXPRESSION
pods POD\-MODE
endpoint_pod_names
upstream [ADDRESS\.\.\.]
ttl TTL
transfer to ADDRESS\.\.\.
fallthrough [ZONES\.\.\.]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBresyncperiod\fR specifies the Kubernetes data API \fBDURATION\fR period\.
.
.IP "\(bu" 4
\fBendpoint\fR specifies the \fBURL\fR for a remote k8s API endpoint\. If omitted, it will connect to k8s in\-cluster using the cluster service account\. Multiple k8s API endpoints could be specified: \fBendpoint http://k8s\-endpoint1:8080 http://k8s\-endpoint2:8080\fR\. CoreDNS will automatically perform a healthcheck and proxy to the healthy k8s API endpoint\.
.
.IP "\(bu" 4
\fBtls\fR \fBCERT\fR \fBKEY\fR \fBCACERT\fR are the TLS cert, key and the CA cert file names for remote k8s connection\. This option is ignored if connecting in\-cluster (i\.e\. endpoint is not specified)\.
.
.IP "\(bu" 4
\fBnamespaces\fR \fBNAMESPACE [NAMESPACE\.\.\.]\fR, only exposes the k8s namespaces listed\. If this option is omitted all namespaces are exposed
.
.IP "\(bu" 4
\fBlabels\fR \fBEXPRESSION\fR only exposes the records for Kubernetes objects that match this label selector\. The label selector syntax is described in the Kubernetes User Guide \- Labels \fIhttp://kubernetes\.io/docs/user\-guide/labels/\fR\. An example that only exposes objects labeled as "application=nginx" in the "staging" or "qa" environments, would use: \fBlabels environment in (staging, qa),application=nginx\fR\.
.
.IP "\(bu" 4
\fBpods\fR \fBPOD\-MODE\fR sets the mode for handling IP\-based pod A records, e\.g\. \fB1\-2\-3\-4\.ns\.pod\.cluster\.local\. in A 1\.2\.3\.4\fR\. This option is provided to facilitate use of SSL certs when connecting directly to pods\. Valid values for \fBPOD\-MODE\fR:
.
.IP "\(bu" 4
\fBdisabled\fR: Default\. Do not process pod requests, always returning \fBNXDOMAIN\fR
.
.IP "\(bu" 4
\fBinsecure\fR: Always return an A record with IP from request (without checking k8s)\. This option is vulnerable to abuse if used maliciously in conjunction with wildcard SSL certs\. This option is provided for backward compatibility with kube\-dns\.
.
.IP "\(bu" 4
\fBverified\fR: Return an A record if there exists a pod in the same namespace with a matching IP\. This option requires substantially more memory than insecure mode, since it will maintain a watch on all pods\.
.
.IP "" 0
.
.IP "\(bu" 4
\fBendpoint_pod_names\fR uses the pod name of the pod targeted by the endpoint as the endpoint name in A records, e\.g\. \fBendpoint\-name\.my\-service\.namespace\.svc\.cluster\.local\. in A 1\.2\.3\.4\fR\. By default, the endpoint name is chosen as follows: use the hostname of the endpoint, or if the hostname is not set, use the dashed form of the endpoint IP address (e\.g\. \fB1\-2\-3\-4\.my\-service\.namespace\.svc\.cluster\.local\.\fR)\. If this directive is included, then name selection for endpoints changes as follows: use the hostname of the endpoint, or if the hostname is not set, use the pod name of the pod targeted by the endpoint\. If there is no pod targeted by the endpoint, use the dashed IP address form\.
.
.IP "\(bu" 4
\fBupstream\fR [\fBADDRESS\fR\.\.\.] defines the upstream resolvers used for resolving services that point to external hosts (aka External Services aka CNAMEs)\. If no \fBADDRESS\fR is given, CoreDNS will resolve External Services against itself\. \fBADDRESS\fR can be an IP, an IP:port, or a path to a file structured like resolv\.conf\.
.
.IP "\(bu" 4
\fBttl\fR allows you to set a custom TTL for responses\. The default (and allowed minimum) is 5 seconds; the maximum is capped at 3600 seconds\.
.
.IP "\(bu" 4
\fBnoendpoints\fR will turn off the serving of endpoint records by disabling the watch on endpoints\. All endpoint queries and headless service queries will result in an NXDOMAIN\.
.
.IP "\(bu" 4
\fBtransfer\fR enables zone transfers\. It may be specified multiple times\. \fBto\fR signals the direction (only \fBto\fR is allowed)\. \fBADDRESS\fR must be denoted in CIDR notation (127\.0\.0\.1/32 etc\.) or just as plain addresses\. The special wildcard \fB*\fR means: the entire internet\. Sending DNS notifies is not supported\.
.
.IP "\(bu" 4
\fBfallthrough\fR \fB[ZONES\.\.\.]\fR If a query for a record in the zones for which the plugin is authoritative results in NXDOMAIN, normally that is what the response will be\. However, if you specify this option, the query will instead be passed on down the plugin chain, which can include another plugin to handle the query\. If \fB[ZONES\.\.\.]\fR is omitted, then fallthrough happens for all zones for which the plugin is authoritative\. If specific zones are listed (for example \fBin\-addr\.arpa\fR and \fBip6\.arpa\fR), then only queries for those zones will be subject to fallthrough\.
.
.IP "\(bu" 4
\fBignore empty_service\fR returns NXDOMAIN for services without any ready endpoint addresses (e\.g\. ready pods)\. This allows the querying pod to continue searching for the service in the search path\. The search path could, for example, include another kubernetes cluster\.
.
.IP "" 0
.
.SH "HEALTH"
This plugin implements dynamic health checking\. Currently this is limited to reporting healthy when the API has synced\.
.
.SH "EXAMPLES"
Handle all queries in the \fBcluster\.local\fR zone\. Connect to Kubernetes in\-cluster\. Also handle all \fBin\-addr\.arpa\fR \fBPTR\fR requests for \fB10\.0\.0\.0/17\fR\. Verify the existence of pods when answering pod requests\. Resolve upstream records against \fB10\.102\.3\.10\fR\. Note we show the entire server block here:
.
.IP "" 4
.
.nf
10\.0\.0\.0/17 cluster\.local {
kubernetes {
pods verified
upstream 10\.102\.3\.10:53
}
}
.
.fi
.
.IP "" 0
.
.P
Or you can selectively expose some namespaces:
.
.IP "" 4
.
.nf
kubernetes cluster\.local {
namespaces test staging
}
.
.fi
.
.IP "" 0
.
.P
Connect to Kubernetes with CoreDNS running outside the cluster:
.
.IP "" 4
.
.nf
kubernetes cluster\.local {
endpoint https://k8s\-endpoint:8443
tls cert key cacert
}
.
.fi
.
.IP "" 0
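.
.P
As a further sketch (the zone, network and option values are illustrative, combining the \fBttl\fR, \fBtransfer\fR and \fBfallthrough\fR options described above; adjust them to your cluster):
.
.IP "" 4
.
.nf
\&\.:53 {
    kubernetes cluster\.local in\-addr\.arpa {
        pods insecure
        ttl 30
        transfer to *
        fallthrough in\-addr\.arpa
    }
    proxy \. /etc/resolv\.conf
}
.
.fi
.
.IP "" 0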
.
.SH "STUBDOMAINS AND UPSTREAMNAMESERVERS"
Here we use the \fIproxy\fR plugin to implement a stubDomain that forwards \fBexample\.local\fR to the nameserver \fB10\.100\.0\.10:53\fR\. The \fIupstream\fR option in kubernetes means that ExternalName services (CNAMEs) will be resolved using the respective proxy\. Also configured is an upstreamNameserver \fB8\.8\.8\.8:53\fR that will be used for resolving names that do not fall in \fBcluster\.local\fR or \fBexample\.local\fR\.
.
.IP "" 4
.
.nf
\&\.:53 {
kubernetes cluster\.local {
upstream
}
proxy example\.local 10\.100\.0\.10:53
proxy \. 8\.8\.8\.8:53
}
.
.fi
.
.IP "" 0
.
.P
The configuration above represents the following Kube\-DNS stubDomains and upstreamNameservers configuration\.
.
.IP "" 4
.
.nf
stubDomains: |
    {"example\.local": ["10\.100\.0\.10:53"]}
upstreamNameservers: |
    ["8\.8\.8\.8:53"]
.
.fi
.
.IP "" 0
.
.SH "AUTOPATH"
The \fIkubernetes\fR plugin can be used in conjunction with the \fIautopath\fR plugin\. Using this feature enables server\-side domain search path completion in kubernetes clusters\. Note: \fBpods\fR must be set to \fBverified\fR for this to function properly\.
.
.IP "" 4
.
.nf
cluster\.local {
autopath @kubernetes
kubernetes {
pods verified
}
}
.
.fi
.
.IP "" 0
.
.SH "FEDERATION"
The \fIkubernetes\fR plugin can be used in conjunction with the \fIfederation\fR plugin\. Using this feature enables serving federated domains from the kubernetes clusters\.
.
.IP "" 4
.
.nf
cluster\.local {
federation {
prod prod\.example\.org
staging staging\.example\.org
}
kubernetes
}
.
.fi
.
.IP "" 0
.
.SH "WILDCARDS"
Some query labels accept a wildcard value to match any value\. If a label is a valid wildcard (*, or the word "any"), then that label will match all values\. The labels that accept wildcards are:
.
.IP "\(bu" 4
\fIendpoint\fR in an \fBA\fR record request: \fIendpoint\fR\.service\.namespace\.svc\.zone, e\.g\. \fB*\.nginx\.ns\.svc\.cluster\.local\fR
.
.IP "\(bu" 4
\fIservice\fR in an \fBA\fR record request: \fIservice\fR\.namespace\.svc\.zone, e\.g\. \fB*\.ns\.svc\.cluster\.local\fR
.
.IP "\(bu" 4
\fInamespace\fR in an \fBA\fR record request: service\.\fInamespace\fR\.svc\.zone, e\.g\. \fBnginx\.*\.svc\.cluster\.local\fR
.
.IP "\(bu" 4
\fIport and/or protocol\fR in an \fBSRV\fR request: \fIport\fR\.\fIprotocol\fR\.service\.namespace\.svc\.zone\., e\.g\. \fB_http\.*\.service\.ns\.svc\.cluster\.local\fR
.
.IP "\(bu" 4
multiple wildcards are allowed in a single query, e\.g\. an \fBA\fR request \fB*\.*\.svc\.zone\.\fR or an \fBSRV\fR request \fB*\.*\.*\.*\.svc\.zone\.\fR
.
.IP "" 0
.
.P
For example, wildcards can be used to resolve all Endpoints of a Service as \fBA\fR records, e\.g\.: \fB*\.service\.default\.svc\.cluster\.local\fR will return the Endpoint IPs of the Service \fBservice\fR in namespace \fBdefault\fR: \fB*\.service\.default\.svc\.cluster\.local\. 5 IN A 192\.168\.10\.10 *\.service\.default\.svc\.cluster\.local\. 5 IN A 192\.168\.25\.15\fR\. This response can be randomized using the \fIloadbalance\fR plugin\.

View File

@@ -0,0 +1,40 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-LOADBALANCE" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIloadbalance\fR \- acts as a round\-robin DNS loadbalancer by randomizing the order of A and AAAA records in the answer\.
.
.SH "DESCRIPTION"
See Wikipedia \fIhttps://en\.wikipedia\.org/wiki/Round\-robin_DNS\fR about the pros and cons of this setup\. It will take care to sort any CNAMEs before any address records, because some stub resolver implementations (like glibc) are particular about that\.
.
.SH "SYNTAX"
.
.nf
loadbalance [POLICY]
.
.fi
.
.IP "\(bu" 4
\fBPOLICY\fR is the balancing policy to use; the default is "round_robin"
.
.IP "" 0
.
.SH "EXAMPLES"
Load balance replies coming back from Google Public DNS:
.
.IP "" 4
.
.nf
\&\. {
loadbalance round_robin
proxy \. 8\.8\.8\.8 8\.8\.4\.4
}
.
.fi
.
.IP "" 0

257
vendor/github.com/coredns/coredns/man/coredns-log.7 generated vendored Normal file
View File

@@ -0,0 +1,257 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-LOG" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIlog\fR \- enables query logging to standard output\.
.
.SH "DESCRIPTION"
By just using \fIlog\fR you dump all queries (and parts of the replies) to standard output\. Options exist to tweak the output a little\.
.
.P
Note that for busy servers this will incur a performance hit\.
.
.SH "SYNTAX"
.
.nf
log
.
.fi
.
.IP "\(bu" 4
With no arguments, a query log entry is written to \fIstdout\fR in the common log format for all requests
.
.IP "" 0
.
.P
Or if you want/need slightly more control:
.
.IP "" 4
.
.nf
log [NAME] [FORMAT]
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBNAME\fR is the name to match in order to be logged
.
.IP "\(bu" 4
\fBFORMAT\fR is the log format to use (default is Common Log Format)
.
.IP "" 0
.
.P
You can further specify the classes of responses that get logged:
.
.IP "" 4
.
.nf
log [NAME] [FORMAT] {
class CLASSES\.\.\.
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBCLASSES\fR is a space\-separated list of classes of responses that should be logged
.
.IP "" 0
.
.P
The classes of responses have the following meaning:
.
.IP "\(bu" 4
\fBsuccess\fR: successful response
.
.IP "\(bu" 4
\fBdenial\fR: either NXDOMAIN or NODATA (name exists, type does not)
.
.IP "\(bu" 4
\fBerror\fR: SERVFAIL, NOTIMP, REFUSED, etc\. Anything that indicates the remote server is not willing to resolve the request\.
.
.IP "\(bu" 4
\fBall\fR: the default \- used when nothing is specified\. Using this class means that all messages will be logged, whatever other classes it is mixed with\.
.
.IP "" 0
.
.P
If no class is specified, it defaults to \fIall\fR\.
.
.SH "LOG FORMAT"
You can specify a custom log format with any placeholder values\. Log supports both request and response placeholders\.
.
.P
The following placeholders are supported:
.
.IP "\(bu" 4
\fB{type}\fR: qtype of the request
.
.IP "\(bu" 4
\fB{name}\fR: qname of the request
.
.IP "\(bu" 4
\fB{class}\fR: qclass of the request
.
.IP "\(bu" 4
\fB{proto}\fR: protocol used (tcp or udp)
.
.IP "\(bu" 4
\fB{when}\fR: time of the query
.
.IP "\(bu" 4
\fB{remote}\fR: client\'s IP address, for IPv6 addresses these are enclosed in brackets: \fB[::1]\fR
.
.IP "\(bu" 4
\fB{size}\fR: request size in bytes
.
.IP "\(bu" 4
\fB{port}\fR: client\'s port
.
.IP "\(bu" 4
\fB{duration}\fR: response duration
.
.IP "\(bu" 4
\fB{rcode}\fR: response RCODE
.
.IP "\(bu" 4
\fB{rsize}\fR: response size
.
.IP "\(bu" 4
\fB{>rflags}\fR: response flags, each set flag will be displayed, e\.g\. "aa, tc"\. This includes the qr bit as well\.
.
.IP "\(bu" 4
\fB{>bufsize}\fR: the EDNS0 buffer size advertised in the query
.
.IP "\(bu" 4
\fB{>do}\fR: is the EDNS0 DO (DNSSEC OK) bit set in the query
.
.IP "\(bu" 4
\fB{>id}\fR: query ID
.
.IP "\(bu" 4
\fB{>opcode}\fR: query OPCODE
.
.IP "" 0
.
.P
The default Common Log Format is:
.
.IP "" 4
.
.nf
{remote}:{port} \- [{when}] {>id} "{type} {class} {name} {proto} {size} {>do} {>bufsize}" {rcode} {>rflags} {rsize} {duration}
.
.fi
.
.IP "" 0
.
.SH "EXAMPLES"
Log all requests to stdout
.
.IP "" 4
.
.nf
\&\. {
log
whoami
}
.
.fi
.
.IP "" 0
.
.P
Custom log format, for all zones (\fB\.\fR)
.
.IP "" 4
.
.nf
\&\. {
log \. "{proto} Request: {name} {type} {>id}"
}
.
.fi
.
.IP "" 0
.
.P
Only log denials for example\.org (and below to a file)
.
.IP "" 4
.
.nf
\&\. {
log example\.org {
class denial
}
}
.
.fi
.
.IP "" 0
.
.P
Log all queries which were not resolved successfully
.
.IP "" 4
.
.nf
\&\. {
log \. {
class denial error
}
}
.
.fi
.
.IP "" 0
.
.P
Log all queries on which we did not get errors
.
.IP "" 4
.
.nf
\&\. {
log \. {
class denial success
}
}
.
.fi
.
.IP "" 0
.
.P
Multiple \fBclass\fR statements are OR\-ed together, so the above case can also be written as follows:
.
.IP "" 4
.
.nf
\&\. {
log \. {
class denial
class success
}
}
.
.fi
.
.IP "" 0
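.
.P
As a combined sketch (the zone, format string and classes are illustrative), a name, a custom format and a class restriction can be used together:
.
.IP "" 4
.
.nf
\&\. {
    log example\.org "{proto} {name} {type} {rcode} {duration}" {
        class denial error
    }
}
.
.fi
.
.IP "" 0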

115
vendor/github.com/coredns/coredns/man/coredns-metrics.7 generated vendored Normal file
View File

@@ -0,0 +1,115 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-METRICS" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIprometheus\fR \- enables Prometheus \fIhttps://prometheus\.io/\fR metrics\.
.
.SH "DESCRIPTION"
With \fIprometheus\fR you export metrics from CoreDNS and any plugin that has them\. The default location for the metrics is \fBlocalhost:9153\fR\. The metrics path is fixed to \fB/metrics\fR\. The following metrics are exported:
.
.IP "\(bu" 4
\fBcoredns_build_info{version, revision, goversion}\fR \- info about CoreDNS itself\.
.
.IP "\(bu" 4
\fBcoredns_panic_count_total{}\fR \- total number of panics\.
.
.IP "\(bu" 4
\fBcoredns_dns_request_count_total{server, zone, proto, family}\fR \- total query count\.
.
.IP "\(bu" 4
\fBcoredns_dns_request_duration_seconds{server, zone}\fR \- duration to process each query\.
.
.IP "\(bu" 4
\fBcoredns_dns_request_size_bytes{server, zone, proto}\fR \- size of the request in bytes\.
.
.IP "\(bu" 4
\fBcoredns_dns_request_do_count_total{server, zone}\fR \- queries that have the DO bit set
.
.IP "\(bu" 4
\fBcoredns_dns_request_type_count_total{server, zone, type}\fR \- counter of queries per zone and type\.
.
.IP "\(bu" 4
\fBcoredns_dns_response_size_bytes{server, zone, proto}\fR \- response size in bytes\.
.
.IP "\(bu" 4
\fBcoredns_dns_response_rcode_count_total{server, zone, rcode}\fR \- response per zone and rcode\.
.
.IP "" 0
.
.P
Each counter has a label \fBzone\fR which is the zonename used for the request/response\.
.
.P
Extra labels used are:
.
.IP "\(bu" 4
\fBserver\fR identifies the server responsible for the request\. This is a string formatted as the server\'s listening address: \fB<scheme>://[<bind>]:<port>\fR\. For example, for a "normal" DNS server this is \fBdns://:53\fR\. If you are using the \fIbind\fR plugin, an IP address is included, e\.g\.: \fBdns://127\.0\.0\.53:53\fR\.
.
.IP "\(bu" 4
\fBproto\fR which holds the transport of the response ("udp" or "tcp")
.
.IP "\(bu" 4
The address family (\fBfamily\fR) of the transport (1 = IP (IP version 4), 2 = IP6 (IP version 6))\.
.
.IP "\(bu" 4
\fBtype\fR which holds the query type\. It holds most common types (A, AAAA, MX, SOA, CNAME, PTR, TXT, NS, SRV, DS, DNSKEY, RRSIG, NSEC, NSEC3, IXFR, AXFR and ANY) and "other" which lumps together all other types\.
.
.IP "\(bu" 4
The \fBresponse_rcode_count_total\fR has an extra label \fBrcode\fR which holds the rcode of the response\.
.
.IP "" 0
.
.P
If monitoring is enabled, queries that do not enter the plugin chain are exported under the fake name "dropped" (without a closing dot \- this is never a valid domain name)\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
prometheus [ADDRESS]
.
.fi
.
.P
Include this directive in each server block (zone) for which you want to export metrics\.
.
.P
It optionally takes an address to which the metrics are exported; the default is \fBlocalhost:9153\fR\. The metrics path is fixed to \fB/metrics\fR\.
.
.SH "EXAMPLES"
Use an alternative address:
.
.IP "" 4
.
.nf
\&\. {
prometheus localhost:9253
}
.
.fi
.
.IP "" 0
.
.P
Or via an environment variable (this is supported throughout the Corefile): \fBexport PORT=9253\fR, and then:
.
.IP "" 4
.
.nf
\&\. {
prometheus localhost:{$PORT}
}
.
.fi
.
.IP "" 0
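.
.P
A sketch showing how the \fBserver\fR label described above picks up an IP address when the \fIbind\fR plugin is used (the address and port are illustrative):
.
.IP "" 4
.
.nf
\&\. {
    bind 127\.0\.0\.53
    prometheus localhost:9153
    whoami
}
.
.fi
.
.IP "" 0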
.
.SH "BUGS"
When reloading, the Prometheus handler is stopped before the new server instance is started\. If that new server fails to start, then the initial server instance is still available and DNS queries are still served, but the Prometheus handler stays down\. The Prometheus handler will not reply to HTTP requests until a successful reload or a complete restart of CoreDNS\.

71
vendor/github.com/coredns/coredns/man/coredns-nsid.7 generated vendored Normal file
View File

@@ -0,0 +1,71 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-NSID" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fInsid\fR \- adds an identifier of this server to each reply\.
.
.SH "DESCRIPTION"
This plugin implements RFC 5001 \fIhttps://tools\.ietf\.org/html/rfc5001\fR and adds an EDNS0 OPT resource record to replies that uniquely identify the server\. This is useful in anycast setups to see which server was responsible for generating the reply and for debugging\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
nsid [DATA]
.
.fi
.
.P
\fBDATA\fR is the string to use in the nsid record\.
.
.P
If \fBDATA\fR is not given, the host\'s name is used\.
.
.SH "EXAMPLES"
Enable nsid:
.
.IP "" 4
.
.nf
\&\. {
whoami
nsid Use The Force
}
.
.fi
.
.IP "" 0
.
.P
And now a client with NSID support will see an OPT record with the NSID option:
.
.IP "" 4
.
.nf
% dig +nsid @localhost a whoami\.example\.org
;; Got answer:
;; \->>HEADER<<\- opcode: QUERY, status: NOERROR, id: 46880
;; flags: qr aa rd; QUERY: 1, ANSWER: 0, AUTHORITY: 0, ADDITIONAL: 3
\.\.\.\.
; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
; NSID: 55 73 65 20 54 68 65 20 46 6f 72 63 65 ("Use The Force")
;; QUESTION SECTION:
;whoami\.example\.org\. IN A
.
.fi
.
.IP "" 0
.
.SH "ALSO SEE"
RFC 5001 \fIhttps://tools\.ietf\.org/html/rfc5001\fR

78
vendor/github.com/coredns/coredns/man/coredns-pprof.7 generated vendored Normal file
View File

@@ -0,0 +1,78 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-PPROF" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIpprof\fR \- publishes runtime profiling data at endpoints under \fB/debug/pprof\fR\.
.
.SH "DESCRIPTION"
You can visit \fB/debug/pprof\fR on your site for an index of the available endpoints\. By default it will listen on localhost:6053\.
.
.P
This is a debugging tool\. Certain requests (such as collecting execution traces) can be slow\. If you use pprof on a live server, consider restricting access or enabling it only temporarily\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
pprof [ADDRESS]
.
.fi
.
.P
If not specified, ADDRESS defaults to localhost:6053\.
.
.SH "EXAMPLES"
Enable pprof endpoints:
.
.IP "" 4
.
.nf
\&\. {
pprof
}
.
.fi
.
.IP "" 0
.
.P
And use the pprof tool to get statistics: \fBgo tool pprof http://localhost:6053\fR\.
.
.P
Listen on an alternate address:
.
.IP "" 4
.
.nf
\&\. {
pprof 10\.9\.8\.7:6060
}
.
.fi
.
.IP "" 0
.
.P
Listen on all addresses on port 6060:
.
.IP "" 4
.
.nf
\&\. {
pprof :6060
}
.
.fi
.
.IP "" 0
.
.SH "ALSO SEE"
See Go\'s pprof documentation \fIhttps://golang\.org/pkg/net/http/pprof/\fR and Profiling Go Programs \fIhttps://blog\.golang\.org/profiling\-go\-programs\fR\.

227
vendor/github.com/coredns/coredns/man/coredns-proxy.7 generated vendored Normal file
View File

@@ -0,0 +1,227 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-PROXY" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIproxy\fR \- facilitates both a basic reverse proxy and a robust load balancer\.
.
.SH "DESCRIPTION"
The proxy has support for multiple backends\. The load balancing features include multiple policies, health checks, and failovers\. If all hosts fail their health check the proxy plugin will fail back to randomly selecting a target and sending packets to it\.
.
.SH "SYNTAX"
In its most basic form, a simple reverse proxy uses this syntax:
.
.IP "" 4
.
.nf
proxy FROM TO
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the base domain to match for the request to be proxied\.
.
.IP "\(bu" 4
\fBTO\fR is the destination endpoint to proxy to\.
.
.IP "" 0
.
.P
However, advanced features including load balancing can be utilized with an expanded syntax:
.
.IP "" 4
.
.nf
proxy FROM TO\.\.\. {
policy random|least_conn|round_robin|sequential
fail_timeout DURATION
max_fails INTEGER
health_check PATH:PORT [DURATION]
except IGNORED_NAMES\.\.\.
spray
protocol [dns [force_tcp]|grpc [insecure|CACERT|KEY CERT|KEY CERT CACERT]]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the name to match for the request to be proxied\.
.
.IP "\(bu" 4
\fBTO\fR is the destination endpoint to proxy to\. At least one is required, but multiple may be specified\. \fBTO\fR may be an IP:Port pair, or may reference a file in resolv\.conf format\.
.
.IP "\(bu" 4
\fBpolicy\fR is the load balancing policy to use; applies only with multiple backends\. May be one of random, least_conn, round_robin or sequential\. Default is random\.
.
.IP "\(bu" 4
\fBfail_timeout\fR specifies how long to consider a backend as down after it has failed\. While it is down, requests will not be routed to that backend\. A backend is "down" if CoreDNS fails to communicate with it\. The default value is 2 seconds ("2s")\.
.
.IP "\(bu" 4
\fBmax_fails\fR is the number of failures within fail_timeout that are needed before considering a backend to be down\. If 0, the backend will never be marked as down\. Default is 1\.
.
.IP "\(bu" 4
\fBhealth_check\fR will check \fBPATH\fR (on \fBPORT\fR) on each backend\. If a backend returns a status code of 200\-399, then that backend is marked healthy for double the healthcheck duration\. If it doesn\'t, it is marked as unhealthy and no requests are routed to it\. If this option is not provided then health checks are disabled\. The default duration is 4 seconds ("4s")\.
.
.IP "\(bu" 4
\fBIGNORED_NAMES\fR in \fBexcept\fR is a space\-separated list of domains to exclude from proxying\. Requests that match none of these names will be proxied; requests for the listed names will not\.
.
.IP "\(bu" 4
\fBspray\fR when all backends are unhealthy, randomly pick one to send the traffic to\. (This is a failsafe\.)
.
.IP "\(bu" 4
\fBprotocol\fR specifies what protocol to use to speak to an upstream, \fBdns\fR (the default) is plain old DNS\. The \fBgrpc\fR option will talk to a server that has implemented the DnsService \fIhttps://github\.com/coredns/coredns/blob/master/pb/dns\.proto\fR\.
.
.IP "" 0
.
.SH "POLICIES"
There are four load\-balancing policies available:
.
.IP "\(bu" 4
\fBrandom\fR (default) \- Randomly select a backend
.
.IP "\(bu" 4
\fBleast_conn\fR \- Select the backend with the fewest active connections
.
.IP "\(bu" 4
\fBround_robin\fR \- Select the backend in round\-robin fashion
.
.IP "\(bu" 4
\fBsequential\fR \- Select the first available backend, in order of declaration from left to right
.
.IP "\(bu" 4
\fBfirst\fR \- Deprecated\. Use \fBsequential\fR instead
.
.IP "" 0
.
.P
All policies implement randomly spraying packets to backend hosts when \fIno healthy\fR hosts are available\. This is to pre\-empt the case where the healthchecking (as a mechanism) fails\.
.
.SH "UPSTREAM PROTOCOLS"
.
.TP
\fBdns\fR
uses the standard DNS exchange\. You can pass \fBforce_tcp\fR to make sure that the proxied connection is performed over TCP, regardless of the inbound request\'s protocol\.
.
.TP
\fBgrpc\fR
extra options are used to control how the TLS connection is made to the gRPC server\.
.
.IP "\(bu" 4
None \- No client authentication is used, and the system CAs are used to verify the server certificate\.
.
.IP "\(bu" 4
\fBinsecure\fR \- TLS is not used, the connection is made in plaintext (not good in production)\.
.
.IP "\(bu" 4
\fBCACERT\fR \- No client authentication is used, and the file \fBCACERT\fR is used to verify the server certificate\.
.
.IP "\(bu" 4
\fBKEY\fR \fBCERT\fR \- Client authentication is used with the specified key/cert pair\. The server certificate is verified with the system CAs\.
.
.IP "\(bu" 4
\fBKEY\fR \fBCERT\fR \fBCACERT\fR \- Client authentication is used with the specified key/cert pair\. The server certificate is verified using the \fBCACERT\fR file\.
.
.IP "" 0
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metric is exported:
.
.IP "\(bu" 4
\fBcoredns_proxy_request_duration_seconds{server, proto, proto_proxy, family, to}\fR \- duration per upstream interaction\.
.
.IP "\(bu" 4
\fBcoredns_proxy_request_count_total{server, proto, proto_proxy, family, to}\fR \- query count per upstream\.
.
.IP "" 0
.
.P
Where \fBproto_proxy\fR is the protocol used (\fBdns\fR or \fBgrpc\fR), \fBto\fR is the \fBTO\fR specified in the config, \fBproto\fR is the protocol used by the incoming query ("tcp" or "udp"), and \fBfamily\fR is the transport family ("1" for IPv4, and "2" for IPv6)\. \fBserver\fR is the server responsible for the request (and metric)\. See the documentation in the \fImetrics\fR plugin\.
.
.SH "EXAMPLES"
Proxy all requests within example\.org\. to a backend system:
.
.IP "" 4
.
.nf
proxy example\.org 127\.0\.0\.1:9005
.
.fi
.
.IP "" 0
.
.P
Load\-balance all requests between three backends (using random policy):
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:53 10\.0\.0\.11:1053 10\.0\.0\.12
}
.
.fi
.
.IP "" 0
.
.P
Same as above, but round\-robin style:
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:53 10\.0\.0\.11:1053 10\.0\.0\.12 {
policy round_robin
}
}
.
.fi
.
.IP "" 0
.
.P
With health checks and proxy headers to pass hostname, IP, and scheme upstream:
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.11:53 10\.0\.0\.11:53 10\.0\.0\.12:53 {
policy round_robin
health_check /health:8080
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy everything except requests to miek\.nl or example\.org
.
.IP "" 4
.
.nf
\&\. {
proxy \. 10\.0\.0\.10:1234 {
except miek\.nl example\.org
}
}
.
.fi
.
.IP "" 0
.
.P
Proxy everything except \fBexample\.org\fR using the host\'s \fBresolv\.conf\fR\'s nameservers:
.
.IP "" 4
.
.nf
\&\. {
proxy \. /etc/resolv\.conf {
except example\.org
}
}
.
.fi
.
.IP "" 0
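.
.P
A sketch of proxying over gRPC to another CoreDNS instance, using the \fBCACERT\fR form of the \fBprotocol grpc\fR option (the endpoint and certificate file name are illustrative):
.
.IP "" 4
.
.nf
\&\. {
    proxy \. 10\.0\.0\.20:443 {
        protocol grpc ca\.pem
    }
}
.
.fi
.
.IP "" 0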

121
vendor/github.com/coredns/coredns/man/coredns-reload.7 generated vendored Normal file
View File

@@ -0,0 +1,121 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-RELOAD" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIreload\fR \- allows automatic reload of a changed Corefile
.
.SH "DESCRIPTION"
This plugin allows automatic reload of a changed \fICorefile\fR\. To enable automatic reloading of \fIzone file\fR changes, use the \fBauto\fR plugin\.
.
.P
This plugin periodically checks if the Corefile has changed by reading it and calculating its MD5 checksum\. If the file has changed, it reloads CoreDNS with the new Corefile\. This eliminates the need to send a SIGHUP or SIGUSR1 after changing the Corefile\.
.
.P
The reloads are graceful \- you should not see any loss of service when the reload happens\. Even if the new Corefile has an error, CoreDNS will continue to run the old config and an error message will be printed to the log\. But see the Bugs section for failure modes\.
.
.P
In some environments (for example, Kubernetes), there may be many CoreDNS instances that started very near the same time and all share a common Corefile\. To prevent these all from reloading at the same time, some jitter is added to the reload check interval\. This is jitter from the perspective of multiple CoreDNS instances; each instance still checks on a regular interval, but all of these instances will have their reloads spread out across the jitter duration\. This isn\'t strictly necessary given that the reloads are graceful, and can be disabled by setting the jitter to \fB0s\fR\.
.
.P
Jitter is re\-calculated whenever the Corefile is reloaded\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
reload [INTERVAL] [JITTER]
.
.fi
.
.IP "\(bu" 4
The plugin will check for changes every \fBINTERVAL\fR, subject to +/\- the \fBJITTER\fR duration
.
.IP "\(bu" 4
\fBINTERVAL\fR and \fBJITTER\fR are Go durations \fIhttps://golang\.org/pkg/time/#ParseDuration\fR
.
.IP "\(bu" 4
Default \fBINTERVAL\fR is 30s, default \fBJITTER\fR is 15s
.
.IP "\(bu" 4
Minimal value for \fBINTERVAL\fR is 2s, and for \fBJITTER\fR is 1s
.
.IP "\(bu" 4
If \fBJITTER\fR is more than half of \fBINTERVAL\fR, it will be set to half of \fBINTERVAL\fR
.
.IP "" 0
.
.SH "EXAMPLES"
Check with the default intervals:
.
.IP "" 4
.
.nf
\&\. {
reload
erratic
}
.
.fi
.
.IP "" 0
.
.P
Check every 10 seconds (jitter is automatically set to 10 / 2 = 5 in this case):
.
.IP "" 4
.
.nf
\&\. {
reload 10s
erratic
}
.
.fi
.
.IP "" 0
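.
.P
Check every 30 seconds with an explicit jitter of 10 seconds (the values are illustrative):
.
.IP "" 4
.
.nf
\&\. {
    reload 30s 10s
    erratic
}
.
.fi
.
.IP "" 0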
.
.SH "BUGS"
The reload happens without data loss (i\.e\. DNS queries keep flowing), but there is a corner case where the reload fails and you lose functionality\. Consider the following Corefile:
.
.IP "" 4
.
.nf
\&\. {
health :8080
whoami
}
.
.fi
.
.IP "" 0
.
.P
CoreDNS starts and serves health from :8080\. Now you change \fB:8080\fR to \fB:443\fR not knowing a process is already listening on that port\. The process reloads and performs the following steps:
.
.IP "1." 4
close the listener on 8080
.
.IP "2." 4
reload and parse the config again
.
.IP "3." 4
fail to start a new listener on 443
.
.IP "4." 4
fail loading the new Corefile, abort and keep using the old process
.
.IP "" 0
.
.P
After the aborted attempt to reload, we are left with the old process running, but the listener was closed in step 1, so the health endpoint is broken\. The same can happen with the \fIprometheus\fR metrics plugin\.
.
.P
In general, be careful when assigning new ports and expecting reload to work fully\.

105
vendor/github.com/coredns/coredns/man/coredns-reverse.7 generated vendored Normal file
View File

@@ -0,0 +1,105 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-REVERSE" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIreverse\fR \- allows for dynamic responses to PTR and the related A/AAAA requests\.
.
.SH "DESCRIPTION"
If a request matches a regular expression (see Template Syntax below) this plugin will generate a response\. This is only done for "address" records (PTR, A and AAAA)\.
.
.SH "SYNTAX"
.
.nf
reverse NETWORK\.\.\. {
hostname TEMPLATE
[ttl TTL]
[fallthrough [ZONES\.\.\.]]
    [wildcard]
}
.
.fi
.
.IP "\(bu" 4
\fBNETWORK\fR one or more CIDR formatted networks to respond on\.
.
.IP "\(bu" 4
\fBhostname\fR injects the IP and zone to a template for the hostname\. Defaults to "ip\-{IP}\.{zone[1]}"\. See below for template\.
.
.IP "\(bu" 4
\fBttl\fR defaults to 60
.
.IP "\(bu" 4
\fBfallthrough\fR if zone matches and no record can be generated, pass request to the next plugin\. If \fB[ZONES\.\.\.]\fR is omitted, then fallthrough happens for all zones for which the plugin is authoritative\. If specific zones are listed (for example \fBin\-addr\.arpa\fR and \fBip6\.arpa\fR), then only queries for those zones will be subject to fallthrough\.
.
.IP "\(bu" 4
\fBwildcard\fR allows matches to catch all subdomains as well\.
.
.IP "" 0
.
.SS "TEMPLATE SYNTAX"
The template for the hostname is used for generating the PTR for a reverse lookup and matching the forward lookup back to an IP\.
.
.P
The \fB{ip}\fR symbol is \fBrequired\fR to make reverse work\. For IPv4 lookups the IP is directly extracted\. With IPv6 lookups the ":" is removed, and any zero ranges are expanded; e\.g\., "ffff::ffff" results in "ffff000000000000000000000000ffff"\.
.
.P
The \fB{zone[i]}\fR symbol is \fBoptional\fR and can be replaced by a fixed (zone) string\. The zone will be matched by the zones listed in \fIthis\fR configuration stanza\. \fBi\fR needs to be replaced with the index of the configured listener zones, starting with 1\.
.
.SH "EXAMPLES"
.
.nf
arpa compute\.internal {
# proxy unmatched requests
proxy \. 8\.8\.8\.8
# answer requests for IPs in this network
# PTR 1\.0\.32\.10\.in\-addr\.arpa\. 3600 ip\-10\.0\.32\.1\.compute\.internal\.
# A ip\-10\.0\.32\.1\.compute\.internal\. 3600 10\.0\.32\.1
# v6 is also possible
# PTR 1\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.0\.1\.0\.d\.f\.ip6\.arpa\. 3600 ip\-fd010000000000000000000000000001\.compute\.internal\.
# AAAA ip\-fd010000000000000000000000000001\.compute\.internal\. 3600 fd01::1
reverse 10\.32\.0\.0/16 fd01::/16 {
# template of the ip injection to hostname, zone resolved to compute\.internal\.
hostname ip\-{ip}\.{zone[2]}
ttl 3600
# Forward unanswered or unmatched requests to proxy
# without this flag, requesting A/AAAA records on compute\.internal\. will end here\.
fallthrough
}
}
.
.fi
.
.IP "" 4
.
.nf
32\.10\.in\-addr\.arpa\.arpa arpa\.company\.org {
reverse 10\.32\.0\.0/16 {
# template of the ip injection to hostname, zone resolved to arpa\.company\.org\.
hostname "ip\-{ip}\.v4\.{zone[2]}"
ttl 3600
# fallthrough is not required, v4\.arpa\.company\.org\. will be only answered here
}
# cidr closer to the ip wins, so we can overwrite the "default"
reverse 10\.32\.2\.0/24 {
        # it is also possible to set a fixed domain suffix
hostname ip\-{ip}\.fix\.arpa\.company\.org\.
ttl 3600
}
}
.
.fi
.
.IP "" 0

357
vendor/github.com/coredns/coredns/man/coredns-rewrite.7 generated vendored Normal file
View File

@@ -0,0 +1,357 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-REWRITE" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIrewrite\fR \- performs internal message rewriting\.
.
.SH "DESCRIPTION"
Rewrites are invisible to the client\. There are simple rewrites (fast) and complex rewrites (slower), but they\'re powerful enough to accommodate most dynamic back\-end applications\.
.
.SH "SYNTAX"
A simplified/easy\-to\-digest syntax for \fIrewrite\fR is:
.
.IP "" 4
.
.nf
rewrite [continue|stop] FIELD FROM TO
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBFIELD\fR indicates what part of the request/response is being re\-written\.
.
.IP "\(bu" 4
\fBtype\fR \- the type field of the request will be rewritten\. FROM/TO must be a DNS record type (\fBA\fR, \fBMX\fR, etc); e\.g\., to rewrite ANY queries to HINFO, use \fBrewrite type ANY HINFO\fR\.
.
.IP "\(bu" 4
\fBclass\fR \- the class of the message will be rewritten\. FROM/TO must be a DNS class type (\fBIN\fR, \fBCH\fR, or \fBHS\fR) e\.g\., to rewrite CH queries to IN use \fBrewrite class CH IN\fR\.
.
.IP "\(bu" 4
\fBname\fR \- the query name in the \fIrequest\fR is rewritten; by default this is a full match of the name, e\.g\., \fBrewrite name miek\.nl example\.org\fR\. Other match types are supported, see the \fBName Field Rewrites\fR section below\.
.
.IP "\(bu" 4
\fBanswer name\fR \- the query name in the \fIresponse\fR is rewritten\. This option has special restrictions and requirements; in particular, it must always be combined with a \fBname\fR rewrite\. See below in the \fBResponse Rewrites\fR section\.
.
.IP "\(bu" 4
\fBedns0\fR \- an EDNS0 option can be appended to the request as described below in the \fBEDNS0 Options\fR section\.
.
.IP "" 0
.
.IP "\(bu" 4
\fBFROM\fR is the name or type to match
.
.IP "\(bu" 4
\fBTO\fR is the destination name or type to rewrite to
.
.IP "" 0
.
.P
If you specify multiple rules and an incoming query matches multiple rules, the rewrite will behave as follows:
.
.IP "\(bu" 4
\fBcontinue\fR will continue applying the next rule in the rule list\.
.
.IP "\(bu" 4
\fBstop\fR will consider the current rule the last rule and will not continue\.
.
.IP "" 0
.
.P
The default behaviour when this rule processing mode is not specified is \fBstop\fR\.
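.
.P
As a sketch (the names are illustrative), two rules can be chained: the first rewrites the query type and, because of \fBcontinue\fR, the second (name) rule is still evaluated for the same query:
.
.IP "" 4
.
.nf
\&\. {
    rewrite continue type ANY HINFO
    rewrite stop name exact example\.org example\.net
    whoami
}
.
.fi
.
.IP "" 0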
.
.SS "NAME FIELD REWRITES"
The \fBrewrite\fR plugin offers the ability to match on the name in the question section of a DNS request\. The match could be exact, substring, or based on a prefix, suffix, or regular expression\.
.
.P
The syntax for the name re\-writing is as follows:
.
.IP "" 4
.
.nf
rewrite [continue|stop] name [exact|prefix|suffix|substring|regex] STRING STRING
.
.fi
.
.IP "" 0
.
.P
The match type, i\.e\. \fBexact\fR, \fBsubstring\fR, etc\., triggers re\-write:
.
.IP "\(bu" 4
\fBexact\fR (default): on exact match of the name in the question section of a request
.
.IP "\(bu" 4
\fBsubstring\fR: on a partial match of the name in the question section of a request
.
.IP "\(bu" 4
\fBprefix\fR: when the name begins with the matching string
.
.IP "\(bu" 4
\fBsuffix\fR: when the name ends with the matching string
.
.IP "\(bu" 4
\fBregex\fR: when the name in the question section of a request matches a regular expression
.
.IP "" 0
.
.P
If the match type is omitted, the \fBexact\fR match type is assumed\.
.
.P
The following instruction rewrites the name in any query that contains the substring \fBservice\.us\-west\-1\.example\.org\fR\.
.
.IP "" 4
.
.nf
rewrite name substring service\.us\-west\-1\.example\.org service\.us\-west\-1\.consul
.
.fi
.
.IP "" 0
.
.P
Thus:
.
.IP "\(bu" 4
Incoming Request Name: \fBftp\.service\.us\-west\-1\.example\.org\fR
.
.IP "\(bu" 4
Re\-written Request Name: \fBftp\.service\.us\-west\-1\.consul\fR
.
.IP "" 0
.
.P
The following instruction uses regular expressions\. The name in a request matching the \fB(\.*)\-(us\-west\-1)\e\.example\e\.org\fR regular expression is replaced with \fB{1}\.service\.{2}\.consul\fR, where \fB{1}\fR and \fB{2}\fR are regular expression match groups\.
.
.IP "" 4
.
.nf
rewrite name regex (\.*)\-(us\-west\-1)\e\.example\e\.org {1}\.service\.{2}\.consul
.
.fi
.
.IP "" 0
.
.P
Thus:
.
.IP "\(bu" 4
Incoming Request Name: \fBftp\-us\-west\-1\.example\.org\fR
.
.IP "\(bu" 4
Re\-written Request Name: \fBftp\.service\.us\-west\-1\.consul\fR
.
.IP "" 0
.
.SS "RESPONSE REWRITES"
When re\-writing the names of incoming DNS requests, CoreDNS re\-writes the \fBQUESTION SECTION\fR of the request\. It may be necessary to re\-write the \fBANSWER SECTION\fR of the response as well, because some DNS resolvers treat a mismatch between the \fBQUESTION SECTION\fR and the \fBANSWER SECTION\fR as a man\-in\-the\-middle attack (MITM)\.
.
.P
For example, a user tries to resolve \fBftp\-us\-west\-1\.coredns\.rocks\fR\. The CoreDNS configuration file has the following rule:
.
.IP "" 4
.
.nf
rewrite name regex (\.*)\-(us\-west\-1)\e\.coredns\e\.rocks {1}\.service\.{2}\.consul
.
.fi
.
.IP "" 0
.
.P
The CoreDNS instance re\-wrote the request for \fBftp\-us\-west\-1\.coredns\.rocks\fR to \fBftp\.service\.us\-west\-1\.consul\fR and ultimately resolved it to 3 records\. The resolved records (see the \fBANSWER SECTION\fR) were not from \fBcoredns\.rocks\fR, but rather from \fBservice\.us\-west\-1\.consul\fR\.
.
.IP "" 4
.
.nf
$ dig @10\.1\.1\.1 ftp\-us\-west\-1\.coredns\.rocks
; <<>> DiG 9\.8\.3\-P1 <<>> @10\.1\.1\.1 ftp\-us\-west\-1\.coredns\.rocks
; (1 server found)
;; global options: +cmd
;; Got answer:
;; \->>HEADER<<\- opcode: QUERY, status: NOERROR, id: 8619
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;ftp\-us\-west\-1\.coredns\.rocks\. IN A
;; ANSWER SECTION:
ftp\.service\.us\-west\-1\.consul\. 0 IN A 10\.10\.10\.10
ftp\.service\.us\-west\-1\.consul\. 0 IN A 10\.20\.20\.20
ftp\.service\.us\-west\-1\.consul\. 0 IN A 10\.30\.30\.30
.
.fi
.
.IP "" 0
.
.P
The above is the mismatch\.
.
.P
The following configuration snippet allows for the re\-writing of the \fBANSWER SECTION\fR, provided that the \fBQUESTION SECTION\fR was re\-written:
.
.IP "" 4
.
.nf
rewrite stop {
name regex (\.*)\-(us\-west\-1)\e\.coredns\e\.rocks {1}\.service\.{2}\.consul
answer name (\.*)\e\.service\e\.(us\-west\-1)\e\.consul {1}\-{2}\.coredns\.rocks
}
.
.fi
.
.IP "" 0
.
.P
Now, the \fBANSWER SECTION\fR matches the \fBQUESTION SECTION\fR:
.
.IP "" 4
.
.nf
$ dig @10\.1\.1\.1 ftp\-us\-west\-1\.coredns\.rocks
; <<>> DiG 9\.8\.3\-P1 <<>> @10\.1\.1\.1 ftp\-us\-west\-1\.coredns\.rocks
; (1 server found)
;; global options: +cmd
;; Got answer:
;; \->>HEADER<<\- opcode: QUERY, status: NOERROR, id: 8619
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 0, ADDITIONAL: 0
;; QUESTION SECTION:
;ftp\-us\-west\-1\.coredns\.rocks\. IN A
;; ANSWER SECTION:
ftp\-us\-west\-1\.coredns\.rocks\. 0 IN A 10\.10\.10\.10
ftp\-us\-west\-1\.coredns\.rocks\. 0 IN A 10\.20\.20\.20
ftp\-us\-west\-1\.coredns\.rocks\. 0 IN A 10\.30\.30\.30
.
.fi
.
.IP "" 0
.
.P
The syntax for the rewrite of DNS request and response is as follows:
.
.IP "" 4
.
.nf
rewrite [continue|stop] {
name regex STRING STRING
answer name STRING STRING
}
.
.fi
.
.IP "" 0
.
.P
Note that the above syntax is strict\. For response rewrites only \fBname\fR rules are allowed to match the question section, and only by match type \fBregex\fR\. The answer rewrite must be after the name, as ordered in the syntax example\. There must be only two lines (a \fBname\fR followed by an \fBanswer\fR) in the brackets; additional rules are not supported\.
.
.P
An alternate syntax for the rewrite of DNS request and response is as follows:
.
.IP "" 4
.
.nf
rewrite [continue|stop] name regex STRING STRING answer name STRING STRING
.
.fi
.
.IP "" 0
.
.SH "EDNS0 OPTIONS"
Using the \fBedns0\fR FIELD, you can set, append, or replace specific EDNS0 options in the request\.
.
.IP "\(bu" 4
\fBreplace\fR will modify any "matching" option with the specified option\. The criteria for "matching" varies based on EDNS0 type\.
.
.IP "\(bu" 4
\fBappend\fR will add the option only if no matching option exists
.
.IP "\(bu" 4
\fBset\fR will modify a matching option or add one if none is found
.
.IP "" 0
.
.P
Currently supported are \fBEDNS0_LOCAL\fR, \fBEDNS0_NSID\fR and \fBEDNS0_SUBNET\fR\.
.
.SS "EDNS0_LOCAL"
This has two fields, code and data\. A match is defined as having the same code\. Data may be a string or a variable\.
.
.TP
String data can be treated as hex if it starts with \fB0x\fR\. Example:
.
.IP "" 4
.
.nf
\&\. {
rewrite edns0 local set 0xffee 0x61626364
whoami
}
.
.fi
.
.IP "" 0
.
.P
rewrites the first local option with code 0xffee, setting the data to "abcd"\. Equivalent:
.
.IP "" 4
.
.nf
\&\. {
rewrite edns0 local set 0xffee abcd
}
.
.fi
.
.IP "" 0
.
.TP
Variable data is specified with a pair of curly brackets \fB{}\fR\. The following variables are supported:
{qname}, {qtype}, {client_ip}, {client_port}, {protocol}, {server_ip}, {server_port}\.
.
.P
Example:
.
.IP "" 4
.
.nf
rewrite edns0 local set 0xffee {client_ip}
.
.fi
.
.IP "" 0
.
.SS "EDNS0_NSID"
This has no fields; it will add an NSID option with an empty string for the NSID\. If the option already exists and the action is \fBreplace\fR or \fBset\fR, then the NSID in the option will be set to the empty string\.
.
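.P
A minimal sketch (it assumes the same field/action layout as the \fBEDNS0_LOCAL\fR examples above, with no data arguments for NSID):
.
.IP "" 4
.
.nf
\&\. {
    rewrite edns0 nsid set
    whoami
}
.
.fi
.
.IP "" 0
.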
.SS "EDNS0_SUBNET"
This has two fields, IPv4 bitmask length and IPv6 bitmask length\. The bitmask length is used to extract the client subnet from the source IP address in the query\.
.
.P
Example:
.
.IP "" 4
.
.nf
rewrite edns0 subnet set 24 56
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
If the query has source IP as IPv4, the first 24 bits in the IP will be the network subnet\.
.
.IP "\(bu" 4
If the query has source IP as IPv6, the first 56 bits in the IP will be the network subnet\.
.
.IP "" 0
.
.SH "FULL SYNTAX"
The full plugin usage syntax is harder to digest:
.
.IP "" 4
.
.nf
rewrite [continue|stop] {type|class|edns0|name [exact|prefix|suffix|substring|regex [FROM TO answer name]]} FROM TO
.
.fi
.
.IP "" 0
.
.P
The syntax above doesn\'t cover the multi\-line block option for specifying a name request+response rewrite rule described in the \fBResponse Rewrites\fR section\.

40
vendor/github.com/coredns/coredns/man/coredns-root.7 generated vendored Normal file
View File

@@ -0,0 +1,40 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-ROOT" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIroot\fR \- simply specifies the root of where to find (zone) files\.
.
.SH "DESCRIPTION"
The default root is the current working directory of CoreDNS\. The \fIroot\fR plugin allows you to change this\. A relative root path is relative to the current working directory\.
.
.P
This plugin can only be used once per Server Block\.
.
.SH "SYNTAX"
.
.nf
root PATH
.
.fi
.
.P
\fBPATH\fR is the directory to set as CoreDNS\' root\.
.
.SH "EXAMPLES"
Serve zone data (when the \fIfile\fR plugin is used) from \fB/etc/coredns/zones\fR:
.
.IP "" 4
.
.nf
\&\. {
root /etc/coredns/zones
}
.
.fi
.
.IP "" 0

View File

@@ -0,0 +1,64 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-ROUTE53" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIroute53\fR \- enables serving zone data from AWS route53\.
.
.SH "DESCRIPTION"
The route53 plugin is useful for serving zones from resource record sets in AWS route53\. This plugin only supports A and AAAA records\. The route53 plugin can be used when coredns is deployed on AWS\.
.
.SH "SYNTAX"
.
.nf
route53 [ZONE:HOSTED_ZONE_ID\.\.\.] {
[aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY]
}
.
.fi
.
.IP "\(bu" 4
\fBZONE\fR the name of the domain to be accessed\.
.
.IP "\(bu" 4
\fBHOSTED_ZONE_ID\fR the ID of the hosted zone that contains the resource record sets to be accessed\.
.
.IP "\(bu" 4
\fBAWS_ACCESS_KEY_ID\fR and \fBAWS_SECRET_ACCESS_KEY\fR the AWS access key ID and secret access key to be used when querying AWS (optional)\. If they are not provided, then coredns tries to access AWS credentials the same way as the AWS CLI, e\.g\., environment variables, the AWS credentials file, instance profile credentials, etc\.
.
.IP "" 0
.
.SH "EXAMPLES"
Enable route53, with implicit aws credentials:
.
.IP "" 4
.
.nf
\&\. {
route53 example\.org\.:Z1Z2Z3Z4DZ5Z6Z7
}
.
.fi
.
.IP "" 0
.
.P
Enable route53, with explicit aws credentials:
.
.IP "" 4
.
.nf
\&\. {
route53 example\.org\.:Z1Z2Z3Z4DZ5Z6Z7 {
aws_access_key AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
}
}
.
.fi
.
.IP "" 0

View File

@@ -0,0 +1,95 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-SECONDARY" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIsecondary\fR \- enables serving a zone retrieved from a primary server\.
.
.SH "DESCRIPTION"
With \fIsecondary\fR you can transfer (via AXFR) a zone from another server\. The retrieved zone is \fInot committed\fR to disk (a violation of the RFC)\. This means restarting CoreDNS will cause it to retrieve all secondary zones\.
.
.IP "" 4
.
.nf
secondary [ZONES\.\.\.]
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBZONES\fR zones it should be authoritative for\. If empty, the zones from the configuration block are used\. Note that without a remote address to \fIget\fR the zone from, the above is not that useful\.
.
.IP "" 0
.
.P
A working syntax would be:
.
.IP "" 4
.
.nf
secondary [zones\.\.\.] {
transfer from ADDRESS
transfer to ADDRESS
upstream [ADDRESS\.\.\.]
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBtransfer from\fR specifies from which address to fetch the zone\. It can be specified multiple times; if one does not work, another will be tried\.
.
.IP "\(bu" 4
\fBtransfer to\fR can be enabled to allow this secondary zone to be transferred again\.
.
.IP "\(bu" 4
\fBupstream\fR defines upstream resolvers used to resolve external names found (think CNAMEs) pointing to external names\. This is only really useful when CoreDNS is configured as a proxy; for normal authoritative serving you don\'t need \fIor\fR want to use this\. \fBADDRESS\fR can be an IP address, an IP:port, or a string pointing to a file that is structured like /etc/resolv\.conf\. If no \fBADDRESS\fR is given, CoreDNS will resolve CNAMEs against itself\.
.
.IP "" 0
.
.P
When a zone is due to be refreshed (the refresh timer fires), a random jitter of 5 seconds is applied before fetching; in the case of a retry this is 2 seconds\. If there are any errors during the transfer, the transfer fails and this is logged\.
.
.SH "EXAMPLES"
Transfer \fBexample\.org\fR from 10\.0\.1\.1, and if that fails try 10\.1\.2\.1\.
.
.IP "" 4
.
.nf
example\.org {
secondary {
transfer from 10\.0\.1\.1
transfer from 10\.1\.2\.1
}
}
.
.fi
.
.IP "" 0
.
.P
Or re\-export the retrieved zone to other secondaries\.
.
.IP "" 4
.
.nf
\&\. {
secondary example\.net {
transfer from 10\.1\.2\.1
transfer to *
}
}
.
.fi
.
.IP "" 0
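.
.P
A sketch combining a transfer source with the \fBupstream\fR option for resolving external names (the address and file path are illustrative):
.
.IP "" 4
.
.nf
example\.org {
    secondary {
        transfer from 10\.0\.1\.1
        upstream /etc/resolv\.conf
    }
}
.
.fi
.
.IP "" 0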
.
.SH "BUGS"
Only AXFR is supported and the retrieved zone is not committed to disk\.

View File

@@ -0,0 +1,348 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-TEMPLATE" "7" "June 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fItemplate\fR \- allows for dynamic responses based on the incoming query\.
.
.SH "DESCRIPTION"
The \fItemplate\fR plugin allows you to dynamically respond to queries by just writing a (Go) template\.
.
.SH "SYNTAX"
.
.nf
template CLASS TYPE [ZONE\.\.\.] {
[match REGEX\.\.\.]
[answer RR]
[additional RR]
[authority RR]
[\.\.\.]
[rcode CODE]
[upstream [ADDRESS\.\.\.]]
[fallthrough [ZONE\.\.\.]]
}
.
.fi
.
.IP "\(bu" 4
\fBCLASS\fR the query class (usually IN or ANY)\.
.
.IP "\(bu" 4
\fBTYPE\fR the query type (A, PTR, \.\.\. can be ANY to match all types)\.
.
.IP "\(bu" 4
\fBZONE\fR the zone scope(s) for this template\. Defaults to the server zones\.
.
.IP "\(bu" 4
\fBREGEX\fR Go regexps \fIhttps://golang\.org/pkg/regexp/\fR that are matched against the incoming question name\. Specifying no regex matches everything (default: \fB\.*\fR)\. The first matching regex wins\.
.
.IP "\(bu" 4
\fBanswer|additional|authority\fR \fBRR\fR An RFC 1035 \fIhttps://tools\.ietf\.org/html/rfc1035#section\-5\fR style resource record fragment built by a Go template \fIhttps://golang\.org/pkg/text/template/\fR that contains the reply\.
.
.IP "\(bu" 4
\fBrcode\fR \fBCODE\fR A response code (\fBNXDOMAIN, SERVFAIL, \.\.\.\fR)\. The default is \fBSUCCESS\fR\.
.
.IP "\(bu" 4
\fBupstream\fR [\fBADDRESS\fR\.\.\.] defines the upstream resolvers used for resolving CNAME\. If no \fBADDRESS\fR is given, CoreDNS will resolve CNAMEs against itself\. \fBADDRESS\fR can be an IP, an IP:port, or a path to a file structured like resolv\.conf\.
.
.IP "\(bu" 4
\fBfallthrough\fR Continue with the next plugin if the zone matched but no regex matched\. If specific zones are listed (for example \fBin\-addr\.arpa\fR and \fBip6\.arpa\fR), then only queries for those zones will be subject to fallthrough\.
.
.IP "" 0
.
.P
At least one \fBanswer\fR or \fBrcode\fR directive is needed (e\.g\. \fBrcode NXDOMAIN\fR)\.
.
.P
\fIAlso see\fR contains an additional reading list\.
.
.SH "TEMPLATES"
Each resource record is a full\-featured Go template \fIhttps://golang\.org/pkg/text/template/\fR with the following predefined data
.
.IP "\(bu" 4
\fB\.Zone\fR the matched zone string (e\.g\. \fBexample\.\fR)\.
.
.IP "\(bu" 4
\fB\.Name\fR the query name, as a string (lowercased)\.
.
.IP "\(bu" 4
\fB\.Class\fR the query class (usually \fBIN\fR)\.
.
.IP "\(bu" 4
\fB\.Type\fR the RR type requested (e\.g\. \fBPTR\fR)\.
.
.IP "\(bu" 4
\fB\.Match\fR an array of all matches\. \fBindex \.Match 0\fR refers to the whole match\.
.
.IP "\(bu" 4
\fB\.Group\fR a map of the named capture groups\.
.
.IP "\(bu" 4
\fB\.Message\fR the complete incoming DNS message\.
.
.IP "\(bu" 4
\fB\.Question\fR the matched question section\.
.
.IP "" 0
.
.P
The output of the template must be an RFC 1035 \fIhttps://tools\.ietf\.org/html/rfc1035\fR style resource record (commonly referred to as a "zone file")\.
.
.P
\fBWARNING\fR there is a syntactical problem with Go templates and CoreDNS config files\. Expressions like \fB{{$var}}\fR will be interpreted as a reference to an environment variable by CoreDNS (and Caddy) while \fB{{ $var }}\fR will work\. See \fIBugs\fR and corefile(5)\.
.
.SH "METRICS"
If monitoring is enabled (via the \fIprometheus\fR directive) then the following metrics are exported:
.
.IP "\(bu" 4
\fBcoredns_template_matches_total{server, regex}\fR the total number of matched requests by regex\.
.
.IP "\(bu" 4
\fBcoredns_template_template_failures_total{server, regex,section,template}\fR the number of times the Go templating failed\. Regex, section and template label values can be used to map the error back to the config file\.
.
.IP "\(bu" 4
\fBcoredns_template_rr_failures_total{server, regex,section,template}\fR the number of times the templated resource record was invalid and could not be parsed\. Regex, section and template label values can be used to map the error back to the config file\.
.
.IP "" 0
.
.P
Both failure cases indicate a problem with the template configuration\. The \fBserver\fR label indicates the server incrementing the metric, see the \fImetrics\fR plugin for details\.
.
.SH "EXAMPLES"
.
.SS "RESOLVE EVERYTHING TO NXDOMAIN"
The simplest template is
.
.IP "" 4
.
.nf
\&\. {
template ANY ANY {
rcode NXDOMAIN
}
}
.
.fi
.
.IP "" 0
.
.IP "1." 4
This template uses the default zone (\fB\.\fR or all queries)
.
.IP "2." 4
All queries will be answered (no \fBfallthrough\fR)
.
.IP "3." 4
The answer is always NXDOMAIN
.
.IP "" 0
.
.SS "RESOLVE \.INVALID AS NXDOMAIN"
The \fB\.invalid\fR domain is a reserved TLD (see RFC 2606 Reserved Top Level DNS Names \fIhttps://tools\.ietf\.org/html/rfc2606#section\-2\fR) to indicate invalid domains\.
.
.IP "" 4
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template ANY ANY invalid {
rcode NXDOMAIN
authority "invalid\. 60 {{ \.Class }} SOA ns\.invalid\. hostmaster\.invalid\. (1 60 60 60 60)"
}
}
.
.fi
.
.IP "" 0
.
.IP "1." 4
A query to \.invalid will result in NXDOMAIN (rcode)
.
.IP "2." 4
A dummy SOA record is sent to hand out a TTL of 60s for caching purposes
.
.IP "3." 4
Querying \fB\.invalid\fR in the \fBCH\fR class will also cause an NXDOMAIN/SOA response
.
.IP "4." 4
The default regex is \fB\.*\fR
.
.IP "" 0
.
.SS "BLOCK INVALID SEARCH DOMAIN COMPLETIONS"
Imagine you run \fBexample\.com\fR with a datacenter \fBdc1\.example\.com\fR\. The datacenter domain is part of the DNS search domain\. However \fBsomething\.example\.com\.dc1\.example\.com\fR would indicate a fully qualified domain name (\fBsomething\.example\.com\fR) that inadvertently has the default domain or search path (\fBdc1\.example\.com\fR) added\.
.
.IP "" 4
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template IN ANY example\.com\.dc1\.example\.com {
rcode NXDOMAIN
authority "{{ \.Zone }} 60 IN SOA ns\.example\.com hostmaster\.example\.com (1 60 60 60 60)"
}
}
.
.fi
.
.IP "" 0
.
.P
A more verbose regex based equivalent would be
.
.IP "" 4
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template IN ANY example\.com {
match "example\e\.com\e\.(dc1\e\.example\e\.com\e\.)$"
rcode NXDOMAIN
authority "{{ index \.Match 1 }} 60 IN SOA ns\.{{ index \.Match 1 }} hostmaster\.{{ index \.Match 1 }} (1 60 60 60 60)"
fallthrough
}
}
.
.fi
.
.IP "" 0
.
.P
The regex\-based version can do more complex matching/templating while zone\-based templating is easier to read and use\.
.
.SS "RESOLVE A/PTR FOR \.EXAMPLE"
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
# ip\-a\-b\-c\-d\.example\.com A a\.b\.c\.d
template IN A example {
match (^|[\.])ip\-10\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]example[\.]$
answer "{{ \.Name }} 60 IN A 10\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
fallthrough
}
# d\.c\.b\.a\.in\-addr\.arpa PTR ip\-a\-b\-c\-d\.example
template IN PTR 10\.in\-addr\.arpa\. {
match ^(?P<d>[0\-9]*)[\.](?P<c>[0\-9]*)[\.](?P<b>[0\-9]*)[\.]10[\.]in\-addr[\.]arpa[\.]$
answer "{{ \.Name }} 60 IN PTR ip\-10\-{{ \.Group\.b }}\-{{ \.Group\.c }}\-{{ \.Group\.d }}\.example\.com\."
}
}
.
.fi
.
.P
An IPv4 address consists of 4 bytes, \fBa\.b\.c\.d\fR\. Named groups make it less error\-prone to reverse the IP address in the PTR case\. Try to use named groups to explain what your regex and template are doing\.
.
.P
Note that the A record is actually a wildcard: any subdomain of the IP address will resolve to the IP address\.
.
.P
Having templates to map certain PTR/A pairs is a common pattern\.
.
.P
Fallthrough is needed for mixed domains where only some responses are templated\.
.
.SS "RESOLVE MULTIPLE IP PATTERNS"
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template IN A example {
match "^ip\-(?P<a>10)\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]dc[\.]example[\.]$"
match "^(?P<a>[0\-9]*)[\.](?P<b>[0\-9]*)[\.](?P<c>[0\-9]*)[\.](?P<d>[0\-9]*)[\.]ext[\.]example[\.]$"
answer "{{ \.Name }} 60 IN A {{ \.Group\.a}}\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
fallthrough
}
}
.
.fi
.
.P
Named capture groups can be used to template one response for multiple patterns\.
.
.SS "RESOLVE A AND MX RECORDS FOR IP TEMPLATES IN \.EXAMPLE"
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template IN A example {
match ^ip\-10\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]example[\.]$
answer "{{ \.Name }} 60 IN A 10\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
fallthrough
}
template IN MX example {
match ^ip\-10\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]example[\.]$
answer "{{ \.Name }} 60 IN MX 10 {{ \.Name }}"
additional "{{ \.Name }} 60 IN A 10\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
fallthrough
}
}
.
.fi
.
.SS "ADDING AUTHORITATIVE NAMESERVERS TO THE RESPONSE"
.
.nf
\&\. {
proxy \. 8\.8\.8\.8
template IN A example {
match ^ip\-10\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]example[\.]$
answer "{{ \.Name }} 60 IN A 10\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
authority "example\. 60 IN NS ns0\.example\."
authority "example\. 60 IN NS ns1\.example\."
additional "ns0\.example\. 60 IN A 203\.0\.113\.8"
additional "ns1\.example\. 60 IN A 198\.51\.100\.8"
fallthrough
}
template IN MX example {
match ^ip\-10\-(?P<b>[0\-9]*)\-(?P<c>[0\-9]*)\-(?P<d>[0\-9]*)[\.]example[\.]$
answer "{{ \.Name }} 60 IN MX 10 {{ \.Name }}"
additional "{{ \.Name }} 60 IN A 10\.{{ \.Group\.b }}\.{{ \.Group\.c }}\.{{ \.Group\.d }}"
authority "example\. 60 IN NS ns0\.example\."
authority "example\. 60 IN NS ns1\.example\."
additional "ns0\.example\. 60 IN A 203\.0\.113\.8"
additional "ns1\.example\. 60 IN A 198\.51\.100\.8"
fallthrough
}
}
.
.fi
.
.SH "ALSO SEE"
.
.IP "\(bu" 4
Go regexp \fIhttps://golang\.org/pkg/regexp/\fR for details about the regex implementation
.
.IP "\(bu" 4
RE2 syntax reference \fIhttps://github\.com/google/re2/wiki/Syntax\fR for details about the regex syntax
.
.IP "\(bu" 4
RFC 1034 \fIhttps://tools\.ietf\.org/html/rfc1034#section\-3\.6\.1\fR and RFC 1035 \fIhttps://tools\.ietf\.org/html/rfc1035#section\-5\fR for the resource record format
.
.IP "\(bu" 4
Go template \fIhttps://golang\.org/pkg/text/template/\fR for the template language reference
.
.IP "" 0
.
.SH "BUGS"
CoreDNS supports caddyfile environment variables \fIhttps://caddyserver\.com/docs/caddyfile#env\fR with the notation \fB{$ENV_VAR}\fR\. This parser feature will break Go template variable \fIhttps://golang\.org/pkg/text/template/#hdr\-Variables\fR notations like \fB{{$variable}}\fR\. The equivalent notation \fB{{ $variable }}\fR will work\. Try to avoid Go template variables in the context of this plugin\.

68
vendor/github.com/coredns/coredns/man/coredns-tls.7 generated vendored Normal file
View File

@@ -0,0 +1,68 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-TLS" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fItls\fR \- allows you to configure the server certificates for the TLS and gRPC servers\.
.
.SH "DESCRIPTION"
CoreDNS supports queries that are encrypted using TLS (DNS over Transport Layer Security, RFC 7858) or are using gRPC (https://grpc\.io/, not an IETF standard)\. Normally DNS traffic isn\'t encrypted at all (DNSSEC only signs resource records)\.
.
.P
The \fIproxy\fR plugin also supports gRPC (\fBprotocol gRPC\fR), meaning you can chain CoreDNS servers using this protocol\.
.
.P
The \fItls\fR "plugin" allows you to configure the cryptographic keys that are needed for both DNS\-over\-TLS and DNS\-over\-gRPC\. If the \fBtls\fR directive is omitted, then no encryption takes place\.
.
.P
The gRPC protocol buffer is defined in \fBpb/dns\.proto\fR\. It defines the proto as a simple wrapper for the wire data of a DNS message\.
.
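.P
As a purely illustrative, hypothetical sketch (not an official example; the address and query name are placeholders), a Go client can pack an ordinary DNS message with the github\.com/miekg/dns library and send its wire data through the generated \fBpb\.DnsServiceClient\fR\. Against a server configured with \fBtls\fR you would use gRPC transport credentials instead of an insecure connection\.
.
.IP "" 4
.
.nf
package main

import (
    "context"
    "log"

    "github\.com/coredns/coredns/pb"
    "github\.com/miekg/dns"
    "google\.golang\.org/grpc"
)

func main() {
    // Dial the DNS\-over\-gRPC endpoint (credentials elided for brevity)\.
    conn, err := grpc\.Dial("127\.0\.0\.1:5553", grpc\.WithInsecure())
    if err != nil {
        log\.Fatal(err)
    }
    defer conn\.Close()

    // Pack a normal DNS query and wrap the wire data in a DnsPacket\.
    m := new(dns\.Msg)
    m\.SetQuestion("example\.org\.", dns\.TypeA)
    wire, _ := m\.Pack()

    reply, err := pb\.NewDnsServiceClient(conn)\.Query(context\.Background(), &pb\.DnsPacket{Msg: wire})
    if err != nil {
        log\.Fatal(err)
    }
    in := new(dns\.Msg)
    _ = in\.Unpack(reply\.Msg)
    log\.Printf("%s", in)
}
.
.fi
.
.IP "" 0
.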
.SH "SYNTAX"
.
.nf
tls CERT KEY [CA]
.
.fi
.
.P
Parameter \fBCA\fR is optional\. If not set, the system CAs can be used to verify the client certificate\.
.
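.P
For example (an illustrative snippet; the file names are placeholders), relying on the system CAs simply means omitting the third argument:
.
.IP "" 4
.
.nf
tls cert\.pem key\.pem
.
.fi
.
.IP "" 0
.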
.SH "EXAMPLES"
Start a DNS\-over\-TLS server that picks up incoming DNS\-over\-TLS queries on port 5553 and uses the nameservers defined in \fB/etc/resolv\.conf\fR to resolve the query\. This proxy path uses plain old DNS\.
.
.IP "" 4
.
.nf
tls://\.:5553 {
tls cert\.pem key\.pem ca\.pem
proxy \. /etc/resolv\.conf
}
.
.fi
.
.IP "" 0
.
.P
Start a DNS\-over\-gRPC server that is similar to the previous example, but using DNS\-over\-gRPC for incoming queries\.
.
.IP "" 4
.
.nf
grpc://\. {
tls cert\.pem key\.pem ca\.pem
proxy \. /etc/resolv\.conf
}
.
.fi
.
.IP "" 0
.
.P
Only Knot DNS\'s \fBkdig\fR supports DNS\-over\-TLS queries; no command line client supports gRPC, which makes debugging these transports harder than it should be\.
.
.SH "ALSO SEE"
RFC 7858 and https://grpc\.io\.

147
vendor/github.com/coredns/coredns/man/coredns-trace.7 generated vendored Normal file
View File

@@ -0,0 +1,147 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-TRACE" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fItrace\fR \- enables OpenTracing\-based tracing of DNS requests as they go through the plugin chain\.
.
.SH "DESCRIPTION"
With \fItrace\fR you enable OpenTracing of how a request flows through CoreDNS\.
.
.SH "SYNTAX"
The simplest form is just:
.
.IP "" 4
.
.nf
trace [ENDPOINT\-TYPE] [ENDPOINT]
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBENDPOINT\-TYPE\fR is the type of tracing destination\. Currently only \fBzipkin\fR and \fBdatadog\fR are supported\. Defaults to \fBzipkin\fR\.
.
.IP "\(bu" 4
\fBENDPOINT\fR is the tracing destination, and defaults to \fBlocalhost:9411\fR\. For Zipkin, if ENDPOINT does not begin with \fBhttp\fR, then it will be transformed to \fBhttp://ENDPOINT/api/v1/spans\fR\.
.
.IP "" 0
.
.P
With this form, all queries will be traced\.
.
.P
Additional features can be enabled with this syntax:
.
.IP "" 4
.
.nf
trace [ENDPOINT\-TYPE] [ENDPOINT] {
every AMOUNT
service NAME
client_server
}
.
.fi
.
.IP "" 0
.
.IP "\(bu" 4
\fBevery\fR \fBAMOUNT\fR will only trace one query out of every \fBAMOUNT\fR queries\. For example, to trace 1 in every 100 queries, use an AMOUNT of 100\. The default is 1\.
.
.IP "\(bu" 4
\fBservice\fR \fBNAME\fR allows you to specify the service name reported to the tracing server\. Default is \fBcoredns\fR\.
.
.IP "\(bu" 4
\fBclient_server\fR will enable the \fBClientServerSameSpan\fR OpenTracing feature\.
.
.IP "" 0
.
.SH "ZIPKIN"
You can run Zipkin on a Docker host like this:
.
.IP "" 4
.
.nf
docker run \-d \-p 9411:9411 openzipkin/zipkin
.
.fi
.
.IP "" 0
.
.SH "EXAMPLES"
Use an alternative Zipkin address:
.
.IP "" 4
.
.nf
trace tracinghost:9253
.
.fi
.
.IP "" 0
.
.P
or
.
.IP "" 4
.
.nf
\&\. {
trace zipkin tracinghost:9253
}
.
.fi
.
.IP "" 0
.
.P
If you are running Zipkin behind an API reverse proxy and need to remap the standard Zipkin URL, you can do something like:
.
.IP "" 4
.
.nf
trace http://tracinghost:9411/zipkin/api/v1/spans
.
.fi
.
.IP "" 0
.
.P
Using DataDog:
.
.IP "" 4
.
.nf
trace datadog localhost:8125
.
.fi
.
.IP "" 0
.
.P
Trace one query every 10000 queries, rename the service, and enable same span:
.
.IP "" 4
.
.nf
trace tracinghost:9411 {
every 10000
service dnsproxy
client_server
}
.
.fi
.
.IP "" 0

73
vendor/github.com/coredns/coredns/man/coredns-whoami.7 generated vendored Normal file
View File

@@ -0,0 +1,73 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS\-WHOAMI" "7" "May 2018" "CoreDNS" "CoreDNS plugins"
.
.SH "NAME"
\fIwhoami\fR \- returns your resolver\'s local IP address, port and transport\.
.
.SH "DESCRIPTION"
The \fIwhoami\fR plugin is not really that useful, but it can be used as a simple (fast) endpoint to test clients against\. When \fIwhoami\fR returns a response it will have your client\'s IP address in the additional section as either an A or AAAA record\.
.
.P
The reply always has an empty answer section\. The port and transport are included in the additional section as an SRV record; the transport can be "tcp" or "udp"\.
.
.IP "" 4
.
.nf
\&\._<transport>\.qname\. 0 IN SRV 0 0 <port> \.
.
.fi
.
.IP "" 0
.
.P
The \fIwhoami\fR plugin will respond to every A or AAAA query, regardless of the query name\.
.
.P
If CoreDNS can\'t find a Corefile on startup this is the \fIdefault\fR plugin that gets loaded\. As such it can be used to check that CoreDNS is responding to queries\. Other than that this plugin is of limited use in production\.
.
.SH "SYNTAX"
.
.nf
whoami
.
.fi
.
.SH "EXAMPLES"
Start a server on the default port and load the \fIwhoami\fR plugin\.
.
.IP "" 4
.
.nf
\&\. {
whoami
}
.
.fi
.
.IP "" 0
.
.P
When queried for "example\.org A", CoreDNS will respond with:
.
.IP "" 4
.
.nf
;; QUESTION SECTION:
;example\.org\. IN A
;; ADDITIONAL SECTION:
example\.org\. 0 IN A 10\.240\.0\.1
_udp\.example\.org\. 0 IN SRV 0 0 40212
.
.fi
.
.IP "" 0
.
.SH "SEE ALSO"
Read the blog post \fIhttps://coredns\.io/2017/03/01/how\-to\-add\-plugins\-to\-coredns/\fR on how this plugin is built, or explore the source code \fIhttps://github\.com/coredns/coredns/blob/master/plugin/whoami/\fR\.

59
vendor/github.com/coredns/coredns/man/coredns.1 generated vendored Normal file
View File

@@ -0,0 +1,59 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREDNS" "1" "June 2018" "CoreDNS" "CoreDNS"
.
.SH "NAME"
\fBcoredns\fR
.
.SH "NAME"
\fIcoredns\fR \- pluggable DNS nameserver optimized for service discovery and flexibility\.
.
.SH "SYNOPSIS"
\fIcoredns\fR \fI[OPTION]\fR\.\.\.
.
.SH "DESCRIPTION"
CoreDNS is a DNS server that chains plugins\. Each plugin handles a DNS feature, like rewriting queries, Kubernetes service discovery or just exporting metrics\. There are many other plugins, each described on \fIhttps://coredns\.io/plugins\fR and in their respective manual pages\. Plugins not bundled in CoreDNS are listed on \fIhttps://coredns\.io/explugins\fR\.
.
.P
When started with no options CoreDNS will look for a file named \fBCorefile\fR in the current directory; if found, it will parse its contents and start up accordingly\. If no \fBCorefile\fR is found it will start with the \fIwhoami\fR plugin (coredns\-whoami(7)) and start listening on port 53 (unless overridden with \fB\-dns\.port\fR)\.
.
.P
Available options:
.
.TP
\fB\-conf\fR \fBFILE\fR
specify the Corefile to load\.
.
.TP
\fB\-cpu\fR \fBCAP\fR
specify maximum CPU capacity in percent\.
.
.TP
\fB\-dns\.port\fR \fBPORT\fR
override default port (53) to listen on\.
.
.TP
\fB\-pidfile\fR \fBFILE\fR
write PID to \fBFILE\fR\.
.
.TP
\fB\-plugins\fR
list all plugins and quit\.
.
.TP
\fB\-quiet\fR
don\'t print any version and port information on startup\.
.
.TP
\fB\-version\fR
show version and quit\.
.
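.P
For example, an invocation that loads a specific Corefile and listens on a non\-default port could look like this (an illustrative command line; the path is a placeholder):
.
.IP "" 4
.
.nf
coredns \-conf /etc/coredns/Corefile \-dns\.port 1053
.
.fi
.
.IP "" 0
.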
.SH "AUTHORS"
CoreDNS Authors\.
.
.SH "COPYRIGHT"
Apache License 2\.0
.
.SH "SEE ALSO"
Corefile(5) coredns\-debug(7) coredns\-dnssec(7) coredns\-health(7) coredns\-log(7) coredns\-file(7) coredns\-nsid(7) coredns\-auto(7) coredns\-erratic(7) coredns\-chaos(7) coredns\-dnstap(7) coredns\-pprof(7) coredns\-tls(7) coredns\-loadbalance(7) coredns\-cache(7) coredns\-root(7) coredns\-whoami(7) coredns\-bind(7) coredns\-hosts(7) coredns\-template(7) coredns\-proxy(7) coredns\-autopath(7) coredns\-kubernetes(7) coredns\-forward(7) coredns\-secondary(7) coredns\-route53(7) coredns\-errors(7) coredns\-metrics(7) coredns\-reload(7) coredns\-rewrite(7) coredns\-federation(7) coredns\-etcd(7) coredns\-trace(7)\.

166
vendor/github.com/coredns/coredns/man/corefile.5 generated vendored Normal file
View File

@@ -0,0 +1,166 @@
.\" generated with Ronn/v0.7.3
.\" http://github.com/rtomayko/ronn/tree/0.7.3
.
.TH "COREFILE" "5" "May 2018" "CoreDNS" "CoreDNS"
.
.SH "NAME"
\fBcorefile\fR
.
.SH "NAME"
\fIcorefile\fR \- configuration file for CoreDNS
.
.SH "DESCRIPTION"
A \fIcorefile\fR specifies the (internal) servers CoreDNS should run and what plugins each of these should chain\. The syntax is as follows:
.
.IP "" 4
.
.nf
[SCHEME://]ZONE [[SCHEME://]ZONE]\.\.\.[:PORT] {
[PLUGIN]\.\.\.
}
.
.fi
.
.IP "" 0
.
.P
The \fBZONE\fR defines for which names this server should be called; multiple zones are allowed and should be \fIwhite space\fR separated\. You can use a "reverse" syntax to specify a reverse zone (i\.e\. ip6\.arpa and in\-addr\.arpa) by using an IP address in CIDR notation\. The optional \fBSCHEME\fR defaults to \fBdns://\fR, but can also be \fBtls://\fR (DNS over TLS) or \fBgrpc://\fR (DNS over gRPC)\.
.
.P
Specifying a \fBZONE\fR \fIand\fR \fBPORT\fR combination multiple times for \fIdifferent\fR servers will lead to an error on startup\.
.
.P
When a query comes in it is matched against all zones of all servers; the server with the longest match on the query name will receive the query\.
.
.P
The optional \fBPORT\fR controls the port on which the server will bind; this defaults to 53\. If you use a port number here, you \fIcan\'t\fR override it with \fB\-dns\.port\fR (coredns(1))\.
.
.P
\fBPLUGIN\fR defines the plugin(s) we want to load into this server\. This is optional as well, but a server with no plugins will just return SERVFAIL for all queries\. Each plugin can have a number of properties that can have arguments; see the documentation for each plugin\.
.
.P
Comments begin with an unquoted hash \fB#\fR and continue to the end of the line\. Comments may be started anywhere on a line\.
.
.P
Environment variables are supported and either the Unix or Windows form may be used: \fB{$ENV_VAR_1}\fR or \fB{%ENV_VAR_2%}\fR\.
.
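.P
For example (a hypothetical illustration, assuming an environment variable \fBMY_ZONE\fR holds a zone name such as \fBexample\.org\fR):
.
.IP "" 4
.
.nf
{$MY_ZONE} {
    whoami
}
.
.fi
.
.IP "" 0
.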
.P
You can use the \fBimport\fR "plugin" to include parts of other files, see \fIhttps://coredns\.io/explugins/import\fR\.
.
.P
If CoreDNS can\'t find a Corefile to load, it loads the following built\-in one:
.
.IP "" 4
.
.nf
\&\. {
whoami
}
.
.fi
.
.IP "" 0
.
.SH "EXAMPLES"
The \fBZONE\fR is the root zone \fB\.\fR, the \fBPLUGIN\fR is chaos\. The chaos plugin takes an argument: \fBCoreDNS\-001\fR\. This text is returned on a CH class query: \fBdig CH txt version\.bind @localhost\fR\.
.
.IP "" 4
.
.nf
\&\. {
chaos CoreDNS\-001
}
.
.fi
.
.IP "" 0
.
.P
When defining a new zone, you either create a new server, or add it to an existing one\. Here we define one server that handles two zones, each of which can potentially chain different plugins:
.
.IP "" 4
.
.nf
example\.org {
whoami
}
org {
whoami
}
.
.fi
.
.IP "" 0
.
.P
Is identical to:
.
.IP "" 4
.
.nf
example\.org org {
whoami
}
.
.fi
.
.IP "" 0
.
.P
Reverse zones can be specified as domain names:
.
.IP "" 4
.
.nf
0\.0\.10\.in\-addr\.arpa {
whoami
}
.
.fi
.
.IP "" 0
.
.P
or by just using the CIDR notation:
.
.IP "" 4
.
.nf
10\.0\.0\.0/24 {
whoami
}
.
.fi
.
.IP "" 0
.
.P
This also works on a non\-octet boundary:
.
.IP "" 4
.
.nf
10\.0\.0\.0/27 {
whoami
}
.
.fi
.
.IP "" 0
.
.SH "AUTHORS"
CoreDNS Authors\.
.
.SH "COPYRIGHT"
Apache License 2\.0
.
.SH "SEE ALSO"
The manual page for CoreDNS: coredns(1) and more documentation on \fIhttps://coredns\.io\fR\.

11
vendor/github.com/coredns/coredns/pb/Makefile generated vendored Normal file
View File

@@ -0,0 +1,11 @@
# Generate the Go files from the dns.proto protobuf, you need the utilities
# from: https://github.com/golang/protobuf to make this work.
# The generated dns.pb.go is checked into git, so for normal builds we don't need
# to run this generation step.
all: dns.pb.go
dns.pb.go: dns.proto
protoc --go_out=plugins=grpc:. dns.proto && \
sed -e s?golang.org/x/net/context?context? < dns.pb.go > dns.pb.go.tmp && \
mv dns.pb.go.tmp dns.pb.go

141
vendor/github.com/coredns/coredns/pb/dns.pb.go generated vendored Normal file
View File

@@ -0,0 +1,141 @@
// Code generated by protoc-gen-go.
// source: dns.proto
// DO NOT EDIT!
/*
Package pb is a generated protocol buffer package.
It is generated from these files:
dns.proto
It has these top-level messages:
DnsPacket
*/
package pb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type DnsPacket struct {
Msg []byte `protobuf:"bytes,1,opt,name=msg,proto3" json:"msg,omitempty"`
}
func (m *DnsPacket) Reset() { *m = DnsPacket{} }
func (m *DnsPacket) String() string { return proto.CompactTextString(m) }
func (*DnsPacket) ProtoMessage() {}
func (*DnsPacket) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *DnsPacket) GetMsg() []byte {
if m != nil {
return m.Msg
}
return nil
}
func init() {
proto.RegisterType((*DnsPacket)(nil), "coredns.dns.DnsPacket")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for DnsService service
type DnsServiceClient interface {
Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error)
}
type dnsServiceClient struct {
cc *grpc.ClientConn
}
func NewDnsServiceClient(cc *grpc.ClientConn) DnsServiceClient {
return &dnsServiceClient{cc}
}
func (c *dnsServiceClient) Query(ctx context.Context, in *DnsPacket, opts ...grpc.CallOption) (*DnsPacket, error) {
out := new(DnsPacket)
err := grpc.Invoke(ctx, "/coredns.dns.DnsService/Query", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for DnsService service
type DnsServiceServer interface {
Query(context.Context, *DnsPacket) (*DnsPacket, error)
}
func RegisterDnsServiceServer(s *grpc.Server, srv DnsServiceServer) {
s.RegisterService(&_DnsService_serviceDesc, srv)
}
func _DnsService_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(DnsPacket)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DnsServiceServer).Query(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/coredns.dns.DnsService/Query",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DnsServiceServer).Query(ctx, req.(*DnsPacket))
}
return interceptor(ctx, in, info, handler)
}
var _DnsService_serviceDesc = grpc.ServiceDesc{
ServiceName: "coredns.dns.DnsService",
HandlerType: (*DnsServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Query",
Handler: _DnsService_Query_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "dns.proto",
}
func init() { proto.RegisterFile("dns.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 120 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xc9, 0x2b, 0xd6,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4e, 0xce, 0x2f, 0x4a, 0x05, 0x71, 0x53, 0xf2, 0x8a,
0x95, 0x64, 0xb9, 0x38, 0x5d, 0xf2, 0x8a, 0x03, 0x12, 0x93, 0xb3, 0x53, 0x4b, 0x84, 0x04, 0xb8,
0x98, 0x73, 0x8b, 0xd3, 0x25, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0x40, 0x4c, 0x23, 0x57, 0x2e,
0x2e, 0x97, 0xbc, 0xe2, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x73, 0x2e, 0xd6, 0xc0,
0xd2, 0xd4, 0xa2, 0x4a, 0x21, 0x31, 0x3d, 0x24, 0x33, 0xf4, 0xe0, 0x06, 0x48, 0xe1, 0x10, 0x77,
0x62, 0x89, 0x62, 0x2a, 0x48, 0x4a, 0x62, 0x03, 0xdb, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
0xf5, 0xd1, 0x3f, 0x26, 0x8c, 0x00, 0x00, 0x00,
}

12
vendor/github.com/coredns/coredns/pb/dns.proto generated vendored Normal file
View File

@@ -0,0 +1,12 @@
syntax = "proto3";
package coredns.dns;
option go_package = "pb";
message DnsPacket {
bytes msg = 1;
}
service DnsService {
rpc Query (DnsPacket) returns (DnsPacket);
}

55
vendor/github.com/coredns/coredns/plugin.cfg generated vendored Normal file
View File

@@ -0,0 +1,55 @@
# Directives are registered in the order they should be
# executed.
#
# Ordering is VERY important. Every plugin will
# feel the effects of all other plugins below
# (after) them during a request, but they must not
# care what plugins above them are doing.
# How to rebuild with updated plugin configurations:
# Modify the list below and run `go gen && go build`
# The parser takes the input format of
# <plugin-name>:<package-name>
# Or
# <plugin-name>:<fully-qualified-package-name>
#
# External plugin example:
# log:github.com/coredns/coredns/plugin/log
# Local plugin example:
# log:log
tls:tls
reload:reload
nsid:nsid
root:root
bind:bind
debug:debug
trace:trace
health:health
pprof:pprof
prometheus:metrics
errors:errors
log:log
dnstap:dnstap
chaos:chaos
loadbalance:loadbalance
cache:cache
rewrite:rewrite
dnssec:dnssec
autopath:autopath
reverse:deprecated
template:template
hosts:hosts
route53:route53
federation:federation
kubernetes:kubernetes
file:file
auto:auto
secondary:secondary
etcd:etcd
forward:forward
proxy:proxy
erratic:erratic
whoami:whoami
on:github.com/mholt/caddy/onevent

134
vendor/github.com/coredns/coredns/plugin.md generated vendored Normal file
View File

@@ -0,0 +1,134 @@
# Plugins
## Writing Plugins
The main method that gets called is `ServeDNS`. It has three parameters:
* a `context.Context`;
* `dns.ResponseWriter` that is, basically, the client's connection;
* `*dns.Msg` the request from the client.
`ServeDNS` returns two values, a response code and an error. If the error is not nil, CoreDNS
will return a SERVFAIL to the client. The response code tells CoreDNS if a *reply has been
written by the plugin chain or not*. In the latter case CoreDNS will take care of that.
CoreDNS treats:
* SERVFAIL (dns.RcodeServerFailure)
* REFUSED (dns.RcodeRefused)
* FORMERR (dns.RcodeFormatError)
* NOTIMP (dns.RcodeNotImplemented)
as special and will then assume *nothing* has been written to the client. In all other cases it
assumes something has been written to the client (by the plugin).
The [*example*](https://github.com/coredns/example) plugin shows a bare-bones implementation that
can be used as a starting point for your plugin. This plugin has tests and extensive comments in the
code.
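As a rough, hedged sketch of these interfaces (the type name `Example` is a placeholder; the plugin does
nothing except hand the query to the next plugin in the chain):
~~~ go
package example

import (
	"context"

	"github.com/coredns/coredns/plugin"

	"github.com/miekg/dns"
)

// Example is a placeholder plugin that passes every query down the chain.
type Example struct {
	Next plugin.Handler
}

// ServeDNS implements the plugin.Handler interface.
func (e Example) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	// Nothing is written to the client here, so the returned code tells CoreDNS
	// that the rest of the chain is responsible for the reply.
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}

// Name implements the plugin.Handler interface.
func (e Example) Name() string { return "example" }
~~~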
## Hooking It Up
See a couple of blog posts on how to write and add a plugin to CoreDNS:
* <https://blog.coredns.io/2017/03/01/how-to-add-plugins-to-coredns/>
* <https://blog.coredns.io/2016/12/19/writing-plugin-for-coredns/>, slightly older, but useful.
## Logging
If your plugin needs to output a log line you should use the `plugin/pkg/log` package. This package
implements log levels. The standard way of outputting is: `log.Info` for info level messages. The
levels available are `log.Info`, `log.Warning`, `log.Error`, `log.Debug`. Each of these also has
an `f` variant. The plugin's name should be included by using the log package like so:
~~~ go
import clog "github.com/coredns/coredns/plugin/pkg/log"
var log = clog.NewWithPlugin("whoami")
log.Info("message") // outputs: [INFO] plugin/whoami: message
~~~
In general, logging should be left to the higher layers by returning an error. However, if there is
a reason to consume the error and notify the user, then logging in the plugin itself can be
acceptable. The `Debug*` functions only output something when the *debug* plugin is loaded in the
server.
## Metrics
When exporting metrics the *Namespace* should be `plugin.Namespace` (="coredns"), and the
*Subsystem* should be the name of the plugin. The README.md for the plugin should then also contain
a *Metrics* section detailing the metrics. If the plugin supports dynamic health reporting it
should also have a *Health* section detailing some of its inner workings.
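A hedged sketch of such an exported metric (the variable and metric names are invented for illustration;
only the Namespace/Subsystem convention above is prescribed):
~~~ go
package example

import (
	"github.com/coredns/coredns/plugin"

	"github.com/prometheus/client_golang/prometheus"
)

// requestCount follows the coredns_<subsystem>_... naming convention via Namespace and Subsystem.
var requestCount = prometheus.NewCounterVec(prometheus.CounterOpts{
	Namespace: plugin.Namespace, // "coredns"
	Subsystem: "example",        // the plugin's name
	Name:      "request_count_total",
	Help:      "Counter of requests seen by the example plugin.",
}, []string{"server"})
~~~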
## Documentation
Each plugin should have a README.md explaining what the plugin does and how it is configured. The
file should have the following layout:
* Title: use the plugin's name
* Subsection titled: "Named"
with *PLUGIN* - one line description.
* Subsection titled: "Description" has a longer description.
* Subsection titled: "Syntax", syntax and supported directives.
* Subsection titled: "Examples"
More sections are of course possible.
### Style
We use the Unix manual page style:
* The name of the plugin in the running text should be italic: *plugin*.
* ALL CAPITAL: user supplied argument; in the running text reference these with strong text: `**`:
**EXAMPLE**.
* Optional text: in block quotes: `[optional]`.
* Use three dots to indicate multiple options are allowed: `arg...`.
* Items used literally: `literal`.
### Example Domain Names
Please be sure to use `example.org` or `example.net` in any examples and tests you provide. These
are the standard domain names created for this purpose.
## Fallthrough
In a perfect world the following would be true for a plugin: "Either you are responsible for a zone or
not". If the answer is "not", the plugin should call the next plugin in the chain. If "yes" it
should handle *all* names that fall in this zone and the names below - i.e. it should handle the
entire domain and all sub domains.
~~~ txt
. {
file example.org db.example
}
~~~
In this example the *file* plugin is handling all names below (and including) `example.org`. If
a query comes in that is not a subdomain of (or equal to) `example.org`, the next plugin is called.
Now, the world isn't perfect, and there are good reasons to "fall through" to the next middleware,
meaning a plugin is only responsible for a *subset* of names within the zone. The first of these
to appear was the *reverse* plugin that synthesizes PTR and A/AAAA responses (useful with IPv6).
The nature of the *reverse* plugin is such that it only deals with A, AAAA and PTR, and then only
for a subset of the names. Ideally you would want to layer *reverse* **in front of** another
plugin such as *file* or *auto* (or even *proxy*). This means *reverse* handles some special
reverse cases and **all other** requests are handled by the backing plugin. This is exactly what
"fallthrough" does. To keep things explicit, we've opted that plugins implementing such behavior
should implement a `fallthrough` keyword.
The `fallthrough` directive should optionally accept a list of zones. Only queries for records
in one of those zones should be allowed to fall through.
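As a hypothetical illustration (`someplugin` stands in for any plugin that implements the keyword):
~~~ txt
example.org {
    # someplugin only answers a subset of names in example.org ...
    someplugin {
        fallthrough example.org
    }
    # ... everything it does not answer falls through to *file*.
    file db.example.org
}
~~~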
## Qualifying for main repo
Plugins for CoreDNS can live out-of-tree, `plugin.cfg` defaults to CoreDNS' repo but other
repos work just as well. So when do we consider the inclusion of a new plugin in the main repo?
* First, the plugin should be useful for other people. "Useful" is a subjective term. We will
probably need to further refine this.
* It should be sufficiently different from other plugins to warrant inclusion.
* Current internet standards need to be supported: IPv4 and IPv6, so A and AAAA records should be
handled (if your plugin is in the business of dealing with address records that is).
* It must have tests.
* It must have a README.md for documentation.

5
vendor/github.com/coredns/coredns/plugin/auto/OWNERS generated vendored Normal file
View File

@@ -0,0 +1,5 @@
reviewers:
- miekg
- stp-ip
approvers:
- miekg

View File

@@ -0,0 +1,78 @@
# auto
## Name
*auto* - enables serving zone data from an RFC 1035-style master file, which is automatically picked up from disk.
## Description
The *auto* plugin is used for an "old-style" DNS server. It serves from a preloaded file that exists
on disk. If the zone file contains signatures (i.e. is signed, i.e. using DNSSEC) correct DNSSEC answers
are returned. Only NSEC is supported! If you use this setup *you* are responsible for re-signing the
zonefile. New or changed zones are automatically picked up from disk.
## Syntax
~~~
auto [ZONES...] {
directory DIR [REGEXP ORIGIN_TEMPLATE [TIMEOUT]]
no_reload
upstream [ADDRESS...]
}
~~~
**ZONES** zones it should be authoritative for. If empty, the zones from the configuration block
are used.
* `directory` loads zones from the specified **DIR**. If a file name matches **REGEXP** it will be
used to extract the origin. **ORIGIN_TEMPLATE** will be used as a template for the origin. Strings
like `{<number>}` are replaced with the respective matches in the file name, e.g. `{1}` is the
first match, `{2}` is the second. The default is: `db\.(.*) {1}` i.e. from a file with the
name `db.example.com`, the extracted origin will be `example.com`. **TIMEOUT** specifies how often
CoreDNS should scan the directory; the default is every 60 seconds. This value is in seconds.
The minimum value is 1 second.
* `no_reload` by default CoreDNS will try to reload a zone every minute and reloads if the
SOA's serial has changed. This option disables that behavior.
* `upstream` defines upstream resolvers to be used to resolve external names found (think CNAMEs)
pointing to external names. **ADDRESS** can be an IP address, an IP:port or a string pointing to
a file that is structured as /etc/resolv.conf. If no **ADDRESS** is given, CoreDNS will resolve CNAMEs
against itself.
All directives from the *file* plugin are supported. Note that *auto* will load all zones found,
even though the directive might only receive queries for a specific zone. I.e.:
~~~ corefile
. {
auto example.org {
directory /etc/coredns/zones
}
}
~~~
Will happily pick up a zone for `example.COM`, except it will never be queried, because the *auto*
directive is only authoritative for `example.ORG`.
## Examples
Load `org` domains from `/etc/coredns/zones/org` and allow transfers to the internet, but send
notifies to 10.240.1.1
~~~ corefile
. {
auto org {
directory /etc/coredns/zones/org
transfer to *
transfer to 10.240.1.1
}
}
~~~
Load `org` domains from `/etc/coredns/zones/org` and look for file names such as `www.db.example.org`,
where `example.org` is the origin. Scan every 45 seconds.
~~~ corefile
org {
auto {
directory /etc/coredns/zones/org www\.db\.(.*) {1} 45
}
}
~~~

94
vendor/github.com/coredns/coredns/plugin/auto/auto.go generated vendored Normal file
View File

@@ -0,0 +1,94 @@
// Package auto implements an on-the-fly loading file backend.
package auto
import (
"context"
"regexp"
"time"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/file"
"github.com/coredns/coredns/plugin/metrics"
"github.com/coredns/coredns/plugin/pkg/upstream"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
type (
// Auto holds the zones and the loader configuration for automatically loading zones.
Auto struct {
Next plugin.Handler
*Zones
metrics *metrics.Metrics
loader
}
loader struct {
directory string
template string
re *regexp.Regexp
// In the future this should be something like ZoneMeta that contains all this stuff.
transferTo []string
noReload bool
upstream upstream.Upstream // Upstream for looking up names during the resolution process.
duration time.Duration
}
)
// ServeDNS implements the plugin.Handler interface.
func (a Auto) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
state := request.Request{W: w, Req: r, Context: ctx}
qname := state.Name()
// Precheck with the origins, i.e. are we allowed to look here?
zone := plugin.Zones(a.Zones.Origins()).Matches(qname)
if zone == "" {
return plugin.NextOrFailure(a.Name(), a.Next, ctx, w, r)
}
// Now the real zone.
zone = plugin.Zones(a.Zones.Names()).Matches(qname)
a.Zones.RLock()
z, ok := a.Zones.Z[zone]
a.Zones.RUnlock()
if !ok || z == nil {
return dns.RcodeServerFailure, nil
}
if state.QType() == dns.TypeAXFR || state.QType() == dns.TypeIXFR {
xfr := file.Xfr{Zone: z}
return xfr.ServeDNS(ctx, w, r)
}
answer, ns, extra, result := z.Lookup(state, qname)
m := new(dns.Msg)
m.SetReply(r)
m.Authoritative, m.RecursionAvailable = true, true
m.Answer, m.Ns, m.Extra = answer, ns, extra
switch result {
case file.Success:
case file.NoData:
case file.NameError:
m.Rcode = dns.RcodeNameError
case file.Delegation:
m.Authoritative = false
case file.ServerFailure:
return dns.RcodeServerFailure, nil
}
state.SizeAndDo(m)
m, _ = state.Scrub(m)
w.WriteMsg(m)
return dns.RcodeSuccess, nil
}
// Name implements the Handler interface.
func (a Auto) Name() string { return "auto" }

View File

@@ -0,0 +1,20 @@
package auto
// rewriteToExpand rewrites our template string to one that we can give to regexp.ExpandString. This basically
// involves prefixing any '{' with a '$'.
func rewriteToExpand(s string) string {
// Pretty dumb at the moment, every { will get a $ prefixed.
// Also wasteful as we build the string with +=. This is OKish
// as we do this during config parsing.
copy := ""
for _, c := range s {
if c == '{' {
copy += "$"
}
copy += string(c)
}
return copy
}

View File

@@ -0,0 +1,20 @@
package auto
import "testing"
func TestRewriteToExpand(t *testing.T) {
tests := []struct {
in string
expected string
}{
{in: "", expected: ""},
{in: "{1}", expected: "${1}"},
{in: "{1", expected: "${1"},
}
for i, tc := range tests {
got := rewriteToExpand(tc.in)
if got != tc.expected {
t.Errorf("Test %d: Expected error %v, but got %v", i, tc.expected, got)
}
}
}

173
vendor/github.com/coredns/coredns/plugin/auto/setup.go generated vendored Normal file
View File

@@ -0,0 +1,173 @@
package auto
import (
"os"
"path"
"regexp"
"strconv"
"time"
"github.com/coredns/coredns/core/dnsserver"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics"
clog "github.com/coredns/coredns/plugin/pkg/log"
"github.com/coredns/coredns/plugin/pkg/parse"
"github.com/coredns/coredns/plugin/pkg/upstream"
"github.com/mholt/caddy"
)
var log = clog.NewWithPlugin("auto")
func init() {
caddy.RegisterPlugin("auto", caddy.Plugin{
ServerType: "dns",
Action: setup,
})
}
func setup(c *caddy.Controller) error {
a, err := autoParse(c)
if err != nil {
return plugin.Error("auto", err)
}
c.OnStartup(func() error {
m := dnsserver.GetConfig(c).Handler("prometheus")
if m == nil {
return nil
}
(&a).metrics = m.(*metrics.Metrics)
return nil
})
walkChan := make(chan bool)
c.OnStartup(func() error {
err := a.Walk()
if err != nil {
return err
}
go func() {
ticker := time.NewTicker(a.loader.duration)
for {
select {
case <-walkChan:
return
case <-ticker.C:
a.Walk()
}
}
}()
return nil
})
c.OnShutdown(func() error {
close(walkChan)
return nil
})
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
a.Next = next
return a
})
return nil
}
func autoParse(c *caddy.Controller) (Auto, error) {
var a = Auto{
loader: loader{template: "${1}", re: regexp.MustCompile(`db\.(.*)`), duration: 60 * time.Second},
Zones: &Zones{},
}
config := dnsserver.GetConfig(c)
for c.Next() {
// auto [ZONES...]
a.Zones.origins = make([]string, len(c.ServerBlockKeys))
copy(a.Zones.origins, c.ServerBlockKeys)
args := c.RemainingArgs()
if len(args) > 0 {
a.Zones.origins = args
}
for i := range a.Zones.origins {
a.Zones.origins[i] = plugin.Host(a.Zones.origins[i]).Normalize()
}
for c.NextBlock() {
switch c.Val() {
case "directory": // directory DIR [REGEXP [TEMPLATE] [DURATION]]
if !c.NextArg() {
return a, c.ArgErr()
}
a.loader.directory = c.Val()
if !path.IsAbs(a.loader.directory) && config.Root != "" {
a.loader.directory = path.Join(config.Root, a.loader.directory)
}
_, err := os.Stat(a.loader.directory)
if err != nil {
if os.IsNotExist(err) {
log.Warningf("Directory does not exist: %s", a.loader.directory)
} else {
return a, c.Errf("Unable to access root path '%s': %v", a.loader.directory, err)
}
}
// regexp
if c.NextArg() {
a.loader.re, err = regexp.Compile(c.Val())
if err != nil {
return a, err
}
if a.loader.re.NumSubexp() == 0 {
return a, c.Errf("Need at least one sub expression")
}
}
// template
if c.NextArg() {
a.loader.template = rewriteToExpand(c.Val())
}
// duration
if c.NextArg() {
i, err := strconv.Atoi(c.Val())
if err != nil {
return a, err
}
if i < 1 {
i = 1
}
a.loader.duration = time.Duration(i) * time.Second
}
case "no_reload":
a.loader.noReload = true
case "upstream":
args := c.RemainingArgs()
if len(args) == 0 {
return a, c.ArgErr()
}
var err error
a.loader.upstream, err = upstream.New(args)
if err != nil {
return a, err
}
default:
t, _, e := parse.Transfer(c, false)
if e != nil {
return a, e
}
if t != nil {
a.loader.transferTo = append(a.loader.transferTo, t...)
}
}
}
}
return a, nil
}

View File

@@ -0,0 +1,125 @@
package auto
import (
"testing"
"github.com/mholt/caddy"
)
func TestAutoParse(t *testing.T) {
tests := []struct {
inputFileRules string
shouldErr bool
expectedDirectory string
expectedTempl string
expectedRe string
expectedTo []string
}{
{
`auto example.org {
directory /tmp
transfer to 127.0.0.1
}`,
false, "/tmp", "${1}", `db\.(.*)`, []string{"127.0.0.1:53"},
},
{
`auto 10.0.0.0/24 {
directory /tmp
}`,
false, "/tmp", "${1}", `db\.(.*)`, nil,
},
{
`auto {
directory /tmp
no_reload
}`,
false, "/tmp", "${1}", `db\.(.*)`, nil,
},
{
`auto {
directory /tmp (.*) bliep
}`,
false, "/tmp", "bliep", `(.*)`, nil,
},
{
`auto {
directory /tmp (.*) bliep 10
}`,
false, "/tmp", "bliep", `(.*)`, nil,
},
{
`auto {
directory /tmp (.*) bliep
transfer to 127.0.0.1
transfer to 127.0.0.2
upstream 8.8.8.8
}`,
false, "/tmp", "bliep", `(.*)`, []string{"127.0.0.1:53", "127.0.0.2:53"},
},
// errors
{
`auto example.org {
directory
}`,
true, "", "${1}", `db\.(.*)`, nil,
},
{
`auto example.org {
directory /tmp * {1}
}`,
true, "", "${1}", ``, nil,
},
{
`auto example.org {
directory /tmp * {1} aa
}`,
true, "", "${1}", ``, nil,
},
{
`auto example.org {
directory /tmp .* {1}
}`,
true, "", "${1}", ``, nil,
},
{
`auto example.org {
directory /tmp .* {1}
}`,
true, "", "${1}", ``, nil,
},
{
`auto example.org {
directory /tmp .* {1}
}`,
true, "", "${1}", ``, nil,
},
}
for i, test := range tests {
c := caddy.NewTestController("dns", test.inputFileRules)
a, err := autoParse(c)
if err == nil && test.shouldErr {
t.Fatalf("Test %d expected errors, but got no error", i)
} else if err != nil && !test.shouldErr {
t.Fatalf("Test %d expected no errors, but got '%v'", i, err)
} else if !test.shouldErr {
if a.loader.directory != test.expectedDirectory {
t.Fatalf("Test %d expected %v, got %v", i, test.expectedDirectory, a.loader.directory)
}
if a.loader.template != test.expectedTempl {
t.Fatalf("Test %d expected %v, got %v", i, test.expectedTempl, a.loader.template)
}
if a.loader.re.String() != test.expectedRe {
t.Fatalf("Test %d expected %v, got %v", i, test.expectedRe, a.loader.re)
}
if test.expectedTo != nil {
for j, got := range a.loader.transferTo {
if got != test.expectedTo[j] {
t.Fatalf("Test %d expected %v, got %v", i, test.expectedTo[j], got)
}
}
}
}
}
}

108
vendor/github.com/coredns/coredns/plugin/auto/walk.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
package auto
import (
"os"
"path"
"path/filepath"
"regexp"
"github.com/coredns/coredns/plugin/file"
"github.com/miekg/dns"
)
// Walk will recursively walk the files under l.directory and add the ones that match l.re.
func (a Auto) Walk() error {
// TODO(miek): should add something so that we don't stomp on each other.
toDelete := make(map[string]bool)
for _, n := range a.Zones.Names() {
toDelete[n] = true
}
filepath.Walk(a.loader.directory, func(path string, info os.FileInfo, err error) error {
if info == nil || info.IsDir() {
return nil
}
match, origin := matches(a.loader.re, info.Name(), a.loader.template)
if !match {
return nil
}
if _, ok := a.Zones.Z[origin]; ok {
// we already have this zone
toDelete[origin] = false
return nil
}
reader, err := os.Open(path)
if err != nil {
log.Warningf("Opening %s failed: %s", path, err)
return nil
}
defer reader.Close()
// Serial for loading a zone is 0, because it is a new zone.
zo, err := file.Parse(reader, origin, path, 0)
if err != nil {
log.Warningf("Parse zone `%s': %v", origin, err)
return nil
}
zo.NoReload = a.loader.noReload
zo.Upstream = a.loader.upstream
zo.TransferTo = a.loader.transferTo
a.Zones.Add(zo, origin)
if a.metrics != nil {
a.metrics.AddZone(origin)
}
zo.Notify()
log.Infof("Inserting zone `%s' from: %s", origin, path)
toDelete[origin] = false
return nil
})
for origin, ok := range toDelete {
if !ok {
continue
}
if a.metrics != nil {
a.metrics.RemoveZone(origin)
}
a.Zones.Remove(origin)
log.Infof("Deleting zone `%s'", origin)
}
return nil
}
// matches matches re to filename; if it is a match, the subexpression will be used to expand
// template to an origin. When match is true that origin is returned. Origin is fully qualified.
func matches(re *regexp.Regexp, filename, template string) (match bool, origin string) {
base := path.Base(filename)
matches := re.FindStringSubmatchIndex(base)
if matches == nil {
return false, ""
}
by := re.ExpandString(nil, template, base, matches)
if by == nil {
return false, ""
}
origin = dns.Fqdn(string(by))
return true, origin
}

View File

@@ -0,0 +1,89 @@
package auto
import (
"io/ioutil"
"os"
"path"
"regexp"
"testing"
)
var dbFiles = []string{"db.example.org", "aa.example.org"}
const zoneContent = `; testzone
@ IN SOA sns.dns.icann.org. noc.dns.icann.org. 2016082534 7200 3600 1209600 3600
NS a.iana-servers.net.
NS b.iana-servers.net.
www IN A 127.0.0.1
`
func TestWalk(t *testing.T) {
tempdir, err := createFiles()
if err != nil {
if tempdir != "" {
os.RemoveAll(tempdir)
}
t.Fatal(err)
}
defer os.RemoveAll(tempdir)
ldr := loader{
directory: tempdir,
re: regexp.MustCompile(`db\.(.*)`),
template: `${1}`,
}
a := Auto{
loader: ldr,
Zones: &Zones{},
}
a.Walk()
// db.example.org and db.example.com should be here (created in createFiles)
for _, name := range []string{"example.com.", "example.org."} {
if _, ok := a.Zones.Z[name]; !ok {
t.Errorf("%s should have been added", name)
}
}
}
func TestWalkNonExistent(t *testing.T) {
nonExistingDir := "highly_unlikely_to_exist_dir"
ldr := loader{
directory: nonExistingDir,
re: regexp.MustCompile(`db\.(.*)`),
template: `${1}`,
}
a := Auto{
loader: ldr,
Zones: &Zones{},
}
a.Walk()
}
func createFiles() (string, error) {
dir, err := ioutil.TempDir(os.TempDir(), "coredns")
if err != nil {
return dir, err
}
for _, name := range dbFiles {
if err := ioutil.WriteFile(path.Join(dir, name), []byte(zoneContent), 0644); err != nil {
return dir, err
}
}
// symlinks
if err = os.Symlink(path.Join(dir, "db.example.org"), path.Join(dir, "db.example.com")); err != nil {
return dir, err
}
if err = os.Symlink(path.Join(dir, "db.example.org"), path.Join(dir, "aa.example.com")); err != nil {
return dir, err
}
return dir, nil
}

View File

@@ -0,0 +1,54 @@
package auto
import (
"os"
"path"
"regexp"
"testing"
)
func TestWatcher(t *testing.T) {
tempdir, err := createFiles()
if err != nil {
if tempdir != "" {
os.RemoveAll(tempdir)
}
t.Fatal(err)
}
defer os.RemoveAll(tempdir)
ldr := loader{
directory: tempdir,
re: regexp.MustCompile(`db\.(.*)`),
template: `${1}`,
}
a := Auto{
loader: ldr,
Zones: &Zones{},
}
a.Walk()
// example.org and example.com should exist
if x := len(a.Zones.Z["example.org."].All()); x != 4 {
t.Fatalf("Expected 4 RRs, got %d", x)
}
if x := len(a.Zones.Z["example.com."].All()); x != 4 {
t.Fatalf("Expected 4 RRs, got %d", x)
}
// Now remove one file, rescan and see if it's gone.
if err := os.Remove(path.Join(tempdir, "db.example.com")); err != nil {
t.Fatal(err)
}
a.Walk()
if _, ok := a.Zones.Z["example.com."]; ok {
t.Errorf("Expected %q to be gone.", "example.com.")
}
if _, ok := a.Zones.Z["example.org."]; !ok {
t.Errorf("Expected %q to still be there.", "example.org.")
}
}

76
vendor/github.com/coredns/coredns/plugin/auto/zone.go generated vendored Normal file
View File

@@ -0,0 +1,76 @@
// Package auto implements an on-the-fly loading file backend.
package auto
import (
"sync"
"github.com/coredns/coredns/plugin/file"
)
// Zones maps zone names to a *Zone. This keeps track of what zones we have loaded at
// any one time.
type Zones struct {
Z map[string]*file.Zone // A map mapping zone (origin) to the Zone's data.
names []string // All the keys from the map Z as a string slice.
origins []string // Any origins from the server block.
sync.RWMutex
}
// Names returns the names from z.
func (z *Zones) Names() []string {
z.RLock()
n := z.names
z.RUnlock()
return n
}
// Origins returns the origins from z.
func (z *Zones) Origins() []string {
// doesn't need locking, because there aren't multiple Go routines accessing it.
return z.origins
}
// Zones returns a zone with origin name from z, nil when not found.
func (z *Zones) Zones(name string) *file.Zone {
z.RLock()
zo := z.Z[name]
z.RUnlock()
return zo
}
// Add adds a new zone into z. If zo.NoReload is false, the
// reload goroutine is started.
func (z *Zones) Add(zo *file.Zone, name string) {
z.Lock()
if z.Z == nil {
z.Z = make(map[string]*file.Zone)
}
z.Z[name] = zo
z.names = append(z.names, name)
zo.Reload()
z.Unlock()
}
// Remove removes the zone named name from z. It also stops the zone's reload goroutine.
func (z *Zones) Remove(name string) {
z.Lock()
if zo, ok := z.Z[name]; ok {
zo.OnShutdown()
}
delete(z.Z, name)
// TODO(miek): just regenerate Names (might be bad if you have a lot of zones...)
z.names = []string{}
for n := range z.Z {
z.names = append(z.names, n)
}
z.Unlock()
}

View File

@@ -0,0 +1,6 @@
reviewers:
- chrisohaver
- miekg
approvers:
- chrisohaver
- miekg

View File

@@ -0,0 +1,49 @@
# autopath
## Name
*autopath* - allows for server-side search path completion.
## Description
If it sees a query that matches the first element of the configured search path, *autopath* will
follow the chain of search path elements and return the first reply that is not NXDOMAIN. On any
failures, the original reply is returned. Because *autopath* returns a reply for a name that wasn't
the original question it will add a CNAME that points from the original name (with the search path
element in it) to the name of this answer.
## Syntax
~~~
autopath [ZONE...] RESOLV-CONF
~~~
* **ZONES** zones *autopath* should be authoritative for.
* **RESOLV-CONF** points to a `resolv.conf`-like file or uses a special syntax to point to another
plugin. For instance `@kubernetes` will call out to the kubernetes plugin (for each
query) to retrieve the search list it should use.
If a plugin implements the `AutoPather` interface then it can be used.
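A hedged sketch of such an implementation (the type `MyPlugin` and the search domains are placeholders;
the signature, and the trailing empty string, are what *autopath* expects):
~~~ go
package myplugin

import "github.com/coredns/coredns/request"

// MyPlugin is a placeholder for a plugin that knows the client's search path.
type MyPlugin struct{}

// AutoPath implements the AutoPather interface. The trailing "" makes autopath
// try the unexpanded query name as well.
func (m MyPlugin) AutoPath(state request.Request) []string {
	return []string{"default.svc.cluster.local.", "svc.cluster.local.", "cluster.local.", ""}
}
~~~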
## Metrics
If monitoring is enabled (via the *prometheus* directive) then the following metric is exported:
* `coredns_autopath_success_count_total{server}` - counter of successfully autopath-ed queries.
The `server` label is explained in the *metrics* plugin documentation.
## Examples
~~~
autopath my-resolv.conf
~~~
Use `my-resolv.conf` as the file to get the search path from. This file only needs to have one line:
`search domain1 domain2 ...`
~~~
autopath @kubernetes
~~~
Use the search path dynamically retrieved from the *kubernetes* plugin.

View File

@@ -0,0 +1,158 @@
/*
Package autopath implements autopathing. This is a hack; it shortcuts the
client's search path resolution by performing these lookups on the server...
The server has a copy (via AutoPathFunc) of the client's search path and on
receiving a query it first establishes if the suffix matches the FIRST configured
element. If no match can be found the query will be forwarded up the plugin
chain without interference (iff 'fallthrough' has been set).
If the query is deemed to fall in the search path the server will perform the
queries with each element of the search path appended in sequence until a
non-NXDOMAIN answer has been found. That reply will then be returned to the
client - with some CNAME hackery to let the client accept the reply.
If all queries return NXDOMAIN we return the original as-is and let the client
continue searching. The client will go to the next element in the search path,
but we won't do any more autopathing. It means that in the failure case, you do
more work, since the server looks it up, then the client still needs to go
through the search path.
It is assumed the search path ordering is identical between server and client.
Middleware implementing autopath must have a function called `AutoPath` of type
autopath.Func. Note the search path must end with the empty string.
I.e:
func (m Plugins ) AutoPath(state request.Request) []string {
return []string{"first", "second", "last", ""}
}
*/
package autopath
import (
"context"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/metrics"
"github.com/coredns/coredns/plugin/pkg/dnsutil"
"github.com/coredns/coredns/plugin/pkg/nonwriter"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
)
// Func defines the function plugin should implement to return a search
// path to the autopath plugin. The last element of the slice must be the empty string.
// If Func returns a nil slice, no autopathing will be done.
type Func func(request.Request) []string
// AutoPather defines the interface that a plugin should implement in order to be
// used by AutoPath.
type AutoPather interface {
AutoPath(request.Request) []string
}
// AutoPath performs autopath: server-side search path completion.
type AutoPath struct {
Next plugin.Handler
Zones []string
// Search always includes "" as the last element, so we try the base query without any search paths added as well.
search []string
searchFunc Func
}
// ServeDNS implements the plugin.Handle interface.
func (a *AutoPath) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
state := request.Request{W: w, Req: r}
zone := plugin.Zones(a.Zones).Matches(state.Name())
if zone == "" {
return plugin.NextOrFailure(a.Name(), a.Next, ctx, w, r)
}
// Check if autopath should be done, searchFunc takes precedence over the local configured search path.
var err error
searchpath := a.search
if a.searchFunc != nil {
searchpath = a.searchFunc(state)
}
if len(searchpath) == 0 {
return plugin.NextOrFailure(a.Name(), a.Next, ctx, w, r)
}
if !firstInSearchPath(state.Name(), searchpath) {
return plugin.NextOrFailure(a.Name(), a.Next, ctx, w, r)
}
origQName := state.QName()
// Establish base name of the query. I.e what was originally asked.
base, err := dnsutil.TrimZone(state.QName(), searchpath[0])
if err != nil {
return dns.RcodeServerFailure, err
}
firstReply := new(dns.Msg)
firstRcode := 0
var firstErr error
ar := r.Copy()
// Walk the search path and see if we can get a non-nxdomain - if they all fail we return the first
// query we've done and return that as-is. This means the client will do the search path walk again...
for i, s := range searchpath {
newQName := base + "." + s
ar.Question[0].Name = newQName
nw := nonwriter.New(w)
rcode, err := plugin.NextOrFailure(a.Name(), a.Next, ctx, nw, ar)
if err != nil {
// Return now - not sure if this is the best. We should also check if the write has happened.
return rcode, err
}
if i == 0 {
firstReply = nw.Msg
firstRcode = rcode
firstErr = err
}
if !plugin.ClientWrite(rcode) {
continue
}
if nw.Msg.Rcode == dns.RcodeNameError {
continue
}
msg := nw.Msg
cnamer(msg, origQName)
// Write whatever non-nxdomain answer we've found.
w.WriteMsg(msg)
autoPathCount.WithLabelValues(metrics.WithServer(ctx)).Add(1)
return rcode, err
}
if plugin.ClientWrite(firstRcode) {
w.WriteMsg(firstReply)
}
return firstRcode, firstErr
}
// Name implements the Handler interface.
func (a *AutoPath) Name() string { return "autopath" }
// firstInSearchPath checks if name is equal to or a sibling of the first element in the search path.
func firstInSearchPath(name string, searchpath []string) bool {
if name == searchpath[0] {
return true
}
if dns.IsSubDomain(searchpath[0], name) {
return true
}
return false
}

View File

@@ -0,0 +1,166 @@
package autopath
import (
"context"
"testing"
"github.com/coredns/coredns/plugin"
"github.com/coredns/coredns/plugin/pkg/dnstest"
"github.com/coredns/coredns/plugin/test"
"github.com/miekg/dns"
)
var autopathTestCases = []test.Case{
{
// search path expansion.
Qname: "b.example.org.", Qtype: dns.TypeA,
Answer: []dns.RR{
test.CNAME("b.example.org. 3600 IN CNAME b.com."),
test.A("b.com." + defaultA),
},
},
{
// No search path expansion
Qname: "a.example.com.", Qtype: dns.TypeA,
Answer: []dns.RR{
test.A("a.example.com." + defaultA),
},
},
}
func newTestAutoPath() *AutoPath {
ap := new(AutoPath)
ap.Zones = []string{"."}
ap.Next = nextHandler(map[string]int{
"b.example.org.": dns.RcodeNameError,
"b.com.": dns.RcodeSuccess,
"a.example.com.": dns.RcodeSuccess,
})
ap.search = []string{"example.org.", "example.com.", "com.", ""}
return ap
}
func TestAutoPath(t *testing.T) {
ap := newTestAutoPath()
ctx := context.TODO()
for _, tc := range autopathTestCases {
m := tc.Msg()
rec := dnstest.NewRecorder(&test.ResponseWriter{})
_, err := ap.ServeDNS(ctx, rec, m)
if err != nil {
t.Errorf("Expected no error, got %v\n", err)
continue
}
// No sorting here as we want to check if the CNAME sits *before* the
// rest of the answer.
resp := rec.Msg
if !test.Header(t, tc, resp) {
t.Logf("%v\n", resp)
continue
}
if !test.Section(t, tc, test.Answer, resp.Answer) {
t.Logf("%v\n", resp)
}
if !test.Section(t, tc, test.Ns, resp.Ns) {
t.Logf("%v\n", resp)
}
if !test.Section(t, tc, test.Extra, resp.Extra) {
t.Logf("%v\n", resp)
}
}
}
var autopathNoAnswerTestCases = []test.Case{
{
// search path expansion, no answer
Qname: "c.example.org.", Qtype: dns.TypeA,
Answer: []dns.RR{
test.CNAME("b.example.org. 3600 IN CNAME b.com."),
test.A("b.com." + defaultA),
},
},
}
func TestAutoPathNoAnswer(t *testing.T) {
ap := newTestAutoPath()
ctx := context.TODO()
for _, tc := range autopathNoAnswerTestCases {
m := tc.Msg()
rec := dnstest.NewRecorder(&test.ResponseWriter{})
rcode, err := ap.ServeDNS(ctx, rec, m)
if err != nil {
t.Errorf("Expected no error, got %v\n", err)
continue
}
if plugin.ClientWrite(rcode) {
t.Fatalf("Expected no client write, got one for rcode %d", rcode)
}
}
}
// nextHandler returns a Handler that returns an answer for the question in the
// request per the domain->answer map. On success an RR will be returned: "qname 3600 IN A 127.0.0.53"
func nextHandler(mm map[string]int) test.Handler {
return test.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
rcode, ok := mm[r.Question[0].Name]
if !ok {
return dns.RcodeServerFailure, nil
}
m := new(dns.Msg)
m.SetReply(r)
switch rcode {
case dns.RcodeNameError:
m.Rcode = rcode
m.Ns = []dns.RR{soa}
w.WriteMsg(m)
return m.Rcode, nil
case dns.RcodeSuccess:
m.Rcode = rcode
a, _ := dns.NewRR(r.Question[0].Name + defaultA)
m.Answer = []dns.RR{a}
w.WriteMsg(m)
return m.Rcode, nil
default:
panic("nextHandler: unhandled rcode")
}
})
}
const defaultA = " 3600 IN A 127.0.0.53"
var soa = func() dns.RR {
s, _ := dns.NewRR("example.org. 1800 IN SOA example.org. example.org. 1502165581 14400 3600 604800 14400")
return s
}()
func TestInSearchPath(t *testing.T) {
a := AutoPath{search: []string{"default.svc.cluster.local.", "svc.cluster.local.", "cluster.local."}}
tests := []struct {
qname string
b bool
}{
{"google.com", false},
{"default.svc.cluster.local.", true},
{"a.default.svc.cluster.local.", true},
{"a.b.svc.cluster.local.", false},
}
for i, tc := range tests {
got := firstInSearchPath(tc.qname, a.search)
if got != tc.b {
t.Errorf("Test %d, got %v, expected %v", i, got, tc.b)
}
}
}

Some files were not shown because too many files have changed in this diff