first commit

remotes/1680050961956510080/tmp_refs/heads/master 1.0.0
LowEel 2020-10-08 22:14:07 +02:00
parent 05afb0dace
commit 5c5e4e4417
685 changed files with 301506 additions and 0 deletions

.gitignore

@ -0,0 +1,10 @@
zabov
killfile
killfile/*
db/zabov.db
binaries
binaries/*
build.sh
.vscode
.vscode/*

00.database.go

@ -0,0 +1,38 @@
package main

import (
    "fmt"
    "os"

    "github.com/syndtr/goleveldb/leveldb"
)

//MyZabovKDB is the storage where we'll put domains to block
var MyZabovKDB *leveldb.DB

//MyZabovCDB is the storage where we'll put domains to cache
var MyZabovCDB *leveldb.DB

func init() {
    var err error

    os.RemoveAll("./db")
    os.MkdirAll("./db", 0755)

    MyZabovKDB, err = leveldb.OpenFile("./db/killfile", nil)
    if err != nil {
        fmt.Println("Cannot create Killfile db: ", err.Error())
    } else {
        fmt.Println("Killfile DB created")
    }

    MyZabovCDB, err = leveldb.OpenFile("./db/cache", nil)
    if err != nil {
        fmt.Println("Cannot create Cache db: ", err.Error())
    } else {
        fmt.Println("Cache DB created")
    }
}

00.memory.go

@ -0,0 +1,26 @@
package main

import (
    "fmt"
    "runtime"
    "time"
)

func init() {
    fmt.Println("Garbage Collector Thread Starting")
    go memoryCleanerThread()
}

func memoryCleanerThread() {
    for {
        time.Sleep(10 * time.Minute)
        fmt.Println("Time to clean memory...")
        runtime.GC()
        fmt.Println("Garbage Collection done.")
    }
}

01.conf.go

@ -0,0 +1,69 @@
package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/miekg/dns"
)

func init() {

    //ZabovConf describes the JSON we use for configuration
    type ZabovConf struct {
        Zabov struct {
            Port          string `json:"port"`
            Proto         string `json:"proto"`
            Ipaddr        string `json:"ipaddr"`
            Upstream      string `json:"upstream"`
            Cachettl      int    `json:"cachettl"`
            Killfilettl   int    `json:"killfilettl"`
            Singlefilters string `json:"singlefilters"`
            Doublefilters string `json:"doublefilters"`
            Blackholeip   string `json:"blackholeip"`
            Hostsfile     string `json:"hostsfile"`
        } `json:"zabov"`
    }

    var MyConf ZabovConf

    file, err := ioutil.ReadFile("config.json")
    if err != nil {
        log.Println("Cannot open config file: ", err.Error())
        os.Exit(1)
    }

    err = json.Unmarshal(file, &MyConf)
    if err != nil {
        log.Println("Cannot unmarshal config json: ", err.Error())
        os.Exit(1)
    }

    // now we read the configuration file
    fmt.Println("Reading configuration file...")

    ZabovPort := MyConf.Zabov.Port
    ZabovType := MyConf.Zabov.Proto
    ZabovAddr := MyConf.Zabov.Ipaddr
    ZabovUpDNS = MyConf.Zabov.Upstream
    ZabovSingleBL = MyConf.Zabov.Singlefilters
    ZabovDoubleBL = MyConf.Zabov.Doublefilters
    ZabovAddBL = MyConf.Zabov.Blackholeip
    ZabovCacheTTL = MyConf.Zabov.Cachettl
    ZabovKillTTL = MyConf.Zabov.Killfilettl
    ZabovHostsFile = MyConf.Zabov.Hostsfile

    zabovString := ZabovAddr + ":" + ZabovPort

    MyDNS = new(dns.Server)
    MyDNS.Addr = zabovString
    MyDNS.Net = ZabovType

    ZabovDNSArray = fileByLines(ZabovUpDNS)
}

01.dnscheck.go

@ -0,0 +1,37 @@
package main

import (
    "fmt"
    "net/http"
    "time"
)

//NetworkUp tells the system if the network is up or not
var NetworkUp bool

func checkNetworkUp() bool {
    // RFC2606 test domain: should always work, unless the internet is down.
    resp, err := http.Get("http://example.com")
    if err != nil {
        return false
    }
    resp.Body.Close()
    return true
}

func checkNetworkUpThread() {
    ticker := time.NewTicker(2 * time.Minute)
    for range ticker.C {
        NetworkUp = checkNetworkUp()
    }
}

func init() {
    fmt.Println("Network Checker starting....")
    go checkNetworkUpThread()
}

01.killfile.go

@ -0,0 +1,79 @@
package main

import (
    "fmt"
    "strings"
)

var zabovKbucket = []byte("killfile")

type killfileItem struct {
    Kdomain string
    Ksource string
}

var bChannel chan killfileItem

func init() {
    bChannel = make(chan killfileItem, 1024)
    fmt.Println("Initializing kill channel engine.")
    go bWriteThread()
}

func bWriteThread() {
    for item := range bChannel {
        writeInKillfile(item.Kdomain, item.Ksource)
        incrementStats("BL domains from "+item.Ksource, 1)
        incrementStats("TOTAL", 1)
    }
}

//DomainKill stores a domain name inside the killfile
func DomainKill(s, durl string) {
    if len(s) > 2 {
        s = strings.ToLower(s)

        var k killfileItem
        k.Kdomain = s
        k.Ksource = durl

        bChannel <- k
    }
}

func writeInKillfile(key, value string) {
    stK := []byte(key)
    stV := []byte(value)

    err := MyZabovKDB.Put(stK, stV, nil)
    if err != nil {
        fmt.Println("Cannot write to Killfile DB: ", err.Error())
    }
}

func domainInKillfile(domain string) bool {
    s := strings.ToLower(domain)

    has, err := MyZabovKDB.Has([]byte(s), nil)
    if err != nil {
        fmt.Println("Cannot read from Killfile DB: ", err.Error())
    }
    return has
}

01.stats.go

@ -0,0 +1,100 @@
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "time"
)

type send struct {
    Payload   string
    Number    int64
    Operation string
}

//ZabovStats is used to keep statistics to print
var ZabovStats map[string]int64

var stats chan send

func init() {
    stats = make(chan send, 1024)
    ZabovStats = make(map[string]int64)
    fmt.Println("Initializing stats engine.")
    go reportPrintThread()
    go statsThread()
}

func statsPrint() {
    fmt.Println()
    stat, _ := json.Marshal(ZabovStats)
    fmt.Println(jsonPrettyPrint(string(stat)))
    fmt.Println()
}

func incrementStats(key string, value int64) {
    var s send
    s.Payload = key
    s.Number = value
    s.Operation = "INC"
    stats <- s
}

func setstatsvalue(key string, value int64) {
    var s send
    s.Payload = key
    s.Number = value
    s.Operation = "SET"
    stats <- s
}

func reportPrintThread() {
    for {
        var s send
        s.Operation = "PRI"
        s.Payload = "-"
        s.Number = 0
        stats <- s
        time.Sleep(2 * time.Minute)
    }
}

func statsThread() {
    fmt.Println("Starting Statistical Collection Thread")
    for item := range stats {
        switch item.Operation {
        case "INC":
            ZabovStats[item.Payload] += item.Number
        case "SET":
            ZabovStats[item.Payload] = item.Number
        case "PRI":
            statsPrint()
        }
    }
}

func jsonPrettyPrint(in string) string {
    var out bytes.Buffer
    err := json.Indent(&out, []byte(in), "", "\t")
    if err != nil {
        return in
    }
    return out.String()
}

02.cache.go

@ -0,0 +1,104 @@
package main

import (
    "bytes"
    "encoding/gob"
    "fmt"
    "time"

    "github.com/miekg/dns"
)

type cacheItem struct {
    Query []byte
    Date  time.Time
}

//DomainCache stores a DNS response inside the cache
func DomainCache(s string, resp *dns.Msg) {
    var domain2cache cacheItem
    var err error
    var dom2 bytes.Buffer

    enc := gob.NewEncoder(&dom2)

    domain2cache.Query, err = resp.Pack()
    if err != nil {
        fmt.Println("Problems packing the response: ", err.Error())
    }
    domain2cache.Date = time.Now()

    err = enc.Encode(domain2cache)
    if err != nil {
        fmt.Println("Cannot GOB the domain to cache: ", err.Error())
    }

    cacheDomain(s, dom2.Bytes())
}

func cacheDomain(key string, domain []byte) {
    err := MyZabovCDB.Put([]byte(key), domain, nil)
    if err != nil {
        fmt.Println("Cannot write to Cache DB: ", err.Error())
    }
}

//GetDomainFromCache returns a cached DNS response, or nil if it is missing or expired
func GetDomainFromCache(s string) *dns.Msg {
    ret := new(dns.Msg)
    var cache bytes.Buffer
    dec := gob.NewDecoder(&cache)
    var record cacheItem
    var conf []byte
    var errDB error

    if !domainInCache(s) {
        return nil
    }

    conf, errDB = MyZabovCDB.Get([]byte(s), nil)
    if errDB != nil {
        fmt.Println("Cannot read from Cache DB: ", errDB.Error())
        return nil
    }

    cache.Write(conf)

    err := dec.Decode(&record)
    if err != nil {
        fmt.Println("Decode error: ", err.Error())
        return nil
    }

    if time.Since(record.Date) > (time.Duration(ZabovCacheTTL) * time.Hour) {
        return nil
    }

    err = ret.Unpack(record.Query)
    if err != nil {
        fmt.Println("Problem unpacking response: ", err.Error())
        return nil
    }

    return ret
}

func domainInCache(domain string) bool {
    has, err := MyZabovCDB.Has([]byte(domain), nil)
    if err != nil {
        fmt.Println("Cannot search Cache DB: ", err.Error())
        return false
    }
    return has
}

Dockerfile.amd64

@ -0,0 +1,18 @@
FROM golang:1.14.1 AS builder
RUN apt install git -y
RUN mkdir -p /go/src/zabov
RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
WORKDIR /go/src/zabov
ENV GO111MODULE=auto
RUN go get ; go build -mod=vendor
FROM debian:latest
RUN apt update
RUN apt upgrade -y
RUN apt install ca-certificates -y
RUN mkdir -p /opt/zabov
WORKDIR /opt/zabov
COPY --from=builder /go/src/zabov /opt/zabov
EXPOSE 53/udp
ENTRYPOINT ["/opt/zabov/zabov"]

Dockerfile.arm32v7

@ -0,0 +1,17 @@
FROM arm32v7/golang:1.14.1 AS builder
RUN apt install git -y
RUN mkdir -p /go/src/zabov
RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
WORKDIR /go/src/zabov
ENV GO111MODULE=auto
RUN go get ; go build -mod=vendor
FROM arm32v7/debian:latest
RUN apt update
RUN apt upgrade -y
RUN apt install ca-certificates -y
RUN mkdir -p /opt/zabov
WORKDIR /opt/zabov
COPY --from=builder /go/src/zabov /opt/zabov
EXPOSE 53/udp
ENTRYPOINT ["/opt/zabov/zabov"]

Dockerfile.arm64v8

@ -0,0 +1,17 @@
FROM arm64v8/golang:1.14.1 AS builder
RUN apt install git -y
RUN mkdir -p /go/src/zabov
RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
WORKDIR /go/src/zabov
ENV GO111MODULE=auto
RUN go get ; go build -mod=vendor
FROM arm64v8/debian:latest
RUN apt update
RUN apt upgrade -y
RUN apt install ca-certificates -y
RUN mkdir -p /opt/zabov
WORKDIR /opt/zabov
COPY --from=builder /go/src/zabov /opt/zabov
EXPOSE 53/udp
ENTRYPOINT ["/opt/zabov/zabov"]

LICENSE

@ -0,0 +1,14 @@
Copyright (C) 2020 loweel@keinpfusch.net
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.

README.md

@ -0,0 +1,91 @@
# zabov
A tiny replacement for the Pi-hole DNS filter.

Still a work in progress, but usable.

The idea is to produce a very simple, no-web-interface DNS/IP blocker.
# INSTALL
Zabov requires Go 1.13 or later.
<pre>
git clone https://git.keinpfusch.net/Loweel/zabov.git
cd zabov
go get
go build -mod=vendor
</pre>
Then edit config.json: note that config.json must be in the same folder as the executable you run.

Just a few words about "singlefilters" and "doublefilters":
Blacklist data is downloaded from the URLs of blacklist maintainers, and it comes in different formats.
There are two kinds of blacklists:
The first is the format zabov calls "singlefilter": a single column of domains:
<pre>
domain1.com
domain2.com
domain3.com
</pre>
The second is the format zabov calls "doublefilter" (a file in "/etc/hosts" format, to be precise), where each line has an IP, usually 127.0.0.1 or 0.0.0.0, followed by the domain:
<pre>
127.0.0.1 domain1.com
127.0.0.1 domain2.com
127.0.0.1 domain3.com
</pre>
This is why the configuration file has two separate settings.
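For illustration only, here is a minimal, self-contained sketch of how a "doublefilter" line can be split into its IP and domain parts (this is not zabov's own parser, which lives in adlist_hosts.go, and the helper name splitHostsLine is made up for the example). A "singlefilter" line, by contrast, is just the domain itself, so no splitting is needed.
<pre>
package main

import (
    "fmt"
    "net"
    "strings"
)

// splitHostsLine splits a hosts-format line into IP and domain.
// It reports ok=false for blanks, comments, or lines that do not start with an IP.
func splitHostsLine(line string) (ip, domain string, ok bool) {
    fields := strings.Fields(line)
    if len(fields) < 2 || strings.HasPrefix(fields[0], "#") {
        return "", "", false
    }
    if net.ParseIP(fields[0]) == nil {
        return "", "", false
    }
    return fields[0], fields[1], true
}

func main() {
    for _, l := range []string{"127.0.0.1 domain1.com", "# a comment", "domain2.com"} {
        if ip, d, ok := splitHostsLine(l); ok {
            fmt.Println(ip, d)
        }
    }
}
</pre>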
The config file should look like:
<pre>
{
"zabov": {
"port":"53",
"proto":"udp",
"ipaddr":"127.0.0.1",
"upstream":"./dns-upstream.txt",
"cachettl": "4",
"killfilettl": "12",
"singlefilters":"./urls-hosts.txt" ,
"doublefilters":"./urls-domains.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local.txt"
}
}
</pre>
Where:
- port: the port to listen on. Usually 53; you may want to change it for Docker.
- proto: the protocol. Choices are "udp", "tcp", "tcp/udp".
- ipaddr: the IP address to listen on. May be empty (which results in listening on 0.0.0.0), to avoid issues with Docker.
- upstream: file containing the upstream DNS servers to query, one per line, in IP:PORT format.
- cachettl: how long cached DNS answers are kept (in hours).
- killfilettl: refresh interval for the _killfiles_ (in hours).
- singlefilters: file listing the blacklist URLs that follow the "singlefilter" schema (one URL per line).
- doublefilters: file listing the blacklist URLs that follow the "doublefilter" schema (one URL per line).
- blackholeip: IP address to return when a domain is blocked. This lets you avoid MX issues, mail loops on localhost, or clashes with a web server running on localhost.
- hostsfile: path of your local blacklist file. Despite the name, it uses the "singlefilter" format: one domain per line.
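Once zabov is running, you can check it with any DNS client. As a quick test, here is a sketch of a minimal client using the same miekg/dns library zabov already vendors (the name example.com and the address 127.0.0.1:53 are only examples matching the sample config above; adjust them to your setup):
<pre>
package main

import (
    "fmt"
    "log"

    "github.com/miekg/dns"
)

func main() {
    // Build an A query and send it to a local zabov instance.
    m := new(dns.Msg)
    m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

    c := new(dns.Client)
    in, rtt, err := c.Exchange(m, "127.0.0.1:53")
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("round-trip:", rtt)
    for _, ans := range in.Answer {
        // A blocked domain should answer with the configured blackholeip.
        fmt.Println(ans.String())
    }
}
</pre>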
# DOCKER
Multi-stage Dockerfiles are provided for amd64, ARM32v7 and ARM64v8.
# TODO:
- ~~caching~~
- monitoring port

adlist_hosts.go

@ -0,0 +1,96 @@
package main

import (
    "bufio"
    "errors"
    "fmt"
    "net"
    "net/http"
    "strings"
    "time"
)

func init() {
    go downloadDoubleThread()
}

//DoubleIndexFilter downloads a hosts-format ("doublefilter") blacklist and feeds its domains to the killfile
func DoubleIndexFilter(durl string) error {
    fmt.Println("Retrieving HostFile from: ", durl)

    var err error

    // Get the data
    resp, err := http.Get(durl)
    if err != nil {
        fmt.Println("HTTP problem: ", err)
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode == 200 { // OK
        fmt.Println(durl + " Response: OK")
    } else {
        fmt.Println("Server <"+durl+"> returned status code: ", resp.StatusCode)
        return errors.New("Server <" + durl + "> returned status code: " + resp.Status)
    }

    scanner := bufio.NewScanner(resp.Body)
    splitter := func(c rune) bool {
        return c == ' ' || c == '\t'
    }

    var numLines int64
    for scanner.Scan() {
        line := scanner.Text()
        h := strings.FieldsFunc(line, splitter)
        if h == nil {
            continue
        }
        if len(h) < 2 {
            continue
        }
        if net.ParseIP(h[0]) != nil {
            DomainKill(h[1], durl)
            // fmt.Println("MATCH: ", h[1])
            numLines++
        } else {
            incrementStats("Malformed HostLines "+durl, 1)
            // fmt.Println("Malformed line: <" + line + ">")
        }
    }

    fmt.Println("Finished parsing: "+durl+", number of lines: ", numLines)
    return err
}

func getDoubleFilters() {
    s := fileByLines(ZabovDoubleBL)
    for _, a := range s {
        DoubleIndexFilter(a)
    }
}

func downloadDoubleThread() {
    fmt.Println("Starting updater of DOUBLE lists, each (hours):", ZabovKillTTL)
    for {
        getDoubleFilters()
        time.Sleep(time.Duration(ZabovKillTTL) * time.Hour)
    }
}

adlist_single.go

@ -0,0 +1,93 @@
package main

import (
    "bufio"
    "errors"
    "fmt"
    "net/http"
    "strings"
    "time"
)

func init() {
    go downloadThread()
}

//SingleIndexFilter downloads a domain-per-line ("singlefilter") blacklist and feeds its domains to the killfile
func SingleIndexFilter(durl string) error {
    fmt.Println("Retrieving DomainFile from: ", durl)

    var err error

    // Get the data
    resp, err := http.Get(durl)
    if err != nil {
        fmt.Println("HTTP Problem: ", err)
        return err
    }
    defer resp.Body.Close()

    if resp.StatusCode == 200 { // OK
        fmt.Println(durl + " Response: OK")
    } else {
        fmt.Println("Server <"+durl+"> returned status code: ", resp.StatusCode)
        return errors.New("Server <" + durl + "> returned status code: " + resp.Status)
    }

    scanner := bufio.NewScanner(resp.Body)
    splitter := func(c rune) bool {
        return c == ' ' || c == '\t'
    }

    var numLines int64
    for scanner.Scan() {
        line := scanner.Text()
        h := strings.FieldsFunc(line, splitter)
        if h == nil {
            continue
        }
        if len(h) < 1 {
            continue
        }
        if !strings.Contains(h[0], "#") {
            DomainKill(h[0], durl)
            // fmt.Println("MATCH: ", h[0])
            numLines++
        } else {
            incrementStats("Malformed DomainLines "+durl, 1)
            // fmt.Println("Malformed line: <" + line + ">")
        }
    }

    fmt.Println("Finished parsing: "+durl+", number of lines: ", numLines)
    return err
}

func getSingleFilters() {
    s := fileByLines(ZabovSingleBL)
    for _, a := range s {
        SingleIndexFilter(a)
    }
}

func downloadThread() {
    fmt.Println("Starting updater of SINGLE lists, each (hours): ", ZabovKillTTL)
    for {
        getSingleFilters()
        time.Sleep(time.Duration(ZabovKillTTL) * time.Hour)
    }
}

config.json

@ -0,0 +1,15 @@
{
    "zabov": {
        "port": "53",
        "proto": "udp",
        "ipaddr": "0.0.0.0",
        "upstream": "./dns-upstream.txt",
        "cachettl": 1,
        "killfilettl": 12,
        "singlefilters": "./urls-domains.txt",
        "doublefilters": "./urls-hosts.txt",
        "blackholeip": "127.0.0.1",
        "hostsfile": "./urls-local.txt"
    }
}

dns-upstream.txt

@ -0,0 +1,387 @@
194.150.168.168:53
194.25.0.68:53
141.1.1.1:53
213.239.204.35:53
194.25.0.52:53
212.211.132.4:53
213.68.194.51:53
195.243.214.4:53
141.1.27.249:53
80.237.197.14:53
217.28.98.62:53
82.96.64.2:53
82.96.65.2:53
194.25.0.60:53
193.101.111.10:53
193.101.111.20:53
192.76.144.66:53
217.69.169.25:53
85.88.19.10:53
85.88.19.11:53
85.214.20.141:53
194.169.239.10:53
194.172.160.4:53
212.102.225.2:53
212.51.16.1:53
212.51.17.1:53
212.66.129.98:53
212.89.130.180:53
213.209.122.11:53
213.23.108.129:53
91.204.4.133:53
84.200.69.80:53
62.146.63.211:53
212.77.178.83:53
78.46.58.246:53
85.214.102.25:53
87.106.62.128:53
81.20.87.84:53
81.20.87.181:53
217.5.159.227:53
185.38.9.99:53
62.146.25.130:53
37.59.218.50:53
212.184.191.193:53
213.23.143.154:53
178.32.187.10:53
62.154.214.86:53
85.214.151.164:53
5.175.225.2:53
193.105.38.142:53
145.253.183.21:53
178.15.146.43:53
62.245.233.22:53
212.224.71.71:53
213.136.78.213:53
80.156.196.196:53
193.158.99.67:53
194.95.75.230:53
212.184.191.2:53
213.138.38.22:53
195.145.80.150:53
139.18.25.34:53
79.143.180.116:53
213.240.172.200:53
217.160.238.238:53
213.61.185.238:53
84.201.0.34:53
82.194.105.219:53
62.157.89.178:53
46.189.26.123:53
85.214.208.8:53
87.239.128.130:53
78.111.65.40:53
85.214.69.126:53
109.75.29.1:53
80.81.19.226:53
81.169.162.74:53
217.14.164.35:53
5.9.172.92:53
62.225.102.180:53
217.7.71.203:53
217.6.71.61:53
62.154.138.43:53
146.0.38.140:53
78.111.67.10:53
217.6.110.20:53
87.245.18.221:53
62.225.66.19:53
81.169.212.52:53
178.162.205.123:53
212.227.83.183:53
139.18.25.33:53
193.29.2.4:53
212.91.246.11:53
62.153.141.15:53
148.251.120.228:53
62.154.253.226:53
194.25.218.2:53
194.174.73.36:53
62.245.226.182:53
87.234.222.68:53
194.25.169.130:53
62.225.15.253:53
176.94.20.4:53
188.40.115.29:53
188.40.115.22:53
194.187.240.10:53
80.150.109.197:53
217.86.149.109:53
91.208.193.1:53
195.243.99.35:53
62.225.102.177:53
178.210.102.9:53
80.228.113.125:53
178.210.102.12:53
130.255.121.9:53
212.204.56.218:53
37.59.218.151:53
80.148.52.109:53
194.30.174.222:53
5.199.141.5:53
94.135.173.22:53
88.79.208.11:53
141.16.180.9:53
82.193.241.125:53
212.8.216.37:53
109.75.29.2:53
78.46.17.82:53
81.169.185.49:53
217.244.13.14:53
93.104.209.27:53
79.143.182.174:53
81.20.82.131:53
213.136.68.181:53
213.136.68.189:53
193.107.145.233:53
195.145.241.3:53
80.242.182.182:53
193.159.181.250:53
195.243.124.75:53
62.159.104.102:53
92.222.202.244:53
85.214.254.13:53
85.114.128.115:53
145.253.176.50:53
217.7.63.1:53
78.35.40.149:53
81.169.187.253:53
94.249.192.20:53
85.214.43.157:53
80.149.83.60:53
178.210.102.225:53
178.210.102.193:53
62.154.236.126:53
213.183.185.50:53
212.60.229.242:53
80.146.192.66:53
79.133.62.62:53
178.33.33.219:53
62.245.225.225:53
46.38.235.212:53
213.136.88.31:53
212.66.135.250:53
194.231.138.26:53
62.225.1.33:53
80.148.34.131:53
94.23.163.114:53
80.64.189.94:53
81.169.241.28:53
212.38.26.132:53
62.91.19.67:53
87.239.128.25:53
212.185.196.10:53
89.221.2.171:53
217.243.239.11:53
213.136.69.214:53
213.138.56.75:53
212.122.52.11:53
46.4.166.113:53
77.37.30.12:53
194.187.242.10:53
188.40.132.212:53
194.150.168.169:53
85.25.105.193:53
185.93.180.131:53
109.234.249.10:53
109.234.248.10:53
138.201.120.250:53
81.3.27.54:53
78.46.231.161:53
78.46.231.162:53
212.51.16.197:53
212.28.34.65:53
148.251.24.48:53
212.75.32.4:53
91.103.112.150:53
217.69.169.26:53
195.63.103.144:53
213.209.121.30:53
88.79.149.4:53
185.194.143.243:53
46.182.19.48:53
217.111.24.246:53
62.96.37.74:53
213.61.64.174:53
213.61.65.226:53
62.96.190.134:53
217.111.123.166:53
213.61.176.118:53
185.216.33.82:53
185.220.70.50:53
188.138.57.95:53
195.145.137.164:53
195.167.223.164:53
195.226.69.82:53
195.37.174.194:53
195.4.138.12:53
195.63.61.189:53
212.124.35.25:53
212.184.191.100:53
212.38.2.130:53
212.51.16.193:53
212.66.129.107:53
212.8.216.41:53
212.89.128.28:53
213.133.116.14:53
213.166.247.100:53
217.243.173.82:53
217.5.182.118:53
217.7.80.40:53
217.7.81.136:53
217.9.50.199:53
46.237.220.2:53
5.189.179.105:53
52.28.79.14:53
52.29.2.17:53
62.146.202.2:53
62.146.2.48:53
62.153.122.2:53
62.153.237.200:53
62.153.237.201:53
62.154.139.99:53
62.154.159.12:53
62.154.159.5:53
62.154.160.3:53
62.209.40.75:53
62.217.61.162:53
62.245.225.55:53
78.111.224.224:53
78.111.226.226:53
78.138.80.42:53
80.149.112.139:53
80.156.6.209:53
80.190.209.218:53
80.228.231.122:53
80.228.231.48:53
80.245.65.100:53
81.14.182.169:53
81.27.162.100:53
83.97.23.178:53
83.97.23.226:53
84.16.240.43:53
85.214.98.185:53
89.19.228.52:53
89.200.168.203:53
91.217.86.4:53
93.104.195.2:53
94.247.43.254:53
109.234.248.8:53
131.220.20.199:53
131.220.23.123:53
144.76.173.169:53
144.76.83.104:53
148.251.92.241:53
176.9.136.236:53
195.10.195.195:53
173.212.249.41:53
85.214.41.155:53
54.37.75.2:53
194.55.13.75:53
5.189.138.153:53
159.69.51.18:53
51.75.77.179:53
138.201.169.84:53
138.201.239.66:53
138.68.106.109:53
145.253.109.162:53
159.69.68.181:53
167.86.78.56:53
173.212.208.116:53
173.212.218.206:53
173.212.219.129:53
173.212.239.87:53
173.212.242.89:53
173.212.244.78:53
173.249.41.233:53
173.249.48.6:53
176.9.233.171:53
176.9.58.218:53
178.162.199.27:53
178.162.208.135:53
178.238.230.127:53
178.238.235.218:53
18.195.121.224:53
185.139.98.100:53
185.40.135.11:53
185.53.169.22:53
185.90.131.194:53
188.40.239.99:53
188.68.35.145:53
192.162.85.48:53
193.159.232.5:53
194.77.237.31:53
194.77.253.32:53
195.201.192.29:53
195.202.52.30:53
195.243.101.5:53
207.180.203.42:53
207.180.243.200:53
207.180.247.212:53
213.136.71.68:53
213.136.77.39:53
213.144.24.234:53
217.147.96.210:53
217.182.198.203:53
217.6.131.248:53
217.6.247.237:53
217.6.64.5:53
217.79.177.220:53
46.163.119.155:53
46.228.199.116:53
5.175.26.208:53
51.77.65.15:53
5.189.133.151:53
5.189.141.216:53
5.189.186.154:53
5.189.186.93:53
5.189.187.34:53
5.199.141.30:53
5.45.96.220:53
62.138.20.211:53
62.144.82.252:53
62.153.165.107:53
62.153.201.91:53
62.154.214.84:53
62.157.242.85:53
79.143.177.243:53
79.143.183.45:53
80.156.198.146:53
80.156.6.206:53
80.237.207.100:53
80.82.223.94:53
81.169.215.29:53
81.169.223.126:53
81.169.230.157:53
81.20.80.79:53
83.236.183.211:53
84.16.240.224:53
85.214.224.76:53
85.214.238.190:53
85.214.246.133:53
85.214.62.160:53
85.93.91.101:53
87.106.63.208:53
87.118.126.225:53
88.198.37.146:53
88.99.66.18:53
89.163.150.209:53
89.163.220.114:53
89.19.236.152:53
93.104.213.74:53
93.186.196.137:53
93.190.71.172:53
94.177.246.221:53
80.241.218.68:53
172.105.81.90:53
172.105.81.92:53
84.200.70.40:53
94.16.114.254:53
93.90.207.192:53
93.90.201.211:53
144.91.68.146:53
176.9.37.132:53
176.9.93.198:53
176.9.1.117:53
144.91.115.47:53
91.237.100.4:53

dns_client.go

@ -0,0 +1,98 @@
package main

import (
    "fmt"
    "math/rand"
    "strings"
    "time"

    "github.com/miekg/dns"
)

//ForwardQuery forwards the query to the upstream servers;
//the first server to answer wins
func ForwardQuery(query *dns.Msg) *dns.Msg {
    go incrementStats("ForwardQueries", 1)

    r := new(dns.Msg)
    r.SetReply(query)
    r.Authoritative = true

    fqdn := strings.TrimRight(query.Question[0].Name, ".")
    lfqdn := fmt.Sprintf("%d", query.Question[0].Qtype) + "." + fqdn

    if cached := GetDomainFromCache(lfqdn); cached != nil {
        go incrementStats("CacheHit", 1)
        cached.SetReply(query)
        cached.Authoritative = true
        return cached
    }

    c := new(dns.Client)
    c.ReadTimeout = 500 * time.Millisecond
    c.WriteTimeout = 500 * time.Millisecond

    for {
        // round robin with retry
        if !NetworkUp {
            time.Sleep(10 * time.Second)
            go incrementStats("Network Problems ", 1)
            continue
        }

        d := oneTimeDNS()

        in, _, err := c.Exchange(query, d)
        if err != nil {
            fmt.Printf("Problem with DNS %s : %s\n", d, err.Error())
            go incrementStats("DNS Problems "+d, 1)
            continue
        }

        go incrementStats(d, 1)
        in.SetReply(query)
        in.Authoritative = true
        go DomainCache(lfqdn, in)
        return in
    }
}

func init() {
    fmt.Println("DNS client engine starting")
    NetworkUp = checkNetworkUp()
    if NetworkUp {
        fmt.Println("[OK]: Network is UP")
    } else {
        fmt.Println("[KO] Network is DOWN: system will check again in 2 minutes")
    }
}

func oneTimeDNS() (dns string) {
    rand.Seed(time.Now().Unix())

    upl := ZabovDNSArray
    if len(upl) < 1 {
        fmt.Println("No DNS defined, using default 127.0.0.53:53. Hope it works!")
        return "127.0.0.53:53"
    }

    n := rand.Intn(128*len(upl)) % len(upl)
    dns = upl[n]
    return
}

dns_handler.go

@ -0,0 +1,44 @@
package main

import (
    "net"
    "strings"

    "github.com/miekg/dns"
)

// ServeDNS answers A queries for blacklisted domains with the blackhole IP
// and forwards everything else upstream.
func (mydns *handler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
    go incrementStats("TotalQueries", 1)

    // Count the client only when its address parses correctly.
    remIP, _, e := net.SplitHostPort(w.RemoteAddr().String())
    if e == nil {
        go incrementStats("CLIENT: "+remIP, 1)
    }

    msg := dns.Msg{}
    msg.SetReply(r)

    switch r.Question[0].Qtype {
    case dns.TypeA:
        msg.Authoritative = true
        domain := msg.Question[0].Name
        fqdn := strings.TrimRight(domain, ".")
        if domainInKillfile(fqdn) {
            // Blocked domain: answer with the configured blackhole IP.
            go incrementStats("Killed", 1)
            msg.Answer = append(msg.Answer, &dns.A{
                Hdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
                A:   net.ParseIP(ZabovAddBL),
            })
        } else {
            // Not blocked: reply with the forwarded answer only.
            w.WriteMsg(ForwardQuery(r))
            return
        }
    default:
        // Non-A queries are always forwarded upstream.
        w.WriteMsg(ForwardQuery(r))
        return
    }

    w.WriteMsg(&msg)
}

go.mod

@ -0,0 +1,8 @@
module zabov

go 1.13

require (
    github.com/miekg/dns v1.1.27
    github.com/syndtr/goleveldb v1.0.0
)

go.sum

@ -0,0 +1,36 @@
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

hostfile.go

@ -0,0 +1,59 @@
package main

import (
    "bufio"
    "fmt"
    "os"
)

func init() {
    fmt.Println("Ingesting local hosts file")
    ingestLocalBlacklist()
}

func ingestLocalBlacklist() {
    file, err := os.Open(ZabovHostsFile)
    if err != nil {
        fmt.Println(err.Error())
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)
    for scanner.Scan() {
        d := scanner.Text()
        DomainKill(d, ZabovHostsFile)
        incrementStats("Blacklist", 1)
    }

    if err := scanner.Err(); err != nil {
        fmt.Println(err.Error())
    }
}

func fileByLines(filename string) (blurls []string) {
    file, err := os.Open(filename)
    if err != nil {
        fmt.Println(err.Error())
    }
    defer file.Close()

    scanner := bufio.NewScanner(file)
    for scanner.Scan() {
        d := scanner.Text()
        blurls = append(blurls, d)
    }

    if err := scanner.Err(); err != nil {
        fmt.Println(err.Error())
    }

    return
}

main.go

@ -0,0 +1,46 @@
package main

import (
    "log"

    "github.com/miekg/dns"
)

//MyDNS is my dns server
var MyDNS *dns.Server

//ZabovUpDNS is the path of the file listing the upstream DNS servers
var ZabovUpDNS string

//ZabovSingleBL is the file listing URLs that return just names of domains
var ZabovSingleBL string

//ZabovDoubleBL is the file listing URLs that return lines of IP<space>domain
var ZabovDoubleBL string

//ZabovAddBL is the IP we return for blocked domains. Usually it is 127.0.0.1
var ZabovAddBL string

//ZabovCacheTTL is the number of hours we cache DNS records
var ZabovCacheTTL int

//ZabovKillTTL is the number of hours we cache the killfile
var ZabovKillTTL int

//ZabovHostsFile is the local blacklist file (one domain per line)
var ZabovHostsFile string

//ZabovDNSArray is the array containing all the upstream DNS servers we can query
var ZabovDNSArray []string

type handler struct{}

func main() {
    MyDNS.Handler = &handler{}
    if err := MyDNS.ListenAndServe(); err != nil {
        log.Printf("Failed to set udp listener %s\n", err.Error())
    } else {
        log.Printf("Listener running\n")
    }
}

urls-domains.txt

@ -0,0 +1,37 @@
https://mirror1.malwaredomains.com/files/justdomains
https://raw.githubusercontent.com/hectorm/hmirror/master/data/adaway.org/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/adblock-nocoin-list/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/adguard-simplified/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/anudeepnd-adservers/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-ad/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-malvertising/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-malware/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-tracking/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/easylist/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/easyprivacy/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/eth-phishing-detect/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.2o7net/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.dead/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.risk/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.spam/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/kadhosts/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomainlist.com/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomains.com-immortaldomains/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomains.com-justdomains/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/matomo.org-spammers/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/mitchellkrogza-badd-boyz-hosts/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/pgl.yoyo.org/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/ransomwaretracker.abuse.ch/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/someonewhocares.org/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/spam404.com/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/stevenblack/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/winhelp2002.mvps.org/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/zerodot1-coinblockerlists-browser/list.txt
https://raw.githubusercontent.com/hectorm/hmirror/master/data/zeustracker.abuse.ch/list.txt
https://raw.githubusercontent.com/CHEF-KOCH/Audio-fingerprint-pages/master/AudioFp.txt
https://raw.githubusercontent.com/CHEF-KOCH/Canvas-fingerprinting-pages/master/Canvas.txt
https://raw.githubusercontent.com/CHEF-KOCH/WebRTC-tracking/master/WebRTC.txt
https://raw.githubusercontent.com/CHEF-KOCH/CKs-FilterList/master/Anti-Corp/hosts/NSABlocklist.txt
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-blocklist.txt
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
https://www.stopforumspam.com/downloads/toxic_domains_whole.txt

urls-hosts.txt

@ -0,0 +1,12 @@
http://sysctl.org/cameleon/hosts
https://www.malwaredomainlist.com/hostslist/hosts.txt
https://adaway.org/hosts.txt
https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/fakenews/hosts
https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/gambling/hosts
https://someonewhocares.org/hosts/hosts
https://getadhell.com/standard-package.txt
https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt
https://raw.githubusercontent.com/notracking/hosts-blocklists/master/hostnames.txt
https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
https://raw.githubusercontent.com/anudeepND/blacklist/master/facebook.txt

urls-local.txt

@ -0,0 +1,8 @@
blc.vodafone.com
gab.com
gab.ai
freespeechextremist.com
neckbeard.xyz
funkwhale.it
social.byoblu.com

vendor/github.com/golang/snappy/.gitignore

@ -0,0 +1,16 @@
cmd/snappytool/snappytool
testdata/bench
# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K

vendor/github.com/golang/snappy/AUTHORS

@ -0,0 +1,15 @@
# This is the official list of Snappy-Go authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as
# Name or Organization <email address>
# The email address is not required for organizations.
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/CONTRIBUTORS

@ -0,0 +1,37 @@
# This is the official list of people who can contribute
# (and typically have contributed) code to the Snappy-Go repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# http://code.google.com/legal/individual-cla-v1.0.html
# http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
# Names should be added to this file like so:
# Name <email address>
# Please keep the list sorted.
Damian Gryski <dgryski@gmail.com>
Jan Mercl <0xjnml@gmail.com>
Kai Backman <kaib@golang.org>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Russ Cox <rsc@golang.org>
Sebastien Binet <seb.binet@gmail.com>

vendor/github.com/golang/snappy/LICENSE

@ -0,0 +1,27 @@
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/golang/snappy/README

@ -0,0 +1,107 @@
The Snappy compression format in the Go programming language.
To download and install from source:
$ go get github.com/golang/snappy
Unless otherwise noted, the Snappy-Go source files are distributed
under the BSD-style license found in the LICENSE file.
Benchmarks.
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
"go test -test.bench=."
_UFlat0-8 2.19GB/s ± 0% html
_UFlat1-8 1.41GB/s ± 0% urls
_UFlat2-8 23.5GB/s ± 2% jpg
_UFlat3-8 1.91GB/s ± 0% jpg_200
_UFlat4-8 14.0GB/s ± 1% pdf
_UFlat5-8 1.97GB/s ± 0% html4
_UFlat6-8 814MB/s ± 0% txt1
_UFlat7-8 785MB/s ± 0% txt2
_UFlat8-8 857MB/s ± 0% txt3
_UFlat9-8 719MB/s ± 1% txt4
_UFlat10-8 2.84GB/s ± 0% pb
_UFlat11-8 1.05GB/s ± 0% gaviota
_ZFlat0-8 1.04GB/s ± 0% html
_ZFlat1-8 534MB/s ± 0% urls
_ZFlat2-8 15.7GB/s ± 1% jpg
_ZFlat3-8 740MB/s ± 3% jpg_200
_ZFlat4-8 9.20GB/s ± 1% pdf
_ZFlat5-8 991MB/s ± 0% html4
_ZFlat6-8 379MB/s ± 0% txt1
_ZFlat7-8 352MB/s ± 0% txt2
_ZFlat8-8 396MB/s ± 1% txt3
_ZFlat9-8 327MB/s ± 1% txt4
_ZFlat10-8 1.33GB/s ± 1% pb
_ZFlat11-8 605MB/s ± 1% gaviota
"go test -test.bench=. -tags=noasm"
_UFlat0-8 621MB/s ± 2% html
_UFlat1-8 494MB/s ± 1% urls
_UFlat2-8 23.2GB/s ± 1% jpg
_UFlat3-8 1.12GB/s ± 1% jpg_200
_UFlat4-8 4.35GB/s ± 1% pdf
_UFlat5-8 609MB/s ± 0% html4
_UFlat6-8 296MB/s ± 0% txt1
_UFlat7-8 288MB/s ± 0% txt2
_UFlat8-8 309MB/s ± 1% txt3
_UFlat9-8 280MB/s ± 1% txt4
_UFlat10-8 753MB/s ± 0% pb
_UFlat11-8 400MB/s ± 0% gaviota
_ZFlat0-8 409MB/s ± 1% html
_ZFlat1-8 250MB/s ± 1% urls
_ZFlat2-8 12.3GB/s ± 1% jpg
_ZFlat3-8 132MB/s ± 0% jpg_200
_ZFlat4-8 2.92GB/s ± 0% pdf
_ZFlat5-8 405MB/s ± 1% html4
_ZFlat6-8 179MB/s ± 1% txt1
_ZFlat7-8 170MB/s ± 1% txt2
_ZFlat8-8 189MB/s ± 1% txt3
_ZFlat9-8 164MB/s ± 1% txt4
_ZFlat10-8 479MB/s ± 1% pb
_ZFlat11-8 270MB/s ± 1% gaviota
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
are the numbers from C++ Snappy's
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
BM_UFlat/0 2.4GB/s html
BM_UFlat/1 1.4GB/s urls
BM_UFlat/2 21.8GB/s jpg
BM_UFlat/3 1.5GB/s jpg_200
BM_UFlat/4 13.3GB/s pdf
BM_UFlat/5 2.1GB/s html4
BM_UFlat/6 1.0GB/s txt1
BM_UFlat/7 959.4MB/s txt2
BM_UFlat/8 1.0GB/s txt3
BM_UFlat/9 864.5MB/s txt4
BM_UFlat/10 2.9GB/s pb
BM_UFlat/11 1.2GB/s gaviota
BM_ZFlat/0 944.3MB/s html (22.31 %)
BM_ZFlat/1 501.6MB/s urls (47.78 %)
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
BM_ZFlat/10 1.2GB/s pb (19.68 %)
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)

vendor/github.com/golang/snappy/decode.go

@ -0,0 +1,237 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
var (
// ErrCorrupt reports that the input is invalid.
ErrCorrupt = errors.New("snappy: corrupt input")
// ErrTooLarge reports that the uncompressed length is too large.
ErrTooLarge = errors.New("snappy: decoded block is too large")
// ErrUnsupported reports that the input isn't supported.
ErrUnsupported = errors.New("snappy: unsupported input")
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
)
// DecodedLen returns the length of the decoded block.
func DecodedLen(src []byte) (int, error) {
v, _, err := decodedLen(src)
return v, err
}
// decodedLen returns the length of the decoded block and the number of bytes
// that the length header occupied.
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
v, n := binary.Uvarint(src)
if n <= 0 || v > 0xffffffff {
return 0, 0, ErrCorrupt
}
const wordSize = 32 << (^uint(0) >> 32 & 1)
if wordSize == 32 && v > 0x7fffffff {
return 0, 0, ErrTooLarge
}
return int(v), n, nil
}
const (
decodeErrCodeCorrupt = 1
decodeErrCodeUnsupportedLiteralLength = 2
)
// Decode returns the decoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire decoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
return nil, err
}
if dLen <= len(dst) {
dst = dst[:dLen]
} else {
dst = make([]byte, dLen)
}
switch decode(dst, src[s:]) {
case 0:
return dst, nil
case decodeErrCodeUnsupportedLiteralLength:
return nil, errUnsupportedLiteralLength
}
return nil, ErrCorrupt
}
// NewReader returns a new Reader that decompresses from r, using the framing
// format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
func NewReader(r io.Reader) *Reader {
return &Reader{
r: r,
decoded: make([]byte, maxBlockSize),
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
}
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
type Reader struct {
r io.Reader
err error
decoded []byte
buf []byte
// decoded[i:j] contains decoded bytes that have not yet been passed on.
i, j int
readHeader bool
}
// Reset discards any buffered data, resets all state, and switches the Snappy
// reader to read from r. This permits reusing a Reader rather than allocating
// a new one.
func (r *Reader) Reset(reader io.Reader) {
r.r = reader
r.err = nil
r.i = 0
r.j = 0
r.readHeader = false
}
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
r.err = ErrCorrupt
}
return false
}
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
if !r.readFull(r.buf[:4], true) {
return 0, r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
}
// The chunk types are specified at
// https://github.com/google/snappy/blob/master/framing_format.txt
switch chunkType {
case chunkTypeCompressedData:
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeUncompressedData:
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
}
r.i, r.j = 0, n
continue
case chunkTypeStreamIdentifier:
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
}
}
continue
}
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
}
}
}

vendor/github.com/golang/snappy/decode_amd64.go

@ -0,0 +1,14 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int

vendor/github.com/golang/snappy/decode_amd64.s

@ -0,0 +1,490 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - AX scratch
// - BX scratch
// - CX length or x
// - DX offset
// - SI &src[s]
// - DI &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
TEXT ·decode(SB), NOSPLIT, $48-56
// Initialize SI, DI and R8-R13.
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, DI
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, SI
MOVQ R11, R13
ADDQ R12, R13
loop:
// for s < len(src)
CMPQ SI, R13
JEQ end
// CX = uint32(src[s])
//
// switch src[s] & 0x03
MOVBLZX (SI), CX
MOVL CX, BX
ANDL $3, BX
CMPL BX, $1
JAE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
SHRL $2, CX
CMPL CX, $60
JAE tagLit60Plus
// case x < 60:
// s++
INCQ SI
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that CX == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// CX can hold 64 bits, so the increment cannot overflow.
INCQ CX
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// AX = len(dst) - d
// BX = len(src) - s
MOVQ R10, AX
SUBQ DI, AX
MOVQ R13, BX
SUBQ SI, BX
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMPQ CX, $16
JGT callMemmove
CMPQ AX, $16
JLT callMemmove
CMPQ BX, $16
JLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(SI), X0
MOVOU X0, 0(DI)
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMPQ CX, AX
JGT errCorrupt
CMPQ CX, BX
JGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVQ DI, 0(SP)
MOVQ SI, 8(SP)
MOVQ CX, 16(SP)
MOVQ DI, 24(SP)
MOVQ SI, 32(SP)
MOVQ CX, 40(SP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVQ 24(SP), DI
MOVQ 32(SP), SI
MOVQ 40(SP), CX
MOVQ dst_base+0(FP), R8
MOVQ dst_len+8(FP), R9
MOVQ R8, R10
ADDQ R9, R10
MOVQ src_base+24(FP), R11
MOVQ src_len+32(FP), R12
MOVQ R11, R13
ADDQ R12, R13
// d += length
// s += length
ADDQ CX, DI
ADDQ CX, SI
JMP loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADDQ CX, SI
SUBQ $58, SI
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// case x == 60:
CMPL CX, $61
JEQ tagLit61
JA tagLit62Plus
// x = uint32(src[s-1])
MOVBLZX -1(SI), CX
JMP doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVWLZX -2(SI), CX
JMP doLit
tagLit62Plus:
CMPL CX, $62
JA tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVWLZX -3(SI), CX
MOVBLZX -1(SI), BX
SHLL $16, BX
ORL BX, CX
JMP doLit
tagLit63:
// case x == 63:
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
MOVL -4(SI), CX
JMP doLit
// The code above handles literal tags.
// ----------------------------------------
// The code below handles copy tags.
tagCopy4:
// case tagCopy4:
// s += 5
ADDQ $5, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-5])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
MOVLQZX -4(SI), DX
JMP doCopy
tagCopy2:
// case tagCopy2:
// s += 3
ADDQ $3, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// length = 1 + int(src[s-3])>>2
SHRQ $2, CX
INCQ CX
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
MOVWQZX -2(SI), DX
JMP doCopy
tagCopy:
// We have a copy tag. We assume that:
// - BX == src[s] & 0x03
// - CX == src[s]
CMPQ BX, $2
JEQ tagCopy2
JA tagCopy4
// case tagCopy1:
// s += 2
ADDQ $2, SI
// if uint(s) > uint(len(src)) { etc }
MOVQ SI, BX
SUBQ R11, BX
CMPQ BX, R12
JA errCorrupt
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
MOVQ CX, DX
ANDQ $0xe0, DX
SHLQ $3, DX
MOVBQZX -1(SI), BX
ORQ BX, DX
// length = 4 + int(src[s-2])>>2&0x7
SHRQ $2, CX
ANDQ $7, CX
ADDQ $4, CX
doCopy:
// This is the end of the outer "switch", when we have a copy tag.
//
// We assume that:
// - CX == length && CX > 0
// - DX == offset
// if offset <= 0 { etc }
CMPQ DX, $0
JLE errCorrupt
// if d < offset { etc }
MOVQ DI, BX
SUBQ R8, BX
CMPQ BX, DX
JLT errCorrupt
// if length > len(dst)-d { etc }
MOVQ R10, BX
SUBQ DI, BX
CMPQ CX, BX
JGT errCorrupt
// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
//
// Set:
// - R14 = len(dst)-d
// - R15 = &dst[d-offset]
MOVQ R10, R14
SUBQ DI, R14
MOVQ DI, R15
SUBQ DX, R15
// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
//
// First, try using two 8-byte load/stores, similar to the doLit technique
// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
// and not one 16-byte load/store, and the first store has to be before the
// second load, due to the overlap if offset is in the range [8, 16).
//
// if length > 16 || offset < 8 || len(dst)-d < 16 {
// goto slowForwardCopy
// }
// copy 16 bytes
// d += length
CMPQ CX, $16
JGT slowForwardCopy
CMPQ DX, $8
JLT slowForwardCopy
CMPQ R14, $16
JLT slowForwardCopy
MOVQ 0(R15), AX
MOVQ AX, 0(DI)
MOVQ 8(R15), BX
MOVQ BX, 8(DI)
ADDQ CX, DI
JMP loop
slowForwardCopy:
// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
// can still try 8-byte load stores, provided we can overrun up to 10 extra
// bytes. As above, the overrun will be fixed up by subsequent iterations
// of the outermost loop.
//
// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
// commentary says:
//
// ----
//
// The main part of this loop is a simple copy of eight bytes at a time
// until we've copied (at least) the requested amount of bytes. However,
// if d and d-offset are less than eight bytes apart (indicating a
// repeating pattern of length < 8), we first need to expand the pattern in
// order to get the correct results. For instance, if the buffer looks like
// this, with the eight-byte <d-offset> and <d> patterns marked as
// intervals:
//
// abxxxxxxxxxxxx
// [------] d-offset
// [------] d
//
// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
// once, after which we can move <d> two bytes without moving <d-offset>:
//
// ababxxxxxxxxxx
// [------] d-offset
// [------] d
//
// and repeat the exercise until the two no longer overlap.
//
// This allows us to do very well in the special case of one single byte
// repeated many times, without taking a big hit for more general cases.
//
// The worst case of extra writing past the end of the match occurs when
// offset == 1 and length == 1; the last copy will read from byte positions
// [0..7] and write to [4..11], whereas it was only supposed to write to
// position 1. Thus, ten excess bytes.
//
// ----
//
// That "10 byte overrun" worst case is confirmed by Go's
// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
// and finishSlowForwardCopy algorithm.
//
// if length > len(dst)-d-10 {
// goto verySlowForwardCopy
// }
SUBQ $10, R14
CMPQ CX, R14
JGT verySlowForwardCopy
makeOffsetAtLeast8:
// !!! As above, expand the pattern so that offset >= 8 and we can use
// 8-byte load/stores.
//
// for offset < 8 {
// copy 8 bytes from dst[d-offset:] to dst[d:]
// length -= offset
// d += offset
// offset += offset
// // The two previous lines together means that d-offset, and therefore
// // R15, is unchanged.
// }
CMPQ DX, $8
JGE fixUpSlowForwardCopy
MOVQ (R15), BX
MOVQ BX, (DI)
SUBQ DX, CX
ADDQ DX, DI
ADDQ DX, DX
JMP makeOffsetAtLeast8
fixUpSlowForwardCopy:
// !!! Add length (which might be negative now) to d (implied by DI being
// &dst[d]) so that d ends up at the right place when we jump back to the
// top of the loop. Before we do that, though, we save DI to AX so that, if
// length is positive, copying the remaining length bytes will write to the
// right place.
MOVQ DI, AX
ADDQ CX, DI
finishSlowForwardCopy:
// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
// length means that we overrun, but as above, that will be fixed up by
// subsequent iterations of the outermost loop.
CMPQ CX, $0
JLE loop
MOVQ (R15), BX
MOVQ BX, (AX)
ADDQ $8, R15
ADDQ $8, AX
SUBQ $8, CX
JMP finishSlowForwardCopy
verySlowForwardCopy:
// verySlowForwardCopy is a simple implementation of forward copy. In C
// parlance, this is a do/while loop instead of a while loop, since we know
// that length > 0. In Go syntax:
//
// for {
// dst[d] = dst[d - offset]
// d++
// length--
// if length == 0 {
// break
// }
// }
MOVB (R15), BX
MOVB BX, (DI)
INCQ R15
INCQ DI
DECQ CX
JNZ verySlowForwardCopy
JMP loop
// The code above handles copy tags.
// ----------------------------------------
end:
// This is the end of the "for s < len(src)".
//
// if d != len(dst) { etc }
CMPQ DI, R10
JNE errCorrupt
// return 0
MOVQ $0, ret+48(FP)
RET
errCorrupt:
// return decodeErrCodeCorrupt
MOVQ $1, ret+48(FP)
RET
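The slowForwardCopy commentary above describes expanding a short repeating pattern until the copy distance is at least 8 bytes. The following is a minimal Go sketch of that same pattern-expansion idea; the names forwardCopy, buf, d, offset and length are illustrative, and unlike the assembly this version stays within bounds instead of deliberately overrunning.

func forwardCopy(buf []byte, d, offset, length int) {
	// Precondition (assumed): 0 < offset <= d and d+length <= len(buf).
	// Each pass copies `offset` bytes, which doubles the repeated pattern;
	// doubling offset keeps the source start d-offset fixed, as in the
	// makeOffsetAtLeast8 loop above.
	for offset < length {
		copy(buf[d:d+offset], buf[d-offset:d])
		d += offset
		length -= offset
		offset += offset
	}
	// The remaining length is at most offset, so source and destination
	// no longer overlap and a single copy finishes the job.
	copy(buf[d:d+length], buf[d-offset:d-offset+length])
}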

101
vendor/github.com/golang/snappy/decode_other.go generated vendored Normal file
View File

@ -0,0 +1,101 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
// decode writes the decoding of src to dst. It assumes that the varint-encoded
// length of the decompressed bytes has already been read, and that len(dst)
// equals that length.
//
// It returns 0 on success or a decodeErrCodeXxx error code on failure.
func decode(dst, src []byte) int {
var d, s, offset, length int
for s < len(src) {
switch src[s] & 0x03 {
case tagLiteral:
x := uint32(src[s] >> 2)
switch {
case x < 60:
s++
case x == 60:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-1])
case x == 61:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-2]) | uint32(src[s-1])<<8
case x == 62:
s += 4
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
case x == 63:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
}
length = int(x) + 1
if length <= 0 {
return decodeErrCodeUnsupportedLiteralLength
}
if length > len(dst)-d || length > len(src)-s {
return decodeErrCodeCorrupt
}
copy(dst[d:], src[s:s+length])
d += length
s += length
continue
case tagCopy1:
s += 2
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 4 + int(src[s-2])>>2&0x7
offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
case tagCopy2:
s += 3
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-3])>>2
offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
case tagCopy4:
s += 5
if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
return decodeErrCodeCorrupt
}
length = 1 + int(src[s-5])>>2
offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
}
if offset <= 0 || d < offset || length > len(dst)-d {
return decodeErrCodeCorrupt
}
// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
// the built-in copy function, this byte-by-byte copy always runs
// forwards, even if the slices overlap. Conceptually, this is:
//
// d += forwardCopy(dst[d:d+length], dst[d-offset:])
for end := d + length; d != end; d++ {
dst[d] = dst[d-offset]
}
}
if d != len(dst) {
return decodeErrCodeCorrupt
}
return 0
}

285
vendor/github.com/golang/snappy/encode.go generated vendored Normal file
View File

@ -0,0 +1,285 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
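A minimal usage sketch of the block-format API: passing nil for dst lets each call allocate a large-enough slice. Decode is defined in decode.go, which is not part of this hunk, so treat its appearance here as an assumption about the rest of the package.

src := []byte("hello hello hello hello hello")
compressed := snappy.Encode(nil, src)          // nil dst: Encode allocates
original, err := snappy.Decode(nil, compressed) // err is nil for well-formed input
_ = original
_ = err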
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
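As a worked check of the formula above: for srcLen = maxBlockSize = 65536, n = 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, which is exactly the maxEncodedLenOfMaxBlockSize constant defined in snappy.go.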
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer {
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
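A minimal usage sketch for the buffered writer; the bytes import and the payload are illustrative, and error handling is only indicated.

var buf bytes.Buffer
w := snappy.NewBufferedWriter(&buf)
if _, err := w.Write([]byte("some payload")); err != nil {
	// handle the write error
}
if err := w.Close(); err != nil { // Close flushes the internal buffer
	// handle the flush/close error
}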
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
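As a worked example of the per-chunk header filled in above: a compressed body of 100 bytes gives chunkLen = 4 + 100 = 104, so the three little-endian length bytes after the chunk-type byte (0x00 for compressed data) are 0x68, 0x00, 0x00, followed by the four little-endian checksum bytes and then the body.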
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}

29
vendor/github.com/golang/snappy/encode_amd64.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
package snappy
// emitLiteral has the same semantics as in encode_other.go.
//
//go:noescape
func emitLiteral(dst, lit []byte) int
// emitCopy has the same semantics as in encode_other.go.
//
//go:noescape
func emitCopy(dst []byte, offset, length int) int
// extendMatch has the same semantics as in encode_other.go.
//
//go:noescape
func extendMatch(src []byte, i, j int) int
// encodeBlock has the same semantics as in encode_other.go.
//
//go:noescape
func encodeBlock(dst, src []byte) (d int)

730
vendor/github.com/golang/snappy/encode_amd64.s generated vendored Normal file
View File

@ -0,0 +1,730 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
// https://github.com/golang/snappy/issues/29
//
// As a workaround, the package was built with a known good assembler, and
// those instructions were disassembled by "objdump -d" to yield the
// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
// style comments, in AT&T asm syntax. Note that rsp here is a physical
// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
// fine on Go 1.6.
// The asm code generally follows the pure Go code in encode_other.go, except
// where marked with a "!!!".
// ----------------------------------------------------------------------------
// func emitLiteral(dst, lit []byte) int
//
// All local variables fit into registers. The register allocation:
// - AX len(lit)
// - BX n
// - DX return value
// - DI &dst[i]
// - R10 &lit[0]
//
// The 24 bytes of stack space is to call runtime·memmove.
//
// The unusual register allocation of local variables, such as R10 for the
// source pointer, matches the allocation used at the call site in encodeBlock,
// which makes it easier to manually inline this function.
TEXT ·emitLiteral(SB), NOSPLIT, $24-56
MOVQ dst_base+0(FP), DI
MOVQ lit_base+24(FP), R10
MOVQ lit_len+32(FP), AX
MOVQ AX, DX
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT oneByte
CMPL BX, $256
JLT twoBytes
threeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
ADDQ $3, DX
JMP memmove
twoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
ADDQ $2, DX
JMP memmove
oneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
ADDQ $1, DX
memmove:
MOVQ DX, ret+48(FP)
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
CALL runtime·memmove(SB)
RET
// ----------------------------------------------------------------------------
// func emitCopy(dst []byte, offset, length int) int
//
// All local variables fit into registers. The register allocation:
// - AX length
// - SI &dst[0]
// - DI &dst[i]
// - R11 offset
//
// The unusual register allocation of local variables, such as R11 for the
// offset, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·emitCopy(SB), NOSPLIT, $0-48
MOVQ dst_base+0(FP), DI
MOVQ DI, SI
MOVQ offset+24(FP), R11
MOVQ length+32(FP), AX
loop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT step1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP loop0
step1:
// if length > 64 { etc }
CMPL AX, $64
JLE step2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
step2:
// if length >= 12 || offset >= 2048 { goto step3 }
CMPL AX, $12
JGE step3
CMPL R11, $2048
JGE step3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
step3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
// Return the number of bytes written.
SUBQ SI, DI
MOVQ DI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func extendMatch(src []byte, i, j int) int
//
// All local variables fit into registers. The register allocation:
// - DX &src[0]
// - SI &src[j]
// - R13 &src[len(src) - 8]
// - R14 &src[len(src)]
// - R15 &src[i]
//
// The unusual register allocation of local variables, such as R15 for a source
// pointer, matches the allocation used at the call site in encodeBlock, which
// makes it easier to manually inline this function.
TEXT ·extendMatch(SB), NOSPLIT, $0-48
MOVQ src_base+0(FP), DX
MOVQ src_len+8(FP), R14
MOVQ i+24(FP), R15
MOVQ j+32(FP), SI
ADDQ DX, R14
ADDQ DX, R15
ADDQ DX, SI
MOVQ R14, R13
SUBQ $8, R13
cmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA cmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE bsf
ADDQ $8, R15
ADDQ $8, SI
JMP cmp8
bsf:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
cmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE extendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE extendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP cmp1
extendMatchEnd:
// Convert from &src[ret] to ret.
SUBQ DX, SI
MOVQ SI, ret+40(FP)
RET
// ----------------------------------------------------------------------------
// func encodeBlock(dst, src []byte) (d int)
//
// All local variables fit into registers, other than "var table". The register
// allocation:
// - AX . .
// - BX . .
// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
// - DX 64 &src[0], tableSize
// - SI 72 &src[s]
// - DI 80 &dst[d]
// - R9 88 sLimit
// - R10 . &src[nextEmit]
// - R11 96 prevHash, currHash, nextHash, offset
// - R12 104 &src[base], skip
// - R13 . &src[nextS], &src[len(src) - 8]
// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
// - R15 112 candidate
//
// The second column (56, 64, etc) is the stack offset to spill the registers
// when calling other functions. We could pack this slightly tighter, but it's
// simpler to have a dedicated spill map independent of the function called.
//
// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
TEXT ·encodeBlock(SB), 0, $32888-56
MOVQ dst_base+0(FP), DI
MOVQ src_base+24(FP), SI
MOVQ src_len+32(FP), R14
// shift, tableSize := uint32(32-8), 1<<8
MOVQ $24, CX
MOVQ $256, DX
calcShift:
// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
// shift--
// }
CMPQ DX, $16384
JGE varTable
CMPQ DX, R14
JGE varTable
SUBQ $1, CX
SHLQ $1, DX
JMP calcShift
varTable:
// var table [maxTableSize]uint16
//
// In the asm code, unlike the Go code, we can zero-initialize only the
// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
// writes 16 bytes, so we can do only tableSize/8 writes instead of the
// 2048 writes that would zero-initialize all of table's 32768 bytes.
SHRQ $3, DX
LEAQ table-32768(SP), BX
PXOR X0, X0
memclr:
MOVOU X0, 0(BX)
ADDQ $16, BX
SUBQ $1, DX
JNZ memclr
// !!! DX = &src[0]
MOVQ SI, DX
// sLimit := len(src) - inputMargin
MOVQ R14, R9
SUBQ $15, R9
// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
// change for the rest of the function.
MOVQ CX, 56(SP)
MOVQ DX, 64(SP)
MOVQ R9, 88(SP)
// nextEmit := 0
MOVQ DX, R10
// s := 1
ADDQ $1, SI
// nextHash := hash(load32(src, s), shift)
MOVL 0(SI), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
outer:
// for { etc }
// skip := 32
MOVQ $32, R12
// nextS := s
MOVQ SI, R13
// candidate := 0
MOVQ $0, R15
inner0:
// for { etc }
// s := nextS
MOVQ R13, SI
// bytesBetweenHashLookups := skip >> 5
MOVQ R12, R14
SHRQ $5, R14
// nextS = s + bytesBetweenHashLookups
ADDQ R14, R13
// skip += bytesBetweenHashLookups
ADDQ R14, R12
// if nextS > sLimit { goto emitRemainder }
MOVQ R13, AX
SUBQ DX, AX
CMPQ AX, R9
JA emitRemainder
// candidate = int(table[nextHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[nextHash] = uint16(s)
MOVQ SI, AX
SUBQ DX, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// nextHash = hash(load32(src, nextS), shift)
MOVL 0(R13), R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// if load32(src, s) != load32(src, candidate) { continue } break
MOVL 0(SI), AX
MOVL (DX)(R15*1), BX
CMPL AX, BX
JNE inner0
fourByteMatch:
// As per the encode_other.go code:
//
// A 4-byte match has been found. We'll later see etc.
// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
// on inputMargin in encode.go.
MOVQ SI, AX
SUBQ R10, AX
CMPQ AX, $16
JLE emitLiteralFastPath
// ----------------------------------------
// Begin inline of the emitLiteral call.
//
// d += emitLiteral(dst[d:], src[nextEmit:s])
MOVL AX, BX
SUBL $1, BX
CMPL BX, $60
JLT inlineEmitLiteralOneByte
CMPL BX, $256
JLT inlineEmitLiteralTwoBytes
inlineEmitLiteralThreeBytes:
MOVB $0xf4, 0(DI)
MOVW BX, 1(DI)
ADDQ $3, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralTwoBytes:
MOVB $0xf0, 0(DI)
MOVB BX, 1(DI)
ADDQ $2, DI
JMP inlineEmitLiteralMemmove
inlineEmitLiteralOneByte:
SHLB $2, BX
MOVB BX, 0(DI)
ADDQ $1, DI
inlineEmitLiteralMemmove:
// Spill local variables (registers) onto the stack; call; unspill.
//
// copy(dst[i:], lit)
//
// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
// DI, R10 and AX as arguments.
MOVQ DI, 0(SP)
MOVQ R10, 8(SP)
MOVQ AX, 16(SP)
ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
MOVQ SI, 72(SP)
MOVQ DI, 80(SP)
MOVQ R15, 112(SP)
CALL runtime·memmove(SB)
MOVQ 56(SP), CX
MOVQ 64(SP), DX
MOVQ 72(SP), SI
MOVQ 80(SP), DI
MOVQ 88(SP), R9
MOVQ 112(SP), R15
JMP inner1
inlineEmitLiteralEnd:
// End inline of the emitLiteral call.
// ----------------------------------------
emitLiteralFastPath:
// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
MOVB AX, BX
SUBB $1, BX
SHLB $2, BX
MOVB BX, (DI)
ADDQ $1, DI
// !!! Implement the copy from lit to dst as a 16-byte load and store.
// (Encode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only len(lit) bytes, but that's
// OK. Subsequent iterations will fix up the overrun.
//
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
MOVOU 0(R10), X0
MOVOU X0, 0(DI)
ADDQ AX, DI
inner1:
// for { etc }
// base := s
MOVQ SI, R12
// !!! offset := base - candidate
MOVQ R12, R11
SUBQ R15, R11
SUBQ DX, R11
// ----------------------------------------
// Begin inline of the extendMatch call.
//
// s = extendMatch(src, candidate+4, s+4)
// !!! R14 = &src[len(src)]
MOVQ src_len+32(FP), R14
ADDQ DX, R14
// !!! R13 = &src[len(src) - 8]
MOVQ R14, R13
SUBQ $8, R13
// !!! R15 = &src[candidate + 4]
ADDQ $4, R15
ADDQ DX, R15
// !!! s += 4
ADDQ $4, SI
inlineExtendMatchCmp8:
// As long as we are 8 or more bytes before the end of src, we can load and
// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
CMPQ SI, R13
JA inlineExtendMatchCmp1
MOVQ (R15), AX
MOVQ (SI), BX
CMPQ AX, BX
JNE inlineExtendMatchBSF
ADDQ $8, R15
ADDQ $8, SI
JMP inlineExtendMatchCmp8
inlineExtendMatchBSF:
// If those 8 bytes were not equal, XOR the two 8 byte values, and return
// the index of the first byte that differs. The BSF instruction finds the
// least significant 1 bit, the amd64 architecture is little-endian, and
// the shift by 3 converts a bit index to a byte index.
XORQ AX, BX
BSFQ BX, BX
SHRQ $3, BX
ADDQ BX, SI
JMP inlineExtendMatchEnd
inlineExtendMatchCmp1:
// In src's tail, compare 1 byte at a time.
CMPQ SI, R14
JAE inlineExtendMatchEnd
MOVB (R15), AX
MOVB (SI), BX
CMPB AX, BX
JNE inlineExtendMatchEnd
ADDQ $1, R15
ADDQ $1, SI
JMP inlineExtendMatchCmp1
inlineExtendMatchEnd:
// End inline of the extendMatch call.
// ----------------------------------------
// ----------------------------------------
// Begin inline of the emitCopy call.
//
// d += emitCopy(dst[d:], base-candidate, s-base)
// !!! length := s - base
MOVQ SI, AX
SUBQ R12, AX
inlineEmitCopyLoop0:
// for length >= 68 { etc }
CMPL AX, $68
JLT inlineEmitCopyStep1
// Emit a length 64 copy, encoded as 3 bytes.
MOVB $0xfe, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $64, AX
JMP inlineEmitCopyLoop0
inlineEmitCopyStep1:
// if length > 64 { etc }
CMPL AX, $64
JLE inlineEmitCopyStep2
// Emit a length 60 copy, encoded as 3 bytes.
MOVB $0xee, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
SUBL $60, AX
inlineEmitCopyStep2:
// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
CMPL AX, $12
JGE inlineEmitCopyStep3
CMPL R11, $2048
JGE inlineEmitCopyStep3
// Emit the remaining copy, encoded as 2 bytes.
MOVB R11, 1(DI)
SHRL $8, R11
SHLB $5, R11
SUBB $4, AX
SHLB $2, AX
ORB AX, R11
ORB $1, R11
MOVB R11, 0(DI)
ADDQ $2, DI
JMP inlineEmitCopyEnd
inlineEmitCopyStep3:
// Emit the remaining copy, encoded as 3 bytes.
SUBL $1, AX
SHLB $2, AX
ORB $2, AX
MOVB AX, 0(DI)
MOVW R11, 1(DI)
ADDQ $3, DI
inlineEmitCopyEnd:
// End inline of the emitCopy call.
// ----------------------------------------
// nextEmit = s
MOVQ SI, R10
// if s >= sLimit { goto emitRemainder }
MOVQ SI, AX
SUBQ DX, AX
CMPQ AX, R9
JAE emitRemainder
// As per the encode_other.go code:
//
// We could immediately etc.
// x := load64(src, s-1)
MOVQ -1(SI), R14
// prevHash := hash(uint32(x>>0), shift)
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// table[prevHash] = uint16(s-1)
MOVQ SI, AX
SUBQ DX, AX
SUBQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// currHash := hash(uint32(x>>8), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// candidate = int(table[currHash])
// XXX: MOVWQZX table-32768(SP)(R11*2), R15
// XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
BYTE $0x4e
BYTE $0x0f
BYTE $0xb7
BYTE $0x7c
BYTE $0x5c
BYTE $0x78
// table[currHash] = uint16(s)
ADDQ $1, AX
// XXX: MOVW AX, table-32768(SP)(R11*2)
// XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
BYTE $0x66
BYTE $0x42
BYTE $0x89
BYTE $0x44
BYTE $0x5c
BYTE $0x78
// if uint32(x>>8) == load32(src, candidate) { continue }
MOVL (DX)(R15*1), BX
CMPL R14, BX
JEQ inner1
// nextHash = hash(uint32(x>>16), shift)
SHRQ $8, R14
MOVL R14, R11
IMULL $0x1e35a7bd, R11
SHRL CX, R11
// s++
ADDQ $1, SI
// break out of the inner1 for loop, i.e. continue the outer loop.
JMP outer
emitRemainder:
// if nextEmit < len(src) { etc }
MOVQ src_len+32(FP), AX
ADDQ DX, AX
CMPQ R10, AX
JEQ encodeBlockEnd
// d += emitLiteral(dst[d:], src[nextEmit:])
//
// Push args.
MOVQ DI, 0(SP)
MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
MOVQ R10, 24(SP)
SUBQ R10, AX
MOVQ AX, 32(SP)
MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
// Spill local variables (registers) onto the stack; call; unspill.
MOVQ DI, 80(SP)
CALL ·emitLiteral(SB)
MOVQ 80(SP), DI
// Finish the "d +=" part of "d += emitLiteral(etc)".
ADDQ 48(SP), DI
encodeBlockEnd:
MOVQ dst_base+0(FP), AX
SUBQ AX, DI
MOVQ DI, d+48(FP)
RET

238
vendor/github.com/golang/snappy/encode_other.go generated vendored Normal file
View File

@ -0,0 +1,238 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine !gc noasm
package snappy
func load32(b []byte, i int) uint32 {
b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}
func load64(b []byte, i int) uint64 {
b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
i, n := 0, uint(len(lit)-1)
switch {
case n < 60:
dst[0] = uint8(n)<<2 | tagLiteral
i = 1
case n < 1<<8:
dst[0] = 60<<2 | tagLiteral
dst[1] = uint8(n)
i = 2
default:
dst[0] = 61<<2 | tagLiteral
dst[1] = uint8(n)
dst[2] = uint8(n >> 8)
i = 3
}
return i + copy(dst[i:], lit)
}
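As a worked example of the literal encodings above: for len(lit) = 5, n = 4 < 60, so the single tag byte is 4<<2|tagLiteral = 0x10 followed by the 5 literal bytes; for len(lit) = 100, n = 99, so the encoding is 0xf0 (60<<2), 0x63 (99), then the 100 literal bytes, matching the 0xf0 constant in the twoBytes path of encode_amd64.s.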
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
func emitCopy(dst []byte, offset, length int) int {
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
// length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
for length >= 68 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[i+0] = 63<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 64
}
if length > 64 {
// Emit a length 60 copy, encoded as 3 bytes.
dst[i+0] = 59<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
i += 3
length -= 60
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[i+0] = uint8(length-1)<<2 | tagCopy2
dst[i+1] = uint8(offset)
dst[i+2] = uint8(offset >> 8)
return i + 3
}
// Emit the remaining copy, encoded as 2 bytes.
dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
dst[i+1] = uint8(offset)
return i + 2
}
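As a worked example: emitCopy(dst, 10, 5) takes the 2-byte branch (length < 12 and offset < 2048) and writes 0x05 (0<<5 | 1<<2 | tagCopy1) then 0x0a; the decoder in decode_other.go recovers length = 4 + (0x05>>2)&7 = 5 and offset = (0x05&0xe0)<<3 | 0x0a = 10.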
// extendMatch returns the largest k such that k <= len(src) and that
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
}
return j
}
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
// and len(src) <= maxBlockSize and maxBlockSize == 65536.
const (
maxTableSize = 1 << 14
// tableMask is redundant, but helps the compiler eliminate bounds
// checks.
tableMask = maxTableSize - 1
)
shift := uint32(32 - 8)
for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
shift--
}
// In Go, all array elements are zero-initialized, so there is no advantage
// to a smaller tableSize per se. However, it matches the C++ algorithm,
// and in the asm versions of this code, we can get away with zeroing only
// the first tableSize elements.
var table [maxTableSize]uint16
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0
// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
nextHash := hash(load32(src, s), shift)
for {
// Copied from the C++ snappy implementation:
//
// Heuristic match skipping: If 32 bytes are scanned with no matches
// found, start looking only at every other byte. If 32 more bytes are
// scanned (or skipped), look at every third byte, etc.. When a match
// is found, immediately go back to looking at every byte. This is a
// small loss (~5% performance, ~0.1% density) for compressible data
// due to more bookkeeping, but for non-compressible data (such as
// JPEG) it's a huge win since the compressor quickly "realizes" the
// data is incompressible and doesn't bother looking for matches
// everywhere.
//
// The "skip" variable keeps track of how many bytes there are since
// the last match; dividing it by 32 (ie. right-shifting by five) gives
// the number of bytes to move ahead for each iteration.
skip := 32
nextS := s
candidate := 0
for {
s = nextS
bytesBetweenHashLookups := skip >> 5
nextS = s + bytesBetweenHashLookups
skip += bytesBetweenHashLookups
if nextS > sLimit {
goto emitRemainder
}
candidate = int(table[nextHash&tableMask])
table[nextHash&tableMask] = uint16(s)
nextHash = hash(load32(src, nextS), shift)
if load32(src, s) == load32(src, candidate) {
break
}
}
// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.
d += emitLiteral(dst[d:], src[nextEmit:s])
// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
// Extend the 4-byte match as long as possible.
//
// This is an inlined version of:
// s = extendMatch(src, candidate+4, s+4)
s += 4
for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
}
d += emitCopy(dst[d:], base-candidate, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// We could immediately start working at s now, but to improve
// compression we first update the hash table at s-1 and at s. If
// another emitCopy is not our next move, also calculate nextHash
// at s+1. At least on GOARCH=amd64, these three hash calculations
// are faster as one load64 call (with some shifts) instead of
// three load32 calls.
x := load64(src, s-1)
prevHash := hash(uint32(x>>0), shift)
table[prevHash&tableMask] = uint16(s - 1)
currHash := hash(uint32(x>>8), shift)
candidate = int(table[currHash&tableMask])
table[currHash&tableMask] = uint16(s)
if uint32(x>>8) != load32(src, candidate) {
nextHash = hash(uint32(x>>16), shift)
s++
break
}
}
}
emitRemainder:
if nextEmit < len(src) {
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

98
vendor/github.com/golang/snappy/snappy.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package snappy implements the Snappy compression format. It aims for very
// high speeds and reasonable compression.
//
// There are actually two Snappy formats: block and stream. They are related,
// but different: trying to decompress block-compressed data as a Snappy stream
// will fail, and vice versa. The block format is the Decode and Encode
// functions and the stream format is the Reader and Writer types.
//
// The block format, the more common case, is used when the complete size (the
// number of bytes) of the original data is known upfront, at the time
// compression starts. The stream format, also known as the framing format, is
// for when that isn't always true.
//
// The canonical, C++ implementation is at https://github.com/google/snappy and
// it only implements the block format.
package snappy // import "github.com/golang/snappy"
import (
"hash/crc32"
)
/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
first byte of each chunk is broken into its 2 least and 6 most significant bits
called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
Zero means a literal tag. All other values mean a copy tag.
For literal tags:
- If m < 60, the next 1 + m bytes are literal bytes.
- Otherwise, let n be the little-endian unsigned integer denoted by the next
m - 59 bytes. The next 1 + n bytes after that are literal bytes.
For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
- For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
of the offset. The next byte is bits 0-7 of the offset.
- For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
The length is 1 + m. The offset is the little-endian unsigned integer
denoted by the next 2 bytes.
- For l == 3, this tag is a legacy format that is no longer issued by most
encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
[1, 65). The length is 1 + m. The offset is the little-endian unsigned
integer denoted by the next 4 bytes.
*/
const (
tagLiteral = 0x00
tagCopy1 = 0x01
tagCopy2 = 0x02
tagCopy4 = 0x03
)
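A minimal Go sketch of splitting a chunk's first byte into the l and m fields described in the comment above; splitTag and its result names are illustrative and not part of the package.

func splitTag(b byte) (l, m byte) {
	// l is the 2-bit chunk tag (tagLiteral through tagCopy4),
	// m the 6 most significant bits.
	return b & 0x03, b >> 2
}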
const (
checksumSize = 4
chunkHeaderSize = 4
magicChunk = "\xff\x06\x00\x00" + magicBody
magicBody = "sNaPpY"
// maxBlockSize is the maximum size of the input to encodeBlock. It is not
// part of the wire format per se, but some parts of the encoder assume
// that an offset fits into a uint16.
//
// Also, for the framing format (Writer type instead of Encode function),
// https://github.com/google/snappy/blob/master/framing_format.txt says
// that "the uncompressed data in a chunk must be no longer than 65536
// bytes".
maxBlockSize = 65536
// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
// hard coded to be a const instead of a variable, so that obufLen can also
// be a const. Their equivalence is confirmed by
// TestMaxEncodedLenOfMaxBlockSize.
maxEncodedLenOfMaxBlockSize = 76490
obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
)
const (
chunkTypeCompressedData = 0x00
chunkTypeUncompressedData = 0x01
chunkTypePadding = 0xfe
chunkTypeStreamIdentifier = 0xff
)
var crcTable = crc32.MakeTable(crc32.Castagnoli)
// crc implements the checksum specified in section 3 of
// https://github.com/google/snappy/blob/master/framing_format.txt
func crc(b []byte) uint32 {
c := crc32.Update(0, crcTable, b)
return uint32(c>>15|c<<17) + 0xa282ead8
}

8
vendor/github.com/miekg/dns/.codecov.yml generated vendored Normal file
View File

@ -0,0 +1,8 @@
coverage:
status:
project:
default:
target: 40%
threshold: null
patch: false
changes: false

4
vendor/github.com/miekg/dns/.gitignore generated vendored Normal file
View File

@ -0,0 +1,4 @@
*.6
tags
test.out
a.out

17
vendor/github.com/miekg/dns/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,17 @@
language: go
sudo: false
go:
- "1.12.x"
- "1.13.x"
- tip
env:
- GO111MODULE=on
script:
- go generate ./... && test `git ls-files --modified | wc -l` = 0
- go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
after_success:
- bash <(curl -s https://codecov.io/bash)

1
vendor/github.com/miekg/dns/AUTHORS generated vendored Normal file
View File

@ -0,0 +1 @@
Miek Gieben <miek@miek.nl>

1
vendor/github.com/miekg/dns/CODEOWNERS generated vendored Normal file
View File

@ -0,0 +1 @@
* @miekg @tmthrgd

10
vendor/github.com/miekg/dns/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,10 @@
Alex A. Skinner
Andrew Tunnell-Jones
Ask Bjørn Hansen
Dave Cheney
Dusty Wilson
Marek Majkowski
Peter van Dijk
Omri Bahumi
Alex Sergeyev
James Hartig

9
vendor/github.com/miekg/dns/COPYRIGHT generated vendored Normal file
View File

@ -0,0 +1,9 @@
Copyright 2009 The Go Authors. All rights reserved. Use of this source code
is governed by a BSD-style license that can be found in the LICENSE file.
Extensions of the original work are copyright (c) 2011 Miek Gieben
Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.
Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
governed by a BSD-style license that can be found in the LICENSE file.

30
vendor/github.com/miekg/dns/LICENSE generated vendored Normal file
View File

@ -0,0 +1,30 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
As this is a fork of the official Go code, the same license applies.
Extensions of the original work are copyright (c) 2011 Miek Gieben

33
vendor/github.com/miekg/dns/Makefile.fuzz generated vendored Normal file
View File

@ -0,0 +1,33 @@
# Makefile for fuzzing
#
# Uses go-fuzz and needs the tools installed.
# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
#
# Installing go-fuzz:
# $ make -f Makefile.fuzz get
# Installs:
# * github.com/dvyukov/go-fuzz/go-fuzz
# * github.com/dvyukov/go-fuzz/go-fuzz-build
all: build
.PHONY: build
build:
go-fuzz-build -tags fuzz github.com/miekg/dns
.PHONY: build-newrr
build-newrr:
go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
.PHONY: fuzz
fuzz:
go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
.PHONY: get
get:
go get github.com/dvyukov/go-fuzz/go-fuzz
go get github.com/dvyukov/go-fuzz/go-fuzz-build
.PHONY: clean
clean:
rm *-fuzz.zip

52
vendor/github.com/miekg/dns/Makefile.release generated vendored Normal file
View File

@ -0,0 +1,52 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
# used to tag the git repo; we're not building any artifacts, so there is nothing
# to upload to github.
#
# * Up the version in version.go
# * Run: make -f Makefile.release release
# * will *commit* your change with 'Release $VERSION'
# * push to github
#
define GO
//+build ignore
package main
import (
"fmt"
"github.com/miekg/dns"
)
func main() {
fmt.Println(dns.Version.String())
}
endef
$(file > version_release.go,$(GO))
VERSION:=$(shell go run version_release.go)
TAG="v$(VERSION)"
all:
@echo Use the \'release\' target to start a release $(VERSION)
rm -f version_release.go
.PHONY: release
release: commit push
@echo Released $(VERSION)
rm -f version_release.go
.PHONY: commit
commit:
@echo Committing release $(VERSION)
git commit -am"Release $(VERSION)"
git tag $(TAG)
.PHONY: push
push:
@echo Pushing release $(VERSION) to master
git push --tags
git push

175
vendor/github.com/miekg/dns/README.md generated vendored Normal file
View File

@ -0,0 +1,175 @@
[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns)
[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns)
[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns)
# Alternative (more granular) approach to a DNS library
> Less is more.
Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there
isn't a convenience function for it. Server side and client side programming is supported, i.e. you
can build servers and resolvers with it.
We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
avoiding breaking changes wherever reasonable. We support the last two versions of Go.
# Goals
* KISS;
* Fast;
* Small API. If it's easy to code in Go, don't make a function for it.
# Users
A not-so-up-to-date-list-that-may-be-actually-current:
* https://github.com/coredns/coredns
* https://cloudflare.com
* https://github.com/abh/geodns
* http://www.statdns.com/
* http://www.dnsinspect.com/
* https://github.com/chuangbo/jianbing-dictionary-dns
* http://www.dns-lg.com/
* https://github.com/fcambus/rrda
* https://github.com/kenshinx/godns
* https://github.com/skynetservices/skydns
* https://github.com/hashicorp/consul
* https://github.com/DevelopersPL/godnsagent
* https://github.com/duedil-ltd/discodns
* https://github.com/StalkR/dns-reverse-proxy
* https://github.com/tianon/rawdns
* https://mesosphere.github.io/mesos-dns/
* https://pulse.turbobytes.com/
* https://github.com/fcambus/statzone
* https://github.com/benschw/dns-clb-go
* https://github.com/corny/dnscheck for <http://public-dns.info/>
* https://namesmith.io
* https://github.com/miekg/unbound
* https://github.com/miekg/exdns
* https://dnslookup.org
* https://github.com/looterz/grimd
* https://github.com/phamhongviet/serf-dns
* https://github.com/mehrdadrad/mylg
* https://github.com/bamarni/dockness
* https://github.com/fffaraz/microdns
* http://kelda.io
* https://github.com/ipdcode/hades <https://jd.com>
* https://github.com/StackExchange/dnscontrol/
* https://www.dnsperf.com/
* https://dnssectest.net/
* https://dns.apebits.com
* https://github.com/oif/apex
* https://github.com/jedisct1/dnscrypt-proxy
* https://github.com/jedisct1/rpdns
* https://github.com/xor-gate/sshfp
* https://github.com/rs/dnstrace
* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
* https://github.com/semihalev/sdns
* https://render.com
* https://github.com/peterzen/goresolver
* https://github.com/folbricht/routedns
Send pull request if you want to be listed here.
# Features
* UDP/TCP queries, IPv4 and IPv6
* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported)
* Fast
* Server side programming (mimicking the net/http package)
* Client side programming
* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
* EDNS0, NSID, Cookies
* AXFR/IXFR
* TSIG, SIG(0)
* DNS over TLS (DoT): encrypted connection between client and server over TCP
* DNS name compression
Have fun!
Miek Gieben - 2010-2012 - <miek@miek.nl>
DNS Authors 2012-
# Building
This library uses Go modules and semantic versioning. Building is done with the `go` tool, so
the following should work:
go get github.com/miekg/dns
go build github.com/miekg/dns
## Examples
A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc
github.com/miekg/dns`).
Example programs can be found in the `github.com/miekg/exdns` repository.
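As a minimal client sketch (mirroring the Exchange example in client.go): imports of dns and fmt are assumed, and "example.org" and the resolver address are placeholders.

m := new(dns.Msg)
m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)
c := new(dns.Client)
in, rtt, err := c.Exchange(m, "127.0.0.1:53")
if err == nil {
	fmt.Println(rtt, in.Answer)
}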
## Supported RFCs
*all of them*
* 103{4,5} - DNS standard
* 1348 - NSAP record (removed the record)
* 1982 - Serial Arithmetic
* 1876 - LOC record
* 1995 - IXFR
* 1996 - DNS notify
* 2136 - DNS Update (dynamic updates)
* 2181 - RRset definition - there is no RRset type though, just []RR
* 2537 - RSAMD5 DNS keys
* 2065 - DNSSEC (updated in later RFCs)
* 2671 - EDNS record
* 2782 - SRV record
* 2845 - TSIG record
* 2915 - NAPTR record
* 2929 - DNS IANA Considerations
* 3110 - RSASHA1 DNS keys
* 3123 - APL record
* 3225 - DO bit (DNSSEC OK)
* 340{1,2,3} - NAPTR record
* 3445 - Limiting the scope of (DNS)KEY
* 3597 - Unknown RRs
* 403{3,4,5} - DNSSEC + validation functions
* 4255 - SSHFP record
* 4343 - Case insensitivity
* 4408 - SPF record
* 4509 - SHA256 Hash in DS
* 4592 - Wildcards in the DNS
* 4635 - HMAC SHA TSIG
* 4701 - DHCID
* 4892 - id.server
* 5001 - NSID
* 5155 - NSEC3 record
* 5205 - HIP record
* 5702 - SHA2 in the DNS
* 5936 - AXFR
* 5966 - TCP implementation recommendations
* 6605 - ECDSA
* 6725 - IANA Registry Update
* 6742 - ILNP DNS
* 6840 - Clarifications and Implementation Notes for DNS Security
* 6844 - CAA record
* 6891 - EDNS0 update
* 6895 - DNS IANA considerations
* 6944 - DNSSEC DNSKEY Algorithm Status
* 6975 - Algorithm Understanding in DNSSEC
* 7043 - EUI48/EUI64 records
* 7314 - DNS (EDNS) EXPIRE Option
* 7477 - CSYNC RR
* 7828 - edns-tcp-keepalive EDNS0 Option
* 7553 - URI record
* 7858 - DNS over TLS: Initiation and Performance Considerations
* 7871 - EDNS0 Client Subnet
* 7873 - Domain Name System (DNS) Cookies
* 8080 - EdDSA for DNSSEC
* 8499 - DNS Terminology
## Loosely Based Upon
* ldns - <https://nlnetlabs.nl/projects/ldns/about/>
* NSD - <https://nlnetlabs.nl/projects/nsd/about/>
* Net::DNS - <http://www.net-dns.org/>
* GRONG - <https://github.com/bortzmeyer/grong>

61
vendor/github.com/miekg/dns/acceptfunc.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
package dns
// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
// It returns a MsgAcceptAction to indicate what should happen with the message.
type MsgAcceptFunc func(dh Header) MsgAcceptAction
// DefaultMsgAcceptFunc checks the request and will reject if:
//
// * isn't a request (don't respond in that case)
//
// * opcode isn't OpcodeQuery or OpcodeNotify
//
// * Zero bit isn't zero
//
// * has more than 1 question in the question section
//
// * has more than 1 RR in the Answer section
//
// * has more than 0 RRs in the Authority section
//
// * has more than 2 RRs in the Additional section
//
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
// MsgAcceptAction represents the action to be taken.
type MsgAcceptAction int
const (
MsgAccept MsgAcceptAction = iota // Accept the message
MsgReject // Reject the message with a RcodeFormatError
MsgIgnore // Ignore the error and send nothing back.
MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented
)
func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
if isResponse := dh.Bits&_QR != 0; isResponse {
return MsgIgnore
}
// Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
opcode := int(dh.Bits>>11) & 0xF
if opcode != OpcodeQuery && opcode != OpcodeNotify {
return MsgRejectNotImplemented
}
if dh.Qdcount != 1 {
return MsgReject
}
// NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
if dh.Ancount > 1 {
return MsgReject
}
// IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
if dh.Nscount > 1 {
return MsgReject
}
if dh.Arcount > 2 {
return MsgReject
}
return MsgAccept
}
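A hedged sketch of installing a stricter accept function by reassigning the DefaultMsgAcceptFunc variable declared above; it assumes the QR flag occupies bit 15 of Header.Bits, as the unexported _QR mask does, and only reproduces part of the default checks.

dns.DefaultMsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
	if dh.Bits&(1<<15) != 0 { // QR bit set: this is a response, not a request
		return dns.MsgIgnore
	}
	if op := int(dh.Bits>>11) & 0xF; op != dns.OpcodeQuery && op != dns.OpcodeNotify {
		return dns.MsgRejectNotImplemented
	}
	if dh.Qdcount != 1 {
		return dns.MsgReject
	}
	return dns.MsgAccept
}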

415
vendor/github.com/miekg/dns/client.go generated vendored Normal file
View File

@ -0,0 +1,415 @@
package dns
// A client implementation.
import (
"context"
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strings"
"time"
)
const (
dnsTimeout time.Duration = 2 * time.Second
tcpIdleTimeout time.Duration = 8 * time.Second
)
// A Conn represents a connection to a DNS server.
type Conn struct {
net.Conn // a net.Conn holding the connection
UDPSize uint16 // minimum receive buffer for UDP messages
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
tsigRequestMAC string
}
// A Client defines parameters for a DNS client.
type Client struct {
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
UDPSize uint16 // minimum receive buffer for UDP messages
TLSConfig *tls.Config // TLS connection configuration
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
Timeout time.Duration
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
group singleflight
}
// Exchange performs a synchronous UDP query. It sends the message m to the address
// contained in a and waits for a reply. Exchange does not retry a failed query, nor
// will it fall back to TCP in case of truncation.
// See client.Exchange for more information on setting larger buffer sizes.
func Exchange(m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.Exchange(m, a)
return r, err
}
func (c *Client) dialTimeout() time.Duration {
if c.Timeout != 0 {
return c.Timeout
}
if c.DialTimeout != 0 {
return c.DialTimeout
}
return dnsTimeout
}
func (c *Client) readTimeout() time.Duration {
if c.ReadTimeout != 0 {
return c.ReadTimeout
}
return dnsTimeout
}
func (c *Client) writeTimeout() time.Duration {
if c.WriteTimeout != 0 {
return c.WriteTimeout
}
return dnsTimeout
}
// Dial connects to the address on the named network.
func (c *Client) Dial(address string) (conn *Conn, err error) {
// create a new dialer with the appropriate timeout
var d net.Dialer
if c.Dialer == nil {
d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
} else {
d = *c.Dialer
}
network := c.Net
if network == "" {
network = "udp"
}
useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
conn = new(Conn)
if useTLS {
network = strings.TrimSuffix(network, "-tls")
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
} else {
conn.Conn, err = d.Dial(network, address)
}
if err != nil {
return nil, err
}
return conn, nil
}
// Exchange performs a synchronous query. It sends the message m to the address
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
//
// c := new(dns.Client)
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
//
// Exchange does not retry a failed query, nor will it fall back to TCP in
// case of truncation.
// It is up to the caller to create a message that allows for larger responses to be
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
// of 512 bytes.
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
// attribute appropriately.
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
if !c.SingleInflight {
return c.exchange(m, address)
}
q := m.Question[0]
key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
return c.exchange(m, address)
})
if r != nil && shared {
r = r.Copy()
}
return r, rtt, err
}
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var co *Conn
co, err = c.Dial(a)
if err != nil {
return nil, 0, err
}
defer co.Close()
opt := m.IsEdns0()
// If EDNS0 is used use that for size.
if opt != nil && opt.UDPSize() >= MinMsgSize {
co.UDPSize = opt.UDPSize()
}
// Otherwise use the client's configured UDP size.
if opt == nil && c.UDPSize >= MinMsgSize {
co.UDPSize = c.UDPSize
}
co.TsigSecret = c.TsigSecret
t := time.Now()
// write with the appropriate write timeout
co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
if err = co.WriteMsg(m); err != nil {
return nil, 0, err
}
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
rtt = time.Since(t)
return r, rtt, err
}
// ReadMsg reads a message from the connection co.
// If the received message contains a TSIG record the transaction signature
// is verified. This method always tries to return the message, however if an
// error is returned there are no guarantees that the returned message is a
// valid representation of the packet read.
func (co *Conn) ReadMsg() (*Msg, error) {
p, err := co.ReadMsgHeader(nil)
if err != nil {
return nil, err
}
m := new(Msg)
if err := m.Unpack(p); err != nil {
// If an error was returned, we still want to allow the user to use
// the message, but naively they can just check err if they don't want
// to use an erroneous message
return m, err
}
if t := m.IsTsig(); t != nil {
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return m, ErrSecret
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
}
return m, err
}
// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
// Returns message as a byte slice to be parsed with Msg.Unpack later on.
// Note that error handling on the message body is not possible as only the header is parsed.
func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
var (
p []byte
n int
err error
)
if _, ok := co.Conn.(net.PacketConn); ok {
if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize)
} else {
p = make([]byte, MinMsgSize)
}
n, err = co.Read(p)
} else {
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return nil, err
}
p = make([]byte, length)
n, err = io.ReadFull(co.Conn, p)
}
if err != nil {
return nil, err
} else if n < headerSize {
return nil, ErrShortRead
}
p = p[:n]
if hdr != nil {
dh, _, err := unpackMsgHdr(p, 0)
if err != nil {
return nil, err
}
*hdr = dh
}
return p, err
}
// Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil {
return 0, ErrConnEmpty
}
if _, ok := co.Conn.(net.PacketConn); ok {
// UDP connection
return co.Conn.Read(p)
}
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return 0, err
}
if int(length) > len(p) {
return 0, io.ErrShortBuffer
}
return io.ReadFull(co.Conn, p[:length])
}
// WriteMsg sends a message through the connection co.
// If the message m contains a TSIG record the transaction
// signature is calculated.
func (co *Conn) WriteMsg(m *Msg) (err error) {
var out []byte
if t := m.IsTsig(); t != nil {
mac := ""
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
return ErrSecret
}
out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
// Set for the next read, although only used in zone transfers
co.tsigRequestMAC = mac
} else {
out, err = m.Pack()
}
if err != nil {
return err
}
_, err = co.Write(out)
return err
}
// Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (int, error) {
if len(p) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
if _, ok := co.Conn.(net.PacketConn); ok {
return co.Conn.Write(p)
}
l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(p)))
n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
return int(n), err
}
// Return the appropriate timeout for a specific request
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
var requestTimeout time.Duration
if c.Timeout != 0 {
requestTimeout = c.Timeout
} else {
requestTimeout = timeout
}
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
// far
if c.Dialer != nil && c.Dialer.Timeout != 0 {
if c.Dialer.Timeout < requestTimeout {
requestTimeout = c.Dialer.Timeout
}
}
return requestTimeout
}
// Dial connects to the address on the named network.
func Dial(network, address string) (conn *Conn, err error) {
conn = new(Conn)
conn.Conn, err = net.Dial(network, address)
if err != nil {
return nil, err
}
return conn, nil
}
// ExchangeContext performs a synchronous UDP query, like Exchange. It
// additionally obeys deadlines from the passed Context.
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
client := Client{Net: "udp"}
r, _, err = client.ExchangeContext(ctx, m, a)
// ignoring rtt to leave the original ExchangeContext API unchanged, but
// this function will go away
return r, err
}
// ExchangeConn performs a synchronous query. It sends the message m via the connection
// c and waits for a reply. The connection c is not closed by ExchangeConn.
// Deprecated: This function is going away, but can easily be mimicked:
//
// co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m)
// in, _ := co.ReadMsg()
// co.Close()
//
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
println("dns: ExchangeConn: this function is deprecated")
co := new(Conn)
co.Conn = c
if err = co.WriteMsg(m); err != nil {
return nil, err
}
r, err = co.ReadMsg()
if err == nil && r.Id != m.Id {
err = ErrId
}
return r, err
}
// DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
return client.Dial(address)
}
// DialWithTLS connects to the address on the named network with TLS.
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, TLSConfig: tlsConfig}
return client.Dial(address)
}
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
if !strings.HasSuffix(network, "-tls") {
network += "-tls"
}
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
return client.Dial(address)
}
// ExchangeContext acts like Exchange, but honors the deadline on the provided
// context, if present. If there is both a context deadline and a configured
// timeout on the client, the earliest of the two takes effect.
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
var timeout time.Duration
if deadline, ok := ctx.Deadline(); !ok {
timeout = 0
} else {
timeout = time.Until(deadline)
}
// not passing the context to the underlying calls, as the API does not support
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
// TODO(tmthrgd,miekg): this is a race condition.
c.Dialer = &net.Dialer{Timeout: timeout}
return c.Exchange(m, a)
}
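
A short query sketch against the client above, assuming a consumer package; the resolver address is a placeholder. It advertises a 4096-byte EDNS0 buffer so larger answers fit over UDP, as the Exchange documentation suggests.

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := &dns.Client{Net: "udp", Timeout: 2 * time.Second}

	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)
	m.SetEdns0(4096, false) // advertise a larger UDP buffer

	in, rtt, err := c.Exchange(m, "192.0.2.53:53") // placeholder resolver address
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("rtt:", rtt, "answers:", len(in.Answer))
}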

135
vendor/github.com/miekg/dns/clientconfig.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
package dns
import (
"bufio"
"io"
"os"
"strconv"
"strings"
)
// ClientConfig wraps the contents of the /etc/resolv.conf file.
type ClientConfig struct {
Servers []string // servers to use
Search []string // suffixes to append to local name
Port string // what port to use
Ndots int // number of dots in name to trigger absolute lookup
Timeout int // seconds before giving up on packet
Attempts int // lost packets before giving up on server, not used in the package dns
}
// ClientConfigFromFile parses a resolv.conf(5) like file and returns
// a *ClientConfig.
func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
file, err := os.Open(resolvconf)
if err != nil {
return nil, err
}
defer file.Close()
return ClientConfigFromReader(file)
}
// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
c := new(ClientConfig)
scanner := bufio.NewScanner(resolvconf)
c.Servers = make([]string, 0)
c.Search = make([]string, 0)
c.Port = "53"
c.Ndots = 1
c.Timeout = 5
c.Attempts = 2
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return nil, err
}
line := scanner.Text()
f := strings.Fields(line)
if len(f) < 1 {
continue
}
switch f[0] {
case "nameserver": // add one name server
if len(f) > 1 {
// One more check: make sure server name is
// just an IP address. Otherwise we need DNS
// to look it up.
name := f[1]
c.Servers = append(c.Servers, name)
}
case "domain": // set search path to just this domain
if len(f) > 1 {
c.Search = make([]string, 1)
c.Search[0] = f[1]
} else {
c.Search = make([]string, 0)
}
case "search": // set search path to given servers
c.Search = append([]string(nil), f[1:]...)
case "options": // magic options
for _, s := range f[1:] {
switch {
case len(s) >= 6 && s[:6] == "ndots:":
n, _ := strconv.Atoi(s[6:])
if n < 0 {
n = 0
} else if n > 15 {
n = 15
}
c.Ndots = n
case len(s) >= 8 && s[:8] == "timeout:":
n, _ := strconv.Atoi(s[8:])
if n < 1 {
n = 1
}
c.Timeout = n
case len(s) >= 9 && s[:9] == "attempts:":
n, _ := strconv.Atoi(s[9:])
if n < 1 {
n = 1
}
c.Attempts = n
case s == "rotate":
/* not imp */
}
}
}
}
return c, nil
}
// NameList returns all of the names that should be queried based on the
// config. It is based on Go's net/dns name building, but it does not
// check the length of the resulting names.
func (c *ClientConfig) NameList(name string) []string {
// if this domain is already fully qualified, no append needed.
if IsFqdn(name) {
return []string{name}
}
// Check to see if the name has more labels than Ndots. Do this before making
// the domain fully qualified.
hasNdots := CountLabel(name) > c.Ndots
// Make the domain fully qualified.
name = Fqdn(name)
// Make a list of names based off search.
names := []string{}
// If name has enough dots, try that first.
if hasNdots {
names = append(names, name)
}
for _, s := range c.Search {
names = append(names, Fqdn(name+s))
}
// If we didn't have enough dots, try after suffixes.
if !hasNdots {
names = append(names, name)
}
return names
}
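
A sketch of reading the local resolver configuration and expanding a relative name through the search path, assuming /etc/resolv.conf exists on the host.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromFile("/etc/resolv.conf")
	if err != nil {
		fmt.Println("cannot read resolv.conf:", err)
		return
	}
	fmt.Println("servers:", conf.Servers, "port:", conf.Port, "ndots:", conf.Ndots)

	// NameList applies the ndots rule and the search suffixes to a relative name.
	for _, name := range conf.NameList("www") {
		fmt.Println(name)
	}
}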

43
vendor/github.com/miekg/dns/dane.go generated vendored Normal file
View File

@ -0,0 +1,43 @@
package dns
import (
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/hex"
"errors"
)
// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
switch matchingType {
case 0:
switch selector {
case 0:
return hex.EncodeToString(cert.Raw), nil
case 1:
return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
}
case 1:
h := sha256.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
case 2:
h := sha512.New()
switch selector {
case 0:
h.Write(cert.Raw)
return hex.EncodeToString(h.Sum(nil)), nil
case 1:
h.Write(cert.RawSubjectPublicKeyInfo)
return hex.EncodeToString(h.Sum(nil)), nil
}
}
return "", errors.New("dns: bad MatchingType or Selector")
}
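
A sketch of hashing a live certificate into TLSA form, assuming network access; the host is a placeholder, and selector 1 with matching type 1 corresponds to a SHA-256 digest of the SubjectPublicKeyInfo (the common "x 1 1" TLSA parameters).

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", nil) // placeholder host
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	// Selector 1 = SubjectPublicKeyInfo, matching type 1 = SHA-256.
	digest, err := dns.CertificateToDANE(1, 1, cert)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("TLSA certificate association data:", digest)
}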

378
vendor/github.com/miekg/dns/defaults.go generated vendored Normal file
View File

@ -0,0 +1,378 @@
package dns
import (
"errors"
"net"
"strconv"
"strings"
)
const hexDigit = "0123456789abcdef"
// Everything is assumed in ClassINET.
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
dns.Id = request.Id
dns.Response = true
dns.Opcode = request.Opcode
if dns.Opcode == OpcodeQuery {
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
}
dns.Rcode = RcodeSuccess
if len(request.Question) > 0 {
dns.Question = make([]Question, 1)
dns.Question[0] = request.Question[0]
}
return dns
}
// SetQuestion creates a question message; it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, t, ClassINET}
return dns
}
// SetNotify creates a notify message; it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetRcode creates an error message suitable for the request.
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
dns.SetReply(request)
dns.Rcode = rcode
return dns
}
// SetRcodeFormatError creates a message with FormError set.
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
dns.Rcode = RcodeFormatError
dns.Opcode = OpcodeQuery
dns.Response = true
dns.Authoritative = false
dns.Id = request.Id
return dns
}
// SetUpdate makes the message a dynamic update message. It
// sets the ZONE section to: z, TypeSOA, ClassINET.
func (dns *Msg) SetUpdate(z string) *Msg {
dns.Id = Id()
dns.Response = false
dns.Opcode = OpcodeUpdate
dns.Compress = false // BIND9 cannot handle compression
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetIxfr creates message for requesting an IXFR.
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Ns = make([]RR, 1)
s := new(SOA)
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
s.Serial = serial
s.Ns = ns
s.Mbox = mbox
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
dns.Ns[0] = s
return dns
}
// SetAxfr creates message for requesting an AXFR.
func (dns *Msg) SetAxfr(z string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
return dns
}
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
t.Fudge = fudge
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
return dns
}
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
e := new(OPT)
e.Hdr.Name = "."
e.Hdr.Rrtype = TypeOPT
e.SetUDPSize(udpsize)
if do {
e.SetDo()
}
dns.Extra = append(dns.Extra, e)
return dns
}
// IsTsig checks if the message has a TSIG record as the last record
// in the additional section. It returns the TSIG record found or nil.
func (dns *Msg) IsTsig() *TSIG {
if len(dns.Extra) > 0 {
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
return dns.Extra[len(dns.Extra)-1].(*TSIG)
}
}
return nil
}
// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// popEdns0 is like IsEdns0, but it removes the record from the message.
func (dns *Msg) popEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
opt := dns.Extra[i].(*OPT)
dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
return opt
}
}
return nil
}
// IsDomainName checks if s is a valid domain name. It returns the number of
// labels and true when the domain name is valid. Note that a non fully qualified
// domain name is considered valid; in this case the last label is counted in
// the number of labels. When false is returned the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name, as DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters and that the entire name will fit into the 255
// octet wire format limit.
func IsDomainName(s string) (labels int, ok bool) {
// XXX: The logic in this function was copied from packDomainName and
// should be kept in sync with that function.
const lenmsg = 256
if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
return 0, false
}
s = Fqdn(s)
// Each dot ends a segment of the name. Except for escaped dots (\.), which
// are normal dots.
var (
off int
begin int
wasDot bool
)
for i := 0; i < len(s); i++ {
switch s[i] {
case '\\':
if off+1 > lenmsg {
return labels, false
}
// check for \DDD
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
i += 3
begin += 3
} else {
i++
begin++
}
wasDot = false
case '.':
if wasDot {
// two dots back to back is not legal
return labels, false
}
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return labels, false
}
// off can already (we're in a loop) be bigger than lenmsg
// this happens when a name isn't fully qualified
off += 1 + labelLen
if off > lenmsg {
return labels, false
}
labels++
begin = i + 1
default:
wasDot = false
}
}
return labels, true
}
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
}
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
// The checking is performed on the binary payload.
func IsMsg(buf []byte) error {
// Header
if len(buf) < headerSize {
return errors.New("dns: bad message header")
}
// Header: Opcode
// TODO(miek): more checks here, e.g. check all header bits.
return nil
}
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
s2 := strings.TrimSuffix(s, ".")
if s == s2 {
return false
}
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
}
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
// This means the RRs need to have the same type, name, and class. Returns true
// if the RR set is valid, otherwise false.
func IsRRset(rrset []RR) bool {
if len(rrset) == 0 {
return false
}
if len(rrset) == 1 {
return true
}
rrHeader := rrset[0].Header()
rrType := rrHeader.Rrtype
rrClass := rrHeader.Class
rrName := rrHeader.Name
for _, rr := range rrset[1:] {
curRRHeader := rr.Header()
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
// Mismatch between the records, so this is not a valid rrset for
// signing/verifying
return false
}
}
return true
}
// Fqdn returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {
return s
}
return s + "."
}
// Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
// to parse the IP address.
func ReverseAddr(addr string) (arpa string, err error) {
ip := net.ParseIP(addr)
if ip == nil {
return "", &Error{err: "unrecognized address: " + addr}
}
if v4 := ip.To4(); v4 != nil {
buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
// Add it, in reverse, to the buffer
for i := len(v4) - 1; i >= 0; i-- {
buf = strconv.AppendInt(buf, int64(v4[i]), 10)
buf = append(buf, '.')
}
// Append "in-addr.arpa." and return (buf already has the final .)
buf = append(buf, "in-addr.arpa."...)
return string(buf), nil
}
// Must be IPv6
buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
buf = append(buf, hexDigit[v&0xF])
buf = append(buf, '.')
buf = append(buf, hexDigit[v>>4])
buf = append(buf, '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
return string(buf), nil
}
// String returns the string representation for the type t.
func (t Type) String() string {
if t1, ok := TypeToString[uint16(t)]; ok {
return t1
}
return "TYPE" + strconv.Itoa(int(t))
}
// String returns the string representation for the class c.
func (c Class) String() string {
if s, ok := ClassToString[uint16(c)]; ok {
// Only emit mnemonics when they are unambiguous; specifically, ANY is in both.
if _, ok := StringToType[s]; !ok {
return s
}
}
return "CLASS" + strconv.Itoa(int(c))
}
// String returns the string representation for the name n.
func (n Name) String() string {
return sprintName(string(n))
}
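
A sketch exercising a few of the helpers above: building a question, deriving a reply skeleton from it, and computing a reverse-lookup name; all names and addresses are placeholders.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	q := new(dns.Msg)
	q.SetQuestion(dns.Fqdn("example.org"), dns.TypeAAAA)

	r := new(dns.Msg)
	r.SetReply(q) // copies the Id, the question and the RD/CD bits

	if arpa, err := dns.ReverseAddr("192.0.2.1"); err == nil {
		fmt.Println(arpa) // 1.2.0.192.in-addr.arpa.
	}
	fmt.Println(dns.IsFqdn("example.org"), dns.Fqdn("example.org")) // false example.org.
	fmt.Println(r.Rcode == dns.RcodeSuccess)
}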

134
vendor/github.com/miekg/dns/dns.go generated vendored Normal file
View File

@ -0,0 +1,134 @@
package dns
import "strconv"
const (
year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
defaultTtl = 3600 // Default internal TTL.
// DefaultMsgSize is the standard default for messages larger than 512 bytes.
DefaultMsgSize = 4096
// MinMsgSize is the minimal size of a DNS packet.
MinMsgSize = 512
// MaxMsgSize is the largest possible DNS packet.
MaxMsgSize = 65535
)
// Error represents a DNS error.
type Error struct{ err string }
func (e *Error) Error() string {
if e == nil {
return "dns: <nil>"
}
return "dns: " + e.err
}
// An RR represents a resource record.
type RR interface {
// Header returns the header of a resource record. The header contains
// everything up to the rdata.
Header() *RR_Header
// String returns the text representation of the resource record.
String() string
// copy returns a copy of the RR
copy() RR
// len returns the length (in octets) of the compressed or uncompressed RR in wire format.
//
// If compression is nil, the uncompressed size will be returned, otherwise the compressed
// size will be returned and domain names will be added to the map for future compression.
len(off int, compression map[string]struct{}) int
// pack packs the records RDATA into wire format. The header will
// already have been packed into msg.
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
// unpack unpacks an RR from wire format.
//
// This will only be called on a new and empty RR type with only the header populated. It
// will only be called if the record's RDATA is non-empty.
unpack(msg []byte, off int) (off1 int, err error)
// parse parses an RR from zone file format.
//
// This will only be called on a new and empty RR type with only the header populated.
parse(c *zlexer, origin string) *ParseError
// isDuplicate returns whether the two RRs are duplicates.
isDuplicate(r2 RR) bool
}
// RR_Header is the header all DNS resource records share.
type RR_Header struct {
Name string `dns:"cdomain-name"`
Rrtype uint16
Class uint16
Ttl uint32
Rdlength uint16 // Length of data after header.
}
// Header returns itself. This is here to make RR_Header implement the RR interface.
func (h *RR_Header) Header() *RR_Header { return h }
// Just to implement the RR interface.
func (h *RR_Header) copy() RR { return nil }
func (h *RR_Header) String() string {
var s string
if h.Rrtype == TypeOPT {
s = ";"
// and maybe other things
}
s += sprintName(h.Name) + "\t"
s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
s += Class(h.Class).String() + "\t"
s += Type(h.Rrtype).String() + "\t"
return s
}
func (h *RR_Header) len(off int, compression map[string]struct{}) int {
l := domainNameLen(h.Name, off, compression, true)
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
return l
}
func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// RR_Header has no RDATA to pack.
return off, nil
}
func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
panic("dns: internal error: unpack should never be called on RR_Header")
}
func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on RR_Header")
}
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, Len(r)*2)
headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil {
return err
}
buf = buf[:off]
*rr = RFC3597{Hdr: *r.Header()}
rr.Hdr.Rdlength = uint16(off - headerEnd)
if noRdata(rr.Hdr) {
return nil
}
_, err = rr.unpack(buf, headerEnd)
if err != nil {
return err
}
return nil
}
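
A sketch of the RR interface from the consumer side; it assumes the package's zone-file parser (NewRR, defined elsewhere in the package) to build a record, then re-encodes it in the opaque RFC 3597 form.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(rr.Header().Name, dns.Type(rr.Header().Rrtype), dns.Class(rr.Header().Class))

	// Re-encode the known record as an RFC 3597 "unknown" record.
	var unknown dns.RFC3597
	if err := unknown.ToRFC3597(rr); err == nil {
		fmt.Println(unknown.String())
	}
}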

794
vendor/github.com/miekg/dns/dnssec.go generated vendored Normal file
View File

@ -0,0 +1,794 @@
package dns
import (
"bytes"
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/elliptic"
_ "crypto/md5"
"crypto/rand"
"crypto/rsa"
_ "crypto/sha1"
_ "crypto/sha256"
_ "crypto/sha512"
"encoding/asn1"
"encoding/binary"
"encoding/hex"
"math/big"
"sort"
"strings"
"time"
"golang.org/x/crypto/ed25519"
)
// DNSSEC encryption algorithm codes.
const (
_ uint8 = iota
RSAMD5
DH
DSA
_ // Skip 4, RFC 6725, section 2.1
RSASHA1
DSANSEC3SHA1
RSASHA1NSEC3SHA1
RSASHA256
_ // Skip 9, RFC 6725, section 2.1
RSASHA512
_ // Skip 11, RFC 6725, section 2.1
ECCGOST
ECDSAP256SHA256
ECDSAP384SHA384
ED25519
ED448
INDIRECT uint8 = 252
PRIVATEDNS uint8 = 253 // Private (experimental keys)
PRIVATEOID uint8 = 254
)
// AlgorithmToString is a map of algorithm IDs to algorithm names.
var AlgorithmToString = map[uint8]string{
RSAMD5: "RSAMD5",
DH: "DH",
DSA: "DSA",
RSASHA1: "RSASHA1",
DSANSEC3SHA1: "DSA-NSEC3-SHA1",
RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
RSASHA256: "RSASHA256",
RSASHA512: "RSASHA512",
ECCGOST: "ECC-GOST",
ECDSAP256SHA256: "ECDSAP256SHA256",
ECDSAP384SHA384: "ECDSAP384SHA384",
ED25519: "ED25519",
ED448: "ED448",
INDIRECT: "INDIRECT",
PRIVATEDNS: "PRIVATEDNS",
PRIVATEOID: "PRIVATEOID",
}
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
DSA: crypto.SHA1,
RSASHA1: crypto.SHA1,
RSASHA1NSEC3SHA1: crypto.SHA1,
RSASHA256: crypto.SHA256,
ECDSAP256SHA256: crypto.SHA256,
ECDSAP384SHA384: crypto.SHA384,
RSASHA512: crypto.SHA512,
ED25519: crypto.Hash(0),
}
// DNSSEC hashing algorithm codes.
const (
_ uint8 = iota
SHA1 // RFC 4034
SHA256 // RFC 4509
GOST94 // RFC 5933
SHA384 // Experimental
SHA512 // Experimental
)
// HashToString is a map of hash IDs to names.
var HashToString = map[uint8]string{
SHA1: "SHA1",
SHA256: "SHA256",
GOST94: "GOST94",
SHA384: "SHA384",
SHA512: "SHA512",
}
// DNSKEY flag values.
const (
SEP = 1
REVOKE = 1 << 7
ZONE = 1 << 8
)
// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
type rrsigWireFmt struct {
TypeCovered uint16
Algorithm uint8
Labels uint8
OrigTtl uint32
Expiration uint32
Inception uint32
KeyTag uint16
SignerName string `dns:"domain-name"`
/* No Signature */
}
// Used for converting DNSKEY's rdata to wirefmt.
type dnskeyWireFmt struct {
Flags uint16
Protocol uint8
Algorithm uint8
PublicKey string `dns:"base64"`
/* Nothing is left out */
}
func divRoundUp(a, b int) int {
return (a + b - 1) / b
}
// KeyTag calculates the keytag (or key-id) of the DNSKEY.
func (k *DNSKEY) KeyTag() uint16 {
if k == nil {
return 0
}
var keytag int
switch k.Algorithm {
case RSAMD5:
// Look at the bottom two bytes of the modulus, which is the last
// item in the pubkey.
// This algorithm has been deprecated, but keep this key-tag calculation.
modulus, _ := fromBase64([]byte(k.PublicKey))
if len(modulus) > 1 {
x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
keytag = int(x)
}
default:
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return 0
}
wire = wire[:n]
for i, v := range wire {
if i&1 != 0 {
keytag += int(v) // must be larger than uint32
} else {
keytag += int(v) << 8
}
}
keytag += keytag >> 16 & 0xFFFF
keytag &= 0xFFFF
}
return uint16(keytag)
}
// ToDS converts a DNSKEY record to a DS record.
func (k *DNSKEY) ToDS(h uint8) *DS {
if k == nil {
return nil
}
ds := new(DS)
ds.Hdr.Name = k.Hdr.Name
ds.Hdr.Class = k.Hdr.Class
ds.Hdr.Rrtype = TypeDS
ds.Hdr.Ttl = k.Hdr.Ttl
ds.Algorithm = k.Algorithm
ds.DigestType = h
ds.KeyTag = k.KeyTag()
keywire := new(dnskeyWireFmt)
keywire.Flags = k.Flags
keywire.Protocol = k.Protocol
keywire.Algorithm = k.Algorithm
keywire.PublicKey = k.PublicKey
wire := make([]byte, DefaultMsgSize)
n, err := packKeyWire(keywire, wire)
if err != nil {
return nil
}
wire = wire[:n]
owner := make([]byte, 255)
off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
if err1 != nil {
return nil
}
owner = owner[:off]
// RFC4034:
// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
// "|" denotes concatenation
// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
var hash crypto.Hash
switch h {
case SHA1:
hash = crypto.SHA1
case SHA256:
hash = crypto.SHA256
case SHA384:
hash = crypto.SHA384
case SHA512:
hash = crypto.SHA512
default:
return nil
}
s := hash.New()
s.Write(owner)
s.Write(wire)
ds.Digest = hex.EncodeToString(s.Sum(nil))
return ds
}
// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
c := &CDNSKEY{DNSKEY: *k}
c.Hdr = k.Hdr
c.Hdr.Rrtype = TypeCDNSKEY
return c
}
// ToCDS converts a DS record to a CDS record.
func (d *DS) ToCDS() *CDS {
c := &CDS{DS: *d}
c.Hdr = d.Hdr
c.Hdr.Rrtype = TypeCDS
return c
}
// Sign signs an RRSet. The signature needs to be filled in with the values:
// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
// from the RRset. Sign returns a non-nil error when signing fails.
// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non
// zero, it is used as-is, otherwise the TTL of the RRset is used as the
// OrigTTL.
func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
if k == nil {
return ErrPrivKey
}
// s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
h0 := rrset[0].Header()
rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = h0.Name
rr.Hdr.Class = h0.Class
if rr.OrigTtl == 0 { // If set don't override
rr.OrigTtl = h0.Ttl
}
rr.TypeCovered = h0.Rrtype
rr.Labels = uint8(CountLabel(h0.Name))
if strings.HasPrefix(h0.Name, "*") {
rr.Labels-- // wildcard, remove from label count
}
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
// For signing, lowercase this name
sigwire.SignerName = strings.ToLower(rr.SignerName)
// Create the desired binary blob
signdata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signdata)
if err != nil {
return err
}
signdata = signdata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case ED25519:
// ed25519 signs the raw message and performs hashing internally.
// All other supported signature schemes operate over the pre-hashed
// message, and thus ed25519 must be handled separately here.
//
// The raw message is passed directly into sign and crypto.Hash(0) is
// used to signal to the crypto.Signer that the data has not been hashed.
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
case RSAMD5, DSA, DSANSEC3SHA1:
// See RFC 6944.
return ErrAlg
default:
h := hash.New()
h.Write(signdata)
h.Write(wire)
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
if err != nil {
return err
}
rr.Signature = toBase64(signature)
}
return nil
}
func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
signature, err := k.Sign(rand.Reader, hashed, hash)
if err != nil {
return nil, err
}
switch alg {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
return signature, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
ecdsaSignature := &struct {
R, S *big.Int
}{}
if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
return nil, err
}
var intlen int
switch alg {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
signature := intToBytes(ecdsaSignature.R, intlen)
signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
return signature, nil
// There is no defined interface for what a DSA backed crypto.Signer returns
case DSA, DSANSEC3SHA1:
// t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
// signature := []byte{byte(t)}
// signature = append(signature, intToBytes(r1, 20)...)
// signature = append(signature, intToBytes(s1, 20)...)
// rr.Signature = signature
case ED25519:
return signature, nil
}
return nil, ErrAlg
}
// Verify validates an RRSet with the signature and key. This is only the
// cryptographic test, the signature validity period must be checked separately.
// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// First the easy checks
if !IsRRset(rrset) {
return ErrRRset
}
if rr.KeyTag != k.KeyTag() {
return ErrKey
}
if rr.Hdr.Class != k.Hdr.Class {
return ErrKey
}
if rr.Algorithm != k.Algorithm {
return ErrKey
}
if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
return ErrKey
}
if k.Protocol != 3 {
return ErrKey
}
// IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and
// class matches the RRSIG record.
if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
return ErrRRset
}
// RFC 4035 5.3.2. Reconstructing the Signed Data
// Copy the sig, except the rrsig data
sigwire := new(rrsigWireFmt)
sigwire.TypeCovered = rr.TypeCovered
sigwire.Algorithm = rr.Algorithm
sigwire.Labels = rr.Labels
sigwire.OrigTtl = rr.OrigTtl
sigwire.Expiration = rr.Expiration
sigwire.Inception = rr.Inception
sigwire.KeyTag = rr.KeyTag
sigwire.SignerName = strings.ToLower(rr.SignerName)
// Create the desired binary blob
signeddata := make([]byte, DefaultMsgSize)
n, err := packSigWire(sigwire, signeddata)
if err != nil {
return err
}
signeddata = signeddata[:n]
wire, err := rawSignatureData(rrset, rr)
if err != nil {
return err
}
sigbuf := rr.sigBuf() // Get the binary signature data
if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
// TODO(miek)
// remove the domain name and assume its ours?
}
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return ErrAlg
}
switch rr.Algorithm {
case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
pubkey := k.publicKeyRSA() // Get the key
if pubkey == nil {
return ErrKey
}
h := hash.New()
h.Write(signeddata)
h.Write(wire)
return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
case ECDSAP256SHA256, ECDSAP384SHA384:
pubkey := k.publicKeyECDSA()
if pubkey == nil {
return ErrKey
}
// Split sigbuf into the r and s coordinates
r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
h := hash.New()
h.Write(signeddata)
h.Write(wire)
if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
return nil
}
return ErrSig
case ED25519:
pubkey := k.publicKeyED25519()
if pubkey == nil {
return ErrKey
}
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
return nil
}
return ErrSig
default:
return ErrAlg
}
}
// ValidityPeriod uses RFC1982 serial arithmetic to calculate
// if a signature period is valid. If t is the zero time, the
// current time is used, otherwise t is. Returns true if the signature
// is valid at the given time, otherwise returns false.
func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
var utc int64
if t.IsZero() {
utc = time.Now().UTC().Unix()
} else {
utc = t.UTC().Unix()
}
modi := (int64(rr.Inception) - utc) / year68
mode := (int64(rr.Expiration) - utc) / year68
ti := int64(rr.Inception) + modi*year68
te := int64(rr.Expiration) + mode*year68
return ti <= utc && utc <= te
}
// Return the signature's sigdata, decoded from base64, as a byte slice.
func (rr *RRSIG) sigBuf() []byte {
sigbuf, err := fromBase64([]byte(rr.Signature))
if err != nil {
return nil
}
return sigbuf
}
// publicKeyRSA returns the RSA public key from a DNSKEY record.
func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 1+1+64 {
// Exponent must be at least 1 byte and modulus at least 64
return nil
}
// RFC 2537/3110, section 2. RSA Public KEY Resource Records
// Length is in the 0th byte, unless it is zero, in which case it
// is in bytes 1 and 2 and is a 16-bit number.
explen := uint16(keybuf[0])
keyoff := 1
if explen == 0 {
explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
keyoff = 3
}
if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
// Exponent larger than supported by the crypto package,
// empty, or contains prohibited leading zero.
return nil
}
modoff := keyoff + int(explen)
modlen := len(keybuf) - modoff
if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
// Modulus is too small, large, or contains prohibited leading zero.
return nil
}
pubkey := new(rsa.PublicKey)
var expo uint64
// The exponent of length explen is between keyoff and modoff.
for _, v := range keybuf[keyoff:modoff] {
expo <<= 8
expo |= uint64(v)
}
if expo > 1<<31-1 {
// Larger exponent than supported by the crypto package.
return nil
}
pubkey.E = int(expo)
pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
return pubkey
}
// publicKeyECDSA returns the Curve public key from the DNSKEY record.
func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
pubkey := new(ecdsa.PublicKey)
switch k.Algorithm {
case ECDSAP256SHA256:
pubkey.Curve = elliptic.P256()
if len(keybuf) != 64 {
// wrongly encoded key
return nil
}
case ECDSAP384SHA384:
pubkey.Curve = elliptic.P384()
if len(keybuf) != 96 {
// Wrongly encoded key
return nil
}
}
pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
return pubkey
}
func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) < 22 {
return nil
}
t, keybuf := int(keybuf[0]), keybuf[1:]
size := 64 + t*8
q, keybuf := keybuf[:20], keybuf[20:]
if len(keybuf) != 3*size {
return nil
}
p, keybuf := keybuf[:size], keybuf[size:]
g, y := keybuf[:size], keybuf[size:]
pubkey := new(dsa.PublicKey)
pubkey.Parameters.Q = new(big.Int).SetBytes(q)
pubkey.Parameters.P = new(big.Int).SetBytes(p)
pubkey.Parameters.G = new(big.Int).SetBytes(g)
pubkey.Y = new(big.Int).SetBytes(y)
return pubkey
}
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
keybuf, err := fromBase64([]byte(k.PublicKey))
if err != nil {
return nil
}
if len(keybuf) != ed25519.PublicKeySize {
return nil
}
return keybuf
}
type wireSlice [][]byte
func (p wireSlice) Len() int { return len(p) }
func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p wireSlice) Less(i, j int) bool {
_, ioff, _ := UnpackDomainName(p[i], 0)
_, joff, _ := UnpackDomainName(p[j], 0)
return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
}
// Return the raw signature data.
func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires := make(wireSlice, len(rrset))
for i, r := range rrset {
r1 := r.copy()
h := r1.Header()
h.Ttl = s.OrigTtl
labels := SplitDomainName(h.Name)
// 6.2. Canonical RR Form. (4) - wildcards
if len(labels) > int(s.Labels) {
// Wildcard
h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
}
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
h.Name = strings.ToLower(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
// SRV, DNAME, A6
//
// RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
// Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
// that needs conversion to lowercase, and twice at that. Since HINFO
// records contain no domain names, they are not subject to case
// conversion.
switch x := r1.(type) {
case *NS:
x.Ns = strings.ToLower(x.Ns)
case *MD:
x.Md = strings.ToLower(x.Md)
case *MF:
x.Mf = strings.ToLower(x.Mf)
case *CNAME:
x.Target = strings.ToLower(x.Target)
case *SOA:
x.Ns = strings.ToLower(x.Ns)
x.Mbox = strings.ToLower(x.Mbox)
case *MB:
x.Mb = strings.ToLower(x.Mb)
case *MG:
x.Mg = strings.ToLower(x.Mg)
case *MR:
x.Mr = strings.ToLower(x.Mr)
case *PTR:
x.Ptr = strings.ToLower(x.Ptr)
case *MINFO:
x.Rmail = strings.ToLower(x.Rmail)
x.Email = strings.ToLower(x.Email)
case *MX:
x.Mx = strings.ToLower(x.Mx)
case *RP:
x.Mbox = strings.ToLower(x.Mbox)
x.Txt = strings.ToLower(x.Txt)
case *AFSDB:
x.Hostname = strings.ToLower(x.Hostname)
case *RT:
x.Host = strings.ToLower(x.Host)
case *SIG:
x.SignerName = strings.ToLower(x.SignerName)
case *PX:
x.Map822 = strings.ToLower(x.Map822)
x.Mapx400 = strings.ToLower(x.Mapx400)
case *NAPTR:
x.Replacement = strings.ToLower(x.Replacement)
case *KX:
x.Exchanger = strings.ToLower(x.Exchanger)
case *SRV:
x.Target = strings.ToLower(x.Target)
case *DNAME:
x.Target = strings.ToLower(x.Target)
}
// 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
off, err1 := PackRR(r1, wire, 0, nil, false)
if err1 != nil {
return nil, err1
}
wire = wire[:off]
wires[i] = wire
}
sort.Sort(wires)
for i, wire := range wires {
if i > 0 && bytes.Equal(wire, wires[i-1]) {
continue
}
buf = append(buf, wire...)
}
return buf, nil
}
func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go RRSIG packing
off, err := packUint16(sw.TypeCovered, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(sw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(sw.Labels, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.OrigTtl, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Expiration, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(sw.Inception, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(sw.KeyTag, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
if err != nil {
return off, err
}
return off, nil
}
func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
// copied from zmsg.go DNSKEY packing
off, err := packUint16(dw.Flags, msg, 0)
if err != nil {
return off, err
}
off, err = packUint8(dw.Protocol, msg, off)
if err != nil {
return off, err
}
off, err = packUint8(dw.Algorithm, msg, off)
if err != nil {
return off, err
}
off, err = packStringBase64(dw.PublicKey, msg, off)
if err != nil {
return off, err
}
return off, nil
}
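
A self-contained sign-and-verify sketch using a throwaway ECDSA P-256 key; the zone name, the A record and the validity window are illustrative only.

package main

import (
	"crypto"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP,
		Protocol:  3,
		Algorithm: dns.ECDSAP256SHA256,
	}
	priv, err := key.Generate(256)
	if err != nil {
		fmt.Println(err)
		return
	}

	a, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		fmt.Println(err)
		return
	}
	rrset := []dns.RR{a}

	sig := &dns.RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  key.Algorithm,
		Inception:  uint32(time.Now().Add(-time.Hour).Unix()),
		Expiration: uint32(time.Now().Add(time.Hour).Unix()),
	}
	if err := sig.Sign(priv.(crypto.Signer), rrset); err != nil {
		fmt.Println("sign:", err)
		return
	}
	fmt.Println("verify:", sig.Verify(key, rrset)) // nil means the signature checks out
	fmt.Println("within validity window:", sig.ValidityPeriod(time.Now()))
}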

140
vendor/github.com/miekg/dns/dnssec_keygen.go generated vendored Normal file
View File

@ -0,0 +1,140 @@
package dns
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"math/big"
"golang.org/x/crypto/ed25519"
)
// Generate generates a DNSKEY of the given bit size.
// The public part is put inside the DNSKEY record.
// The Algorithm in the key must be set as this will define
// what kind of DNSKEY will be generated.
// The ECDSA algorithms imply a fixed key size; in that case
// bits should be set to the size of the algorithm.
func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
switch k.Algorithm {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
if bits < 512 || bits > 4096 {
return nil, ErrKeySize
}
case RSASHA512:
if bits < 1024 || bits > 4096 {
return nil, ErrKeySize
}
case ECDSAP256SHA256:
if bits != 256 {
return nil, ErrKeySize
}
case ECDSAP384SHA384:
if bits != 384 {
return nil, ErrKeySize
}
case ED25519:
if bits != 256 {
return nil, ErrKeySize
}
}
switch k.Algorithm {
case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
priv, err := rsa.GenerateKey(rand.Reader, bits)
if err != nil {
return nil, err
}
k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
return priv, nil
case ECDSAP256SHA256, ECDSAP384SHA384:
var c elliptic.Curve
switch k.Algorithm {
case ECDSAP256SHA256:
c = elliptic.P256()
case ECDSAP384SHA384:
c = elliptic.P384()
}
priv, err := ecdsa.GenerateKey(c, rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
return priv, nil
case ED25519:
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if err != nil {
return nil, err
}
k.setPublicKeyED25519(pub)
return priv, nil
default:
return nil, ErrAlg
}
}
// Set the public key (the values E and N)
func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
if _E == 0 || _N == nil {
return false
}
buf := exponentToBuf(_E)
buf = append(buf, _N.Bytes()...)
k.PublicKey = toBase64(buf)
return true
}
// Set the public key for Elliptic Curves
func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
if _X == nil || _Y == nil {
return false
}
var intlen int
switch k.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
return true
}
// Set the public key for Ed25519
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
if _K == nil {
return false
}
k.PublicKey = toBase64(_K)
return true
}
// Set the public key (the values E and N) for RSA
// RFC 3110: Section 2. RSA Public KEY Resource Records
func exponentToBuf(_E int) []byte {
var buf []byte
i := big.NewInt(int64(_E)).Bytes()
if len(i) < 256 {
buf = make([]byte, 1, 1+len(i))
buf[0] = uint8(len(i))
} else {
buf = make([]byte, 3, 3+len(i))
buf[0] = 0
buf[1] = uint8(len(i) >> 8)
buf[2] = uint8(len(i))
}
buf = append(buf, i...)
return buf
}
// Set the public key for X and Y for Curve. The two
// values are just concatenated.
func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
buf := intToBytes(_X, intlen)
buf = append(buf, intToBytes(_Y, intlen)...)
return buf
}
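
A key-generation sketch for Ed25519, where the bit-size argument is fixed at 256; the owner name is a placeholder and the flags mark it as a key-signing key.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.net.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     dns.ZONE | dns.SEP, // key-signing key
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	priv, err := key.Generate(256) // Ed25519 keys have a fixed size
	if err != nil {
		fmt.Println(err)
		return
	}

	fmt.Println(key.String()) // public half in zone-file form
	if ds := key.ToDS(dns.SHA256); ds != nil {
		fmt.Println(ds.String()) // delegation digest for the parent zone
	}
	_ = priv // the private half would normally be stored via PrivateKeyString
}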

322
vendor/github.com/miekg/dns/dnssec_keyscan.go generated vendored Normal file
View File

@ -0,0 +1,322 @@
package dns
import (
"bufio"
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"io"
"math/big"
"strconv"
"strings"
"golang.org/x/crypto/ed25519"
)
// NewPrivateKey returns a PrivateKey by parsing the string s.
// s should be in the same form as the BIND private key files.
func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
}
return k.ReadPrivateKey(strings.NewReader(s), "")
}
// ReadPrivateKey reads a private key from the io.Reader q. The string file is
// only used in error reporting.
// The public key must be known, because some cryptographic algorithms embed
// the public key inside the private key.
func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
m, err := parseKey(q, file)
if m == nil {
return nil, err
}
if _, ok := m["private-key-format"]; !ok {
return nil, ErrPrivKey
}
if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
return nil, ErrPrivKey
}
// TODO(mg): check if the pubkey matches the private key
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
if err != nil {
return nil, ErrPrivKey
}
switch uint8(algo) {
case RSAMD5, DSA, DSANSEC3SHA1:
return nil, ErrAlg
case RSASHA1:
fallthrough
case RSASHA1NSEC3SHA1:
fallthrough
case RSASHA256:
fallthrough
case RSASHA512:
priv, err := readPrivateKeyRSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyRSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ECCGOST:
return nil, ErrPrivKey
case ECDSAP256SHA256:
fallthrough
case ECDSAP384SHA384:
priv, err := readPrivateKeyECDSA(m)
if err != nil {
return nil, err
}
pub := k.publicKeyECDSA()
if pub == nil {
return nil, ErrKey
}
priv.PublicKey = *pub
return priv, nil
case ED25519:
return readPrivateKeyED25519(m)
default:
return nil, ErrPrivKey
}
}
// Read a private key (file) string and create a public key. Return the private key.
func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
p := new(rsa.PrivateKey)
p.Primes = []*big.Int{nil, nil}
for k, v := range m {
switch k {
case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
switch k {
case "modulus":
p.PublicKey.N = new(big.Int).SetBytes(v1)
case "publicexponent":
i := new(big.Int).SetBytes(v1)
p.PublicKey.E = int(i.Int64()) // int64 should be large enough
case "privateexponent":
p.D = new(big.Int).SetBytes(v1)
case "prime1":
p.Primes[0] = new(big.Int).SetBytes(v1)
case "prime2":
p.Primes[1] = new(big.Int).SetBytes(v1)
}
case "exponent1", "exponent2", "coefficient":
// not used in Go (yet)
case "created", "publish", "activate":
// not used in Go (yet)
}
}
return p, nil
}
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
p := new(ecdsa.PrivateKey)
p.D = new(big.Int)
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
v1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
p.D.SetBytes(v1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
var p ed25519.PrivateKey
// TODO: validate that the required flags are present
for k, v := range m {
switch k {
case "privatekey":
p1, err := fromBase64([]byte(v))
if err != nil {
return nil, err
}
if len(p1) != ed25519.SeedSize {
return nil, ErrPrivKey
}
p = ed25519.NewKeyFromSeed(p1)
case "created", "publish", "activate":
/* not used in Go (yet) */
}
}
return p, nil
}
// parseKey reads a private key from r. It returns a map[string]string,
// with the key-value pairs, or an error when the file is not correct.
func parseKey(r io.Reader, file string) (map[string]string, error) {
m := make(map[string]string)
var k string
c := newKLexer(r)
for l, ok := c.Next(); ok; l, ok = c.Next() {
// It should alternate
switch l.value {
case zKey:
k = l.token
case zValue:
if k == "" {
return nil, &ParseError{file, "no private key seen", l}
}
m[strings.ToLower(k)] = l.token
k = ""
}
}
// Surface any read errors from r.
if err := c.Err(); err != nil {
return nil, &ParseError{file: file, err: err.Error()}
}
return m, nil
}
type klexer struct {
br io.ByteReader
readErr error
line int
column int
key bool
eol bool // end-of-line
}
func newKLexer(r io.Reader) *klexer {
br, ok := r.(io.ByteReader)
if !ok {
br = bufio.NewReaderSize(r, 1024)
}
return &klexer{
br: br,
line: 1,
key: true,
}
}
func (kl *klexer) Err() error {
if kl.readErr == io.EOF {
return nil
}
return kl.readErr
}
// readByte returns the next byte from the input
func (kl *klexer) readByte() (byte, bool) {
if kl.readErr != nil {
return 0, false
}
c, err := kl.br.ReadByte()
if err != nil {
kl.readErr = err
return 0, false
}
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
if kl.eol {
kl.line++
kl.column = 0
kl.eol = false
}
if c == '\n' {
kl.eol = true
} else {
kl.column++
}
return c, true
}
func (kl *klexer) Next() (lex, bool) {
var (
l lex
str strings.Builder
commt bool
)
for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
l.line, l.column = kl.line, kl.column
switch x {
case ':':
if commt || !kl.key {
break
}
kl.key = false
// Next token is a space, eat it
kl.readByte()
l.value = zKey
l.token = str.String()
return l, true
case ';':
commt = true
case '\n':
if commt {
// Reset a comment
commt = false
}
if kl.key && str.Len() == 0 {
// ignore empty lines
break
}
kl.key = true
l.value = zValue
l.token = str.String()
return l, true
default:
if commt {
break
}
str.WriteByte(x)
}
}
if kl.readErr != nil && kl.readErr != io.EOF {
// Don't return any tokens after a read error occurs.
return lex{value: zEOF}, false
}
if str.Len() > 0 {
// Send remainder
l.value = zValue
l.token = str.String()
return l, true
}
return lex{value: zEOF}, false
}

94
vendor/github.com/miekg/dns/dnssec_privkey.go generated vendored Normal file
View File

@ -0,0 +1,94 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"math/big"
"strconv"
"golang.org/x/crypto/ed25519"
)
const format = "Private-key-format: v1.3\n"
var bigIntOne = big.NewInt(1)
// PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
// It supports rsa.PrivateKey, ecdsa.PrivateKey, dsa.PrivateKey and ed25519.PrivateKey.
func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
algorithm := strconv.Itoa(int(r.Algorithm))
algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
switch p := p.(type) {
case *rsa.PrivateKey:
modulus := toBase64(p.PublicKey.N.Bytes())
e := big.NewInt(int64(p.PublicKey.E))
publicExponent := toBase64(e.Bytes())
privateExponent := toBase64(p.D.Bytes())
prime1 := toBase64(p.Primes[0].Bytes())
prime2 := toBase64(p.Primes[1].Bytes())
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987
p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
exp1 := new(big.Int).Mod(p.D, p1)
exp2 := new(big.Int).Mod(p.D, q1)
coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes())
coefficient := toBase64(coeff.Bytes())
return format +
"Algorithm: " + algorithm + "\n" +
"Modulus: " + modulus + "\n" +
"PublicExponent: " + publicExponent + "\n" +
"PrivateExponent: " + privateExponent + "\n" +
"Prime1: " + prime1 + "\n" +
"Prime2: " + prime2 + "\n" +
"Exponent1: " + exponent1 + "\n" +
"Exponent2: " + exponent2 + "\n" +
"Coefficient: " + coefficient + "\n"
case *ecdsa.PrivateKey:
var intlen int
switch r.Algorithm {
case ECDSAP256SHA256:
intlen = 32
case ECDSAP384SHA384:
intlen = 48
}
private := toBase64(intToBytes(p.D, intlen))
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
case *dsa.PrivateKey:
T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
priv := toBase64(intToBytes(p.X, 20))
pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
return format +
"Algorithm: " + algorithm + "\n" +
"Prime(p): " + prime + "\n" +
"Subprime(q): " + subprime + "\n" +
"Base(g): " + base + "\n" +
"Private_value(x): " + priv + "\n" +
"Public_value(y): " + pub + "\n"
case ed25519.PrivateKey:
private := toBase64(p.Seed())
return format +
"Algorithm: " + algorithm + "\n" +
"PrivateKey: " + private + "\n"
default:
return ""
}
}
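// Illustrative sketch (not part of this package) of how PrivateKeyString is
// typically paired with DNSKEY.Generate; the key parameters below are
// assumptions, not recommendations:
//
//	key := new(dns.DNSKEY)
//	key.Hdr = dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
//	key.Flags = 256
//	key.Protocol = 3
//	key.Algorithm = dns.ECDSAP256SHA256
//	priv, err := key.Generate(256) // crypto.PrivateKey
//	if err == nil {
//		fmt.Println(key.String())               // the public DNSKEY RR
//		fmt.Println(key.PrivateKeyString(priv)) // BIND9 "Private-key-format: v1.3" text
//	}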

268
vendor/github.com/miekg/dns/doc.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
/*
Package dns implements a full featured interface to the Domain Name System.
Both server- and client-side programming is supported. The package allows
complete control over what is sent out to the DNS. The API follows the
less-is-more principle, by presenting a small, clean interface.
It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
Note that domain names MUST be fully qualified before sending them, unqualified
names in a message will result in a packing failure.
Resource records are native types. They are not stored in wire format. Basic
usage pattern for creating a new resource record:
r := new(dns.MX)
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
r.Preference = 10
r.Mx = "mx.miek.nl."
Or directly from a string:
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
Or even:
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
In the DNS, messages are exchanged; these messages contain resource records
(sets). Basic use pattern for creating a message:
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
Or when not certain if the domain name is fully qualified:
m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
The message m is now a message with the question section set to ask the MX
records for the miek.nl. zone.
The following is slightly more verbose, but more flexible:
m1 := new(dns.Msg)
m1.Id = dns.Id()
m1.RecursionDesired = true
m1.Question = make([]dns.Question, 1)
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
After creating a message it can be sent. Basic use pattern for synchronous
querying the DNS at a server configured on 127.0.0.1 and port 53:
c := new(dns.Client)
in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
Suppressing multiple outstanding queries (with the same question, type and
class) is as easy as setting:
c.SingleInflight = true
More advanced options are available using a net.Dialer and the corresponding API.
For example it is possible to set a timeout, or to specify a source IP address
and port to use for the connection:
c := new(dns.Client)
laddr := net.UDPAddr{
IP: net.ParseIP("::1"),
Port: 12345,
Zone: "",
}
c.Dialer = &net.Dialer{
Timeout: 200 * time.Millisecond,
LocalAddr: &laddr,
}
in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
If these "advanced" features are not needed, a simple UDP query can be sent,
with:
in, err := dns.Exchange(m1, "127.0.0.1:53")
When this function returns you will get a DNS message. A DNS message consists
of four sections.
The question section: in.Question, the answer section: in.Answer,
the authority section: in.Ns and the additional section: in.Extra.
Each of these sections (except the Question section) contains a []RR. Basic
use pattern for accessing the rdata of a TXT RR as the first RR in
the Answer section:
if t, ok := in.Answer[0].(*dns.TXT); ok {
// do something with t.Txt
}
Domain Name and TXT Character String Representations
Both domain names and TXT character strings are converted to presentation form
both when unpacked and when converted to strings.
For TXT character strings, tabs, carriage returns and line feeds will be
converted to \t, \r and \n respectively. Backslashes and quotation marks will
be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
For domain names, in addition to the above rules brackets, periods, spaces,
semicolons and the at symbol are escaped.
DNSSEC
DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
public key cryptography to sign resource records. The public keys are stored in
DNSKEY records and the signatures in RRSIG records.
Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
bit to a request.
m := new(dns.Msg)
m.SetEdns0(4096, true)
Signature generation, signature verification and key generation are all supported.
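As an illustrative sketch (the key flags, algorithm, validity window and the mx
RRset below are assumptions, not recommendations), generating a key and signing
an RRset looks roughly like:
	key := new(dns.DNSKEY)
	key.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600}
	key.Flags = 256
	key.Protocol = 3
	key.Algorithm = dns.ECDSAP256SHA256
	priv, err := key.Generate(256)
	sig := new(dns.RRSIG)
	sig.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeRRSIG, Class: dns.ClassINET, Ttl: 3600}
	sig.TypeCovered = dns.TypeMX
	sig.Algorithm = key.Algorithm
	sig.Labels = 2
	sig.OrigTtl = 3600
	sig.Inception = uint32(time.Now().Unix())
	sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
	sig.KeyTag = key.KeyTag()
	sig.SignerName = key.Hdr.Name
	err = sig.Sign(priv.(crypto.Signer), []dns.RR{mx}) // mx as created above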
DYNAMIC UPDATES
Dynamic updates reuse the DNS message format, but rename three of the
sections. Question is Zone, Answer is Prerequisite, Authority is Update, only
the Additional is not renamed. See RFC 2136 for the gory details.
You can set a rather complex set of rules for the existence or absence of
certain resource records or names in a zone to specify if resource records
should be added or removed. The table from RFC 2136 supplemented with the Go
DNS function shows which functions exist to specify the prerequisites.
3.2.4 - Table Of Metavalues Used In Prerequisite Section
CLASS TYPE RDATA Meaning Function
--------------------------------------------------------------
ANY ANY empty Name is in use dns.NameUsed
ANY rrset empty RRset exists (value indep) dns.RRsetUsed
NONE ANY empty Name is not in use dns.NameNotUsed
NONE rrset empty RRset does not exist dns.RRsetNotUsed
zone rrset rr RRset exists (value dep) dns.Used
The prerequisite section can also be left empty. If you have decided on the
prerequisites you can tell what RRs should be added or deleted. The next table
shows the options you have and what functions to call.
3.4.2.6 - Table Of Metavalues Used In Update Section
CLASS TYPE RDATA Meaning Function
---------------------------------------------------------------
ANY ANY empty Delete all RRsets from name dns.RemoveName
ANY rrset empty Delete an RRset dns.RemoveRRset
NONE rrset rr Delete an RR from RRset dns.Remove
zone rrset rr Add to an RRset dns.Insert
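As a minimal illustrative sketch (names and addresses are placeholders), adding
an A record with a dynamic update could look like:
	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	rr, _ := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
	m.Insert([]dns.RR{rr})
	// send m with a client, e.g. in, _, err := c.Exchange(m, "ns1.example.org:53")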
TRANSACTION SIGNATURE
A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
Basic use pattern when querying with a TSIG name "axfr." (note that these key names
must be fully qualified - as they are domain names) and the base64 secret
"so6ZGir4GPAqINNh9U5c3A==":
If an incoming message contains a TSIG record it MUST be the last record in
the additional section (RFC2845 3.2). This means that you should make the
call to SetTsig last, right before executing the query. If you make any
changes to the RRset after calling SetTsig() the signature will be incorrect.
c := new(dns.Client)
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m := new(dns.Msg)
m.SetQuestion("miek.nl.", dns.TypeMX)
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
...
// When sending the TSIG RR is calculated and filled in before sending
When requesting a zone transfer (almost all TSIG usage is when requesting zone
transfers), with TSIG, this is the basic use pattern. In this example we
request an AXFR for miek.nl. with TSIG key named "axfr." and secret
"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
t := new(dns.Transfer)
m := new(dns.Msg)
t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
m.SetAxfr("miek.nl.")
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
c, err := t.In(m, "176.58.119.54:53")
for r := range c { ... }
You can now read the records from the transfer as they come in. Each envelope
is checked with TSIG. If something is not correct an error is returned.
Basic use pattern validating and replying to a message that has TSIG set.
server := &dns.Server{Addr: ":53", Net: "udp"}
server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
go server.ListenAndServe()
dns.HandleFunc(".", handleRequest)
func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
m := new(dns.Msg)
m.SetReply(r)
if r.IsTsig() != nil {
if w.TsigStatus() == nil {
// *Msg r has a TSIG record and it was validated
m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
} else {
// *Msg r has a TSIG record and it was not validated
}
}
w.WriteMsg(m)
}
PRIVATE RRS
RFC 6895 sets aside a range of type codes for private use. This range is 65,280
- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
can be used, before requesting an official type code from IANA.
See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
information.
EDNS0
EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
RFC 6891. It defines a new RR type, the OPT RR, which is then completely
abused.
Basic use pattern for creating an (empty) OPT RR:
o := new(dns.OPT)
o.Hdr.Name = "." // MUST be the root zone, per definition.
o.Hdr.Rrtype = dns.TypeOPT
The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR.
Basic use pattern for a server to check if (and which) options are set:
// o is a dns.OPT
for _, s := range o.Option {
switch e := s.(type) {
case *dns.EDNS0_NSID:
// do stuff with e.Nsid
case *dns.EDNS0_SUBNET:
// access e.Family, e.Address, etc.
}
}
SIG(0)
From RFC 2931:
SIG(0) provides protection for DNS transactions and requests ....
... protection for glue records, DNS requests, protection for message headers
on requests and responses, and protection of the overall integrity of a response.
It works like TSIG, except that SIG(0) uses public key cryptography, instead of
the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
Signing subsequent messages in multi-message sessions is not implemented.
*/
package dns

38
vendor/github.com/miekg/dns/duplicate.go generated vendored Normal file
View File

@ -0,0 +1,38 @@
package dns
//go:generate go run duplicate_generate.go
// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
// This means the header data is equal *and* the RDATA is the same. Returns true
// if so, otherwise false.
// It's a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool {
// Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) {
return false
}
// Check whether the RDATA is identical.
return r1.isDuplicate(r2)
}
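// Illustrative example for IsDuplicate: two A records that differ only in
// their TTL are considered duplicates.
//
//	a, _ := NewRR("miek.nl. 3600 IN A 192.0.2.1")
//	b, _ := NewRR("miek.nl. 1800 IN A 192.0.2.1")
//	IsDuplicate(a, b) // true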
func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RR_Header)
if !ok {
return false
}
if r1.Class != r2.Class {
return false
}
if r1.Rrtype != r2.Rrtype {
return false
}
if !isDuplicateName(r1.Name, r2.Name) {
return false
}
// ignore TTL
return true
}
// isDuplicateName checks if the domain names s1 and s2 are equal.
func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }

675
vendor/github.com/miekg/dns/edns.go generated vendored Normal file
View File

@ -0,0 +1,675 @@
package dns
import (
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"net"
"strconv"
)
// EDNS0 Option codes.
const (
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
EDNS0NSID = 0x3 // nsid (See RFC 5001)
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
EDNS0DHU = 0x6 // DS Hash Understood
EDNS0N3U = 0x7 // NSEC3 Hash Understood
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
EDNS0EXPIRE = 0x9 // EDNS0 expire
EDNS0COOKIE = 0xa // EDNS0 Cookie
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
_DO = 1 << 15 // DNSSEC OK
)
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
// See RFC 6891.
type OPT struct {
Hdr RR_Header
Option []EDNS0 `dns:"opt"`
}
func (rr *OPT) String() string {
s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
if rr.Do() {
s += "flags: do; "
} else {
s += "flags: ; "
}
s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
for _, o := range rr.Option {
switch o.(type) {
case *EDNS0_NSID:
s += "\n; NSID: " + o.String()
h, e := o.pack()
var r string
if e == nil {
for _, c := range h {
r += "(" + string(c) + ")"
}
s += " " + r
}
case *EDNS0_SUBNET:
s += "\n; SUBNET: " + o.String()
case *EDNS0_COOKIE:
s += "\n; COOKIE: " + o.String()
case *EDNS0_UL:
s += "\n; UPDATE LEASE: " + o.String()
case *EDNS0_LLQ:
s += "\n; LONG LIVED QUERIES: " + o.String()
case *EDNS0_DAU:
s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
case *EDNS0_DHU:
s += "\n; DS HASH UNDERSTOOD: " + o.String()
case *EDNS0_N3U:
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
case *EDNS0_LOCAL:
s += "\n; LOCAL OPT: " + o.String()
case *EDNS0_PADDING:
s += "\n; PADDING: " + o.String()
}
}
return s
}
func (rr *OPT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, o := range rr.Option {
l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := o.pack()
l += len(lo)
}
return l
}
func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on OPT")
}
func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined.
func (rr *OPT) Version() uint8 {
return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
}
// SetVersion sets the version of EDNS. This is usually zero.
func (rr *OPT) SetVersion(v uint8) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
}
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
func (rr *OPT) ExtendedRcode() int {
return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
}
// SetExtendedRcode sets the EDNS extended RCODE field.
//
// If the RCODE is not an extended RCODE, this will reset the extended RCODE field to 0.
func (rr *OPT) SetExtendedRcode(v uint16) {
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
}
// UDPSize returns the UDP buffer size.
func (rr *OPT) UDPSize() uint16 {
return rr.Hdr.Class
}
// SetUDPSize sets the UDP buffer size.
func (rr *OPT) SetUDPSize(size uint16) {
rr.Hdr.Class = size
}
// Do returns the value of the DO (DNSSEC OK) bit.
func (rr *OPT) Do() bool {
return rr.Hdr.Ttl&_DO == _DO
}
// SetDo sets the DO (DNSSEC OK) bit.
// If we pass an argument, set the DO bit to that value.
// It is possible to pass 2 or more arguments. Any arguments after the 1st are silently ignored.
func (rr *OPT) SetDo(do ...bool) {
if len(do) == 1 {
if do[0] {
rr.Hdr.Ttl |= _DO
} else {
rr.Hdr.Ttl &^= _DO
}
} else {
rr.Hdr.Ttl |= _DO
}
}
// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
type EDNS0 interface {
// Option returns the option code for the option.
Option() uint16
// pack returns the bytes of the option data.
pack() ([]byte, error)
// unpack sets the data as found in the buffer. It also sets
// the length of the slice to the length of the option data.
unpack([]byte) error
// String returns the string representation of the option.
String() string
// copy returns a deep-copy of the option.
copy() EDNS0
}
// The EDNS0_NSID option is used to retrieve a nameserver
// identifier. When sending a request, Nsid must be set to the empty string.
// The identifier is an opaque string encoded as hex.
// Basic use pattern for creating an nsid option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_NSID)
// e.Code = dns.EDNS0NSID
// e.Nsid = "AA"
// o.Option = append(o.Option, e)
type EDNS0_NSID struct {
Code uint16 // Always EDNS0NSID
Nsid string // This string needs to be hex encoded
}
func (e *EDNS0_NSID) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Nsid)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
func (e *EDNS0_NSID) String() string { return e.Nsid }
func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} }
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
// an idea of where the client lives. See RFC 7871. It can then give back a different
// answer depending on the location or network topology.
// Basic use pattern for creating a subnet option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_SUBNET)
// e.Code = dns.EDNS0SUBNET
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
// e.SourceScope = 0
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
// o.Option = append(o.Option, e)
//
// This code will parse all the available bits when unpacking (up to optlen).
// When packing it will apply SourceNetmask. If you need more advanced logic,
// patches welcome and good luck.
type EDNS0_SUBNET struct {
Code uint16 // Always EDNS0SUBNET
Family uint16 // 1 for IP, 2 for IP6
SourceNetmask uint8
SourceScope uint8
Address net.IP
}
// Option implements the EDNS0 interface.
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint16(b[0:], e.Family)
b[2] = e.SourceNetmask
b[3] = e.SourceScope
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// We may not need to complain either
if e.SourceNetmask != 0 {
return nil, errors.New("dns: bad address family")
}
case 1:
if e.SourceNetmask > net.IPv4len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address.To4()) != net.IPv4len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
case 2:
if e.SourceNetmask > net.IPv6len*8 {
return nil, errors.New("dns: bad netmask")
}
if len(e.Address) != net.IPv6len {
return nil, errors.New("dns: bad address")
}
ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
b = append(b, ip[:needLength]...)
default:
return nil, errors.New("dns: bad address family")
}
return b, nil
}
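// Illustrative wire example for this option: Family=1, SourceNetmask=24,
// SourceScope=0 and Address=192.0.2.55 packs as 00 01 18 00 followed by the
// three masked address bytes c0 00 02 (needLength = (24+7)/8 = 3).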
func (e *EDNS0_SUBNET) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Family = binary.BigEndian.Uint16(b)
e.SourceNetmask = b[2]
e.SourceScope = b[3]
switch e.Family {
case 0:
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
// It's okay to accept such a packet
if e.SourceNetmask != 0 {
return errors.New("dns: bad address family")
}
e.Address = net.IPv4(0, 0, 0, 0)
case 1:
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv4len)
copy(addr, b[4:])
e.Address = addr.To16()
case 2:
if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
return errors.New("dns: bad netmask")
}
addr := make(net.IP, net.IPv6len)
copy(addr, b[4:])
e.Address = addr
default:
return errors.New("dns: bad address family")
}
return nil
}
func (e *EDNS0_SUBNET) String() (s string) {
if e.Address == nil {
s = "<nil>"
} else if e.Address.To4() != nil {
s = e.Address.String()
} else {
s = "[" + e.Address.String() + "]"
}
s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
return
}
func (e *EDNS0_SUBNET) copy() EDNS0 {
return &EDNS0_SUBNET{
e.Code,
e.Family,
e.SourceNetmask,
e.SourceScope,
e.Address,
}
}
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_COOKIE)
// e.Code = dns.EDNS0COOKIE
// e.Cookie = "24a5ac.."
// o.Option = append(o.Option, e)
//
// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
// always 8 bytes. It may then optionally be followed by the server cookie. The server
// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
//
// cCookie := o.Cookie[:16]
// sCookie := o.Cookie[16:]
//
// There is no guarantee that the Cookie string has a specific length.
type EDNS0_COOKIE struct {
Code uint16 // Always EDNS0COOKIE
Cookie string // Hex-encoded cookie data
}
func (e *EDNS0_COOKIE) pack() ([]byte, error) {
h, err := hex.DecodeString(e.Cookie)
if err != nil {
return nil, err
}
return h, nil
}
// Option implements the EDNS0 interface.
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean
// up after themselves. This is a draft RFC and more information can be found at
// https://tools.ietf.org/html/draft-sekar-dns-ul-02
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_UL)
// e.Code = dns.EDNS0UL
// e.Lease = 120 // in seconds
// o.Option = append(o.Option, e)
type EDNS0_UL struct {
Code uint16 // Always EDNS0UL
Lease uint32
KeyLease uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) }
func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} }
// Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) {
var b []byte
if e.KeyLease == 0 {
b = make([]byte, 4)
} else {
b = make([]byte, 8)
binary.BigEndian.PutUint32(b[4:], e.KeyLease)
}
binary.BigEndian.PutUint32(b, e.Lease)
return b, nil
}
func (e *EDNS0_UL) unpack(b []byte) error {
switch len(b) {
case 4:
e.KeyLease = 0
case 8:
e.KeyLease = binary.BigEndian.Uint32(b[4:])
default:
return ErrBuf
}
e.Lease = binary.BigEndian.Uint32(b)
return nil
}
// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
// Implemented for completeness, as the EDNS0 type code is assigned.
type EDNS0_LLQ struct {
Code uint16 // Always EDNS0LLQ
Version uint16
Opcode uint16
Error uint16
Id uint64
LeaseLife uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
func (e *EDNS0_LLQ) pack() ([]byte, error) {
b := make([]byte, 18)
binary.BigEndian.PutUint16(b[0:], e.Version)
binary.BigEndian.PutUint16(b[2:], e.Opcode)
binary.BigEndian.PutUint16(b[4:], e.Error)
binary.BigEndian.PutUint64(b[6:], e.Id)
binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
return b, nil
}
func (e *EDNS0_LLQ) unpack(b []byte) error {
if len(b) < 18 {
return ErrBuf
}
e.Version = binary.BigEndian.Uint16(b[0:])
e.Opcode = binary.BigEndian.Uint16(b[2:])
e.Error = binary.BigEndian.Uint16(b[4:])
e.Id = binary.BigEndian.Uint64(b[6:])
e.LeaseLife = binary.BigEndian.Uint32(b[14:])
return nil
}
func (e *EDNS0_LLQ) String() string {
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
" " + strconv.FormatUint(uint64(e.LeaseLife), 10)
return s
}
func (e *EDNS0_LLQ) copy() EDNS0 {
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
}
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct {
Code uint16 // Always EDNS0DAU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DAU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := AlgorithmToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
type EDNS0_DHU struct {
Code uint16 // Always EDNS0DHU
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DHU) String() string {
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
type EDNS0_N3U struct {
Code uint16 // Always EDNS0N3U
AlgCode []uint8
}
// Option implements the EDNS0 interface.
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_N3U) String() string {
// Re-use the hash map
s := ""
for _, alg := range e.AlgCode {
if a, ok := HashToString[alg]; ok {
s += " " + a
} else {
s += " " + strconv.Itoa(int(alg))
}
}
return s
}
func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct {
Code uint16 // Always EDNS0EXPIRE
Expire uint32
}
// Option implements the EDNS0 interface.
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} }
func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
b := make([]byte, 4)
binary.BigEndian.PutUint32(b, e.Expire)
return b, nil
}
func (e *EDNS0_EXPIRE) unpack(b []byte) error {
if len(b) == 0 {
// zero-length EXPIRE query, see RFC 7314 Section 2
return nil
}
if len(b) < 4 {
return ErrBuf
}
e.Expire = binary.BigEndian.Uint32(b)
return nil
}
// The EDNS0_LOCAL option is used for local/experimental purposes. The option
// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
// (RFC6891), although any unassigned code can actually be used. The content of
// the option is made available in Data, unaltered.
// Basic use pattern for creating a local option:
//
// o := new(dns.OPT)
// o.Hdr.Name = "."
// o.Hdr.Rrtype = dns.TypeOPT
// e := new(dns.EDNS0_LOCAL)
// e.Code = dns.EDNS0LOCALSTART
// e.Data = []byte{72, 82, 74}
// o.Option = append(o.Option, e)
type EDNS0_LOCAL struct {
Code uint16
Data []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
}
func (e *EDNS0_LOCAL) copy() EDNS0 {
b := make([]byte, len(e.Data))
copy(b, e.Data)
return &EDNS0_LOCAL{e.Code, b}
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data))
copied := copy(b, e.Data)
if copied != len(e.Data) {
return nil, ErrBuf
}
return b, nil
}
func (e *EDNS0_LOCAL) unpack(b []byte) error {
e.Data = make([]byte, len(b))
copied := copy(e.Data, b)
if copied != len(b) {
return ErrBuf
}
return nil
}
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
// the TCP connection alive. See RFC 7828.
type EDNS0_TCP_KEEPALIVE struct {
Code uint16 // Always EDNSTCPKEEPALIVE
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
}
// Option implements the EDNS0 interface.
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
if e.Timeout != 0 && e.Length != 2 {
return nil, errors.New("dns: timeout specified but length is not 2")
}
if e.Timeout == 0 && e.Length != 0 {
return nil, errors.New("dns: timeout not specified but length is not 0")
}
b := make([]byte, 4+e.Length)
binary.BigEndian.PutUint16(b[0:], e.Code)
binary.BigEndian.PutUint16(b[2:], e.Length)
if e.Length == 2 {
binary.BigEndian.PutUint16(b[4:], e.Timeout)
}
return b, nil
}
func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
if len(b) < 4 {
return ErrBuf
}
e.Length = binary.BigEndian.Uint16(b[2:4])
if e.Length != 0 && e.Length != 2 {
return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
}
if e.Length == 2 {
if len(b) < 6 {
return ErrBuf
}
e.Timeout = binary.BigEndian.Uint16(b[4:6])
}
return nil
}
func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
s = "use tcp keep-alive"
if e.Length == 0 {
s += ", timeout omitted"
} else {
s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
}
return
}
func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
// EDNS0_PADDING option is used to add padding to a request/response. The default
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
// compression is applied before encryption which may break signatures.
type EDNS0_PADDING struct {
Padding []byte
}
// Option implements the EDNS0 interface.
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
func (e *EDNS0_PADDING) copy() EDNS0 {
b := make([]byte, len(e.Padding))
copy(b, e.Padding)
return &EDNS0_PADDING{b}
}

93
vendor/github.com/miekg/dns/format.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package dns
import (
"net"
"reflect"
"strconv"
)
// NumField returns the number of rdata fields r has.
func NumField(r RR) int {
return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
}
// Field returns the rdata field i as a string. Fields are indexed starting from 1.
// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
// string where the types are concatenated using a space.
// Accessing non-existent fields will cause a panic.
func Field(r RR, i int) string {
if i == 0 {
return ""
}
d := reflect.ValueOf(r).Elem().Field(i)
switch d.Kind() {
case reflect.String:
return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(d.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return strconv.FormatUint(d.Uint(), 10)
case reflect.Slice:
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv4len {
return ""
}
if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint())).String()
}
return net.IPv4(byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`:
if d.Len() < net.IPv6len {
return ""
}
return net.IP{
byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()),
byte(d.Index(2).Uint()),
byte(d.Index(3).Uint()),
byte(d.Index(4).Uint()),
byte(d.Index(5).Uint()),
byte(d.Index(6).Uint()),
byte(d.Index(7).Uint()),
byte(d.Index(8).Uint()),
byte(d.Index(9).Uint()),
byte(d.Index(10).Uint()),
byte(d.Index(11).Uint()),
byte(d.Index(12).Uint()),
byte(d.Index(13).Uint()),
byte(d.Index(14).Uint()),
byte(d.Index(15).Uint()),
}.String()
case `dns:"nsec"`:
if d.Len() == 0 {
return ""
}
s := Type(d.Index(0).Uint()).String()
for i := 1; i < d.Len(); i++ {
s += " " + Type(d.Index(i).Uint()).String()
}
return s
default:
// if it does not have a tag it's a string slice
fallthrough
case `dns:"txt"`:
if d.Len() == 0 {
return ""
}
s := d.Index(0).String()
for i := 1; i < d.Len(); i++ {
s += " " + d.Index(i).String()
}
return s
}
}
return ""
}
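// Illustrative example:
//
//	mx, _ := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
//	NumField(mx) // 2
//	Field(mx, 1) // "10" (the Preference field)
//	Field(mx, 2) // "mx.miek.nl." (the Mx field)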

32
vendor/github.com/miekg/dns/fuzz.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// +build fuzz
package dns
import "strings"
func Fuzz(data []byte) int {
msg := new(Msg)
if err := msg.Unpack(data); err != nil {
return 0
}
if _, err := msg.Pack(); err != nil {
return 0
}
return 1
}
func FuzzNewRR(data []byte) int {
str := string(data)
// Do not fuzz lines that include the $INCLUDE keyword, and hint the fuzzer
// to avoid them.
// See GH#1025 for context.
if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
return -1
}
if _, err := NewRR(str); err != nil {
return 0
}
return 1
}

247
vendor/github.com/miekg/dns/generate.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
package dns
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
// Parse the $GENERATE statement as used in BIND9 zones.
// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
// We are called after '$GENERATE '. After which we expect:
// * the range (12-24/2)
// * lhs (ownername)
// * [[ttl][class]]
// * type
// * rhs (rdata)
// But we are lazy here; only the range is parsed, and *all* occurrences
// of $ after that are interpreted.
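// For example (illustrative), the directive
//
//	$GENERATE 1-3 host-$ A 192.0.2.$
//
// produces the three records
//
//	host-1 A 192.0.2.1
//	host-2 A 192.0.2.2
//	host-3 A 192.0.2.3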
func (zp *ZoneParser) generate(l lex) (RR, bool) {
token := l.token
step := 1
if i := strings.IndexByte(token, '/'); i >= 0 {
if i+1 == len(token) {
return zp.setParseError("bad step in $GENERATE range", l)
}
s, err := strconv.Atoi(token[i+1:])
if err != nil || s <= 0 {
return zp.setParseError("bad step in $GENERATE range", l)
}
step = s
token = token[:i]
}
sx := strings.SplitN(token, "-", 2)
if len(sx) != 2 {
return zp.setParseError("bad start-stop in $GENERATE range", l)
}
start, err := strconv.Atoi(sx[0])
if err != nil {
return zp.setParseError("bad start in $GENERATE range", l)
}
end, err := strconv.Atoi(sx[1])
if err != nil {
return zp.setParseError("bad stop in $GENERATE range", l)
}
if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
return zp.setParseError("bad range in $GENERATE range", l)
}
// _BLANK
l, ok := zp.c.Next()
if !ok || l.value != zBlank {
return zp.setParseError("garbage after $GENERATE range", l)
}
// Create a complete new string, which we then parse again.
var s string
for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
if l.err {
return zp.setParseError("bad data in $GENERATE directive", l)
}
if l.value == zNewline {
break
}
s += l.token
}
r := &generateReader{
s: s,
cur: start,
start: start,
end: end,
step: step,
file: zp.file,
lex: &l,
}
zp.sub = NewZoneParser(r, zp.origin, zp.file)
zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
zp.sub.generateDisallowed = true
zp.sub.SetDefaultTTL(defaultTtl)
return zp.subNext()
}
type generateReader struct {
s string
si int
cur int
start int
end int
step int
mod bytes.Buffer
escape bool
eof bool
file string
lex *lex
}
func (r *generateReader) parseError(msg string, end int) *ParseError {
r.eof = true // Make errors sticky.
l := *r.lex
l.token = r.s[r.si-1 : end]
l.column += r.si // l.column starts one zBLANK before r.s
return &ParseError{r.file, msg, l}
}
func (r *generateReader) Read(p []byte) (int, error) {
// NewZLexer, through NewZoneParser, should use ReadByte and
// not end up here.
panic("not implemented")
}
func (r *generateReader) ReadByte() (byte, error) {
if r.eof {
return 0, io.EOF
}
if r.mod.Len() > 0 {
return r.mod.ReadByte()
}
if r.si >= len(r.s) {
r.si = 0
r.cur += r.step
r.eof = r.cur > r.end || r.cur < 0
return '\n', nil
}
si := r.si
r.si++
switch r.s[si] {
case '\\':
if r.escape {
r.escape = false
return '\\', nil
}
r.escape = true
return r.ReadByte()
case '$':
if r.escape {
r.escape = false
return '$', nil
}
mod := "%d"
if si >= len(r.s)-1 {
// End of the string
fmt.Fprintf(&r.mod, mod, r.cur)
return r.mod.ReadByte()
}
if r.s[si+1] == '$' {
r.si++
return '$', nil
}
var offset int
// Search for { and }
if r.s[si+1] == '{' {
// Modifier block
sep := strings.Index(r.s[si+2:], "}")
if sep < 0 {
return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
}
var errMsg string
mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
if errMsg != "" {
return 0, r.parseError(errMsg, si+3+sep)
}
if r.start+offset < 0 || r.end+offset > 1<<31-1 {
return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
}
r.si += 2 + sep // Jump to it
}
fmt.Fprintf(&r.mod, mod, r.cur+offset)
return r.mod.ReadByte()
default:
if r.escape { // Pretty useless here
r.escape = false
return r.ReadByte()
}
return r.s[si], nil
}
}
// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
func modToPrintf(s string) (string, int, string) {
// Modifier is { offset [ ,width [ ,base ] ] } - provide default
// values for optional width and type, if necessary.
var offStr, widthStr, base string
switch xs := strings.Split(s, ","); len(xs) {
case 1:
offStr, widthStr, base = xs[0], "0", "d"
case 2:
offStr, widthStr, base = xs[0], xs[1], "d"
case 3:
offStr, widthStr, base = xs[0], xs[1], xs[2]
default:
return "", 0, "bad modifier in $GENERATE"
}
switch base {
case "o", "d", "x", "X":
default:
return "", 0, "bad base in $GENERATE"
}
offset, err := strconv.Atoi(offStr)
if err != nil {
return "", 0, "bad offset in $GENERATE"
}
width, err := strconv.Atoi(widthStr)
if err != nil || width < 0 || width > 255 {
return "", 0, "bad width in $GENERATE"
}
if width == 0 {
return "%" + base, offset, ""
}
return "%0" + widthStr + base, offset, ""
}
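// Illustrative example: the modifier block ${-1,3,d} yields the format "%03d"
// with offset -1, so with the current counter at 10 it expands to "009".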

11
vendor/github.com/miekg/dns/go.mod generated vendored Normal file
View File

@ -0,0 +1,11 @@
module github.com/miekg/dns
go 1.12
require (
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/net v0.0.0-20190923162816-aa69164e4478
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
)

39
vendor/github.com/miekg/dns/go.sum generated vendored Normal file
View File

@ -0,0 +1,39 @@
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc=
golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM=
golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0=
golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

212
vendor/github.com/miekg/dns/labels.go generated vendored Normal file
View File

@ -0,0 +1,212 @@
package dns
// Holds a bunch of helper functions for dealing with labels.
// SplitDomainName splits a name string into its labels.
// www.miek.nl. returns []string{"www", "miek", "nl"}
// .www.miek.nl. returns []string{"", "www", "miek", "nl"},
// The root label (.) returns nil. Note that using
// strings.Split(s, ".") will work in most cases, but does not handle
// escaped dots (\.) for instance.
// s must be a syntactically valid domain name, see IsDomainName.
func SplitDomainName(s string) (labels []string) {
if len(s) == 0 {
return nil
}
fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s)
begin := 0
if IsFqdn(s) {
fqdnEnd = len(s) - 1
} else {
fqdnEnd = len(s)
}
switch len(idx) {
case 0:
return nil
case 1:
// no-op
default:
for _, end := range idx[1:] {
labels = append(labels, s[begin:end-1])
begin = end
}
}
return append(labels, s[begin:fqdnEnd])
}
// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
// www.miek.nl. and www.bla.nl. have one label in common: nl
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
// the first check: root label
if s1 == "." || s2 == "." {
return 0
}
l1 := Split(s1)
l2 := Split(s2)
j1 := len(l1) - 1 // end
i1 := len(l1) - 2 // start
j2 := len(l2) - 1
i2 := len(l2) - 2
// the second check can be done here: last/only label
// before we fall through into the for-loop below
if equal(s1[l1[j1]:], s2[l2[j2]:]) {
n++
} else {
return
}
for {
if i1 < 0 || i2 < 0 {
break
}
if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
n++
} else {
break
}
j1--
i1--
j2--
i2--
}
return
}
// CountLabel counts the number of labels in the string s.
// s must be a syntactically valid domain name.
func CountLabel(s string) (labels int) {
if s == "." {
return
}
off := 0
end := false
for {
off, end = NextLabel(s, off)
labels++
if end {
return
}
}
}
// Split splits a name s into its label indexes.
// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
// The root name (.) returns nil. Also see SplitDomainName.
// s must be a syntactically valid domain name.
func Split(s string) []int {
if s == "." {
return nil
}
idx := make([]int, 1, 3)
off := 0
end := false
for {
off, end = NextLabel(s, off)
if end {
return idx
}
idx = append(idx, off)
}
}
// NextLabel returns the index of the start of the next label in the
// string s starting at offset.
// The bool end is true when the end of the string has been reached.
// Also see PrevLabel.
func NextLabel(s string, offset int) (i int, end bool) {
if s == "" {
return 0, true
}
for i = offset; i < len(s)-1; i++ {
if s[i] != '.' {
continue
}
j := i - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-i)%2 == 0 {
continue
}
return i + 1, false
}
return i + 1, true
}
// PrevLabel returns the index of the label when starting from the right and
// jumping n labels to the left.
// The bool start is true when the start of the string has been overshot.
// Also see NextLabel.
func PrevLabel(s string, n int) (i int, start bool) {
if s == "" {
return 0, true
}
if n == 0 {
return len(s), false
}
l := len(s) - 1
if s[l] == '.' {
l--
}
for ; l >= 0 && n > 0; l-- {
if s[l] != '.' {
continue
}
j := l - 1
for j >= 0 && s[j] == '\\' {
j--
}
if (j-l)%2 == 0 {
continue
}
n--
if n == 0 {
return l + 1, false
}
}
return 0, n > 1
}
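// Illustrative examples for PrevLabel:
//
//	PrevLabel("www.miek.nl.", 1) // 9, false  (s[9:] == "nl.")
//	PrevLabel("www.miek.nl.", 3) // 0, false  (s[0:] == "www.miek.nl.")
//	PrevLabel("www.miek.nl.", 4) // 0, true   (overshot the start)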
// equal compares a and b while ignoring case. It returns true when they are equal, otherwise false.
func equal(a, b string) bool {
// might be lifted into API function.
la := len(a)
lb := len(b)
if la != lb {
return false
}
for i := la - 1; i >= 0; i-- {
ai := a[i]
bi := b[i]
if ai >= 'A' && ai <= 'Z' {
ai |= 'a' - 'A'
}
if bi >= 'A' && bi <= 'Z' {
bi |= 'a' - 'A'
}
if ai != bi {
return false
}
}
return true
}

44
vendor/github.com/miekg/dns/listen_go111.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
// +build go1.11
// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
import (
"context"
"net"
"syscall"
"golang.org/x/sys/unix"
)
const supportsReusePort = true
func reuseportControl(network, address string, c syscall.RawConn) error {
var opErr error
err := c.Control(func(fd uintptr) {
opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
})
if err != nil {
return err
}
return opErr
}
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.Listen(context.Background(), network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
var lc net.ListenConfig
if reuseport {
lc.Control = reuseportControl
}
return lc.ListenPacket(context.Background(), network, addr)
}

23
vendor/github.com/miekg/dns/listen_go_not111.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
import "net"
const supportsReusePort = false
func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.Listen(network, addr)
}
func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
if reuseport {
// TODO(tmthrgd): return an error?
}
return net.ListenPacket(network, addr)
}

1196
vendor/github.com/miekg/dns/msg.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

810
vendor/github.com/miekg/dns/msg_helpers.go generated vendored Normal file
View File

@ -0,0 +1,810 @@
package dns
import (
"encoding/base32"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"net"
"strings"
)
// helper functions called from the generated zmsg.go
// These functions are named after the tag they help pack/unpack; if there is no tag, it is the name
// of the type they pack/unpack (string, int, etc.). We prefix all with unpackData or packData, e.g. packDataA or
// packDataDomainName.
func unpackDataA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv4len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking a"}
}
a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
off += net.IPv4len
return a, off, nil
}
func packDataA(a net.IP, msg []byte, off int) (int, error) {
switch len(a) {
case net.IPv4len, net.IPv6len:
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4())
off += net.IPv4len
case 0:
// Allowed, for dynamic updates.
default:
return len(msg), &Error{err: "overflow packing a"}
}
return off, nil
}
func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
if off+net.IPv6len > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
}
aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
off += net.IPv6len
return aaaa, off, nil
}
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
switch len(aaaa) {
case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa)
off += net.IPv6len
case 0:
// Allowed, dynamic updates.
default:
return len(msg), &Error{err: "overflow packing aaaa"}
}
return off, nil
}
// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
// re-sliced msg according to the expected length of the RR.
func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
hdr := RR_Header{}
if off == len(msg) {
return hdr, off, msg, nil
}
hdr.Name, off, err = UnpackDomainName(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rrtype, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Class, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Ttl, off, err = unpackUint32(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
hdr.Rdlength, off, err = unpackUint16(msg, off)
if err != nil {
return hdr, len(msg), msg, err
}
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
return hdr, off, msg, err
}
// packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression.
func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) {
return off, nil
}
off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Rrtype, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(hdr.Class, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint32(hdr.Ttl, msg, off)
if err != nil {
return len(msg), err
}
off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil {
return len(msg), err
}
return off, nil
}
// helper functions.
// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
// Returns an error if msg is smaller than the expected size.
func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
lenrd := off + int(rdlength)
if lenrd > len(msg) {
return msg, &Error{err: "overflowing header size"}
}
return msg[:lenrd], nil
}
var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
func fromBase32(s []byte) (buf []byte, err error) {
for i, b := range s {
if b >= 'a' && b <= 'z' {
s[i] = b - 32
}
}
buflen := base32HexNoPadEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base32HexNoPadEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase32(b []byte) string {
return base32HexNoPadEncoding.EncodeToString(b)
}
func fromBase64(s []byte) (buf []byte, err error) {
buflen := base64.StdEncoding.DecodedLen(len(s))
buf = make([]byte, buflen)
n, err := base64.StdEncoding.Decode(buf, s)
buf = buf[:n]
return
}
func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
// dynamicUpdate returns true if the Rdlength is zero.
func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"}
}
return msg[off], off + 1, nil
}
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"}
}
msg[off] = i
return off + 1, nil
}
func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
if off+2 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint16"}
}
return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
}
func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
if off+2 > len(msg) {
return len(msg), &Error{err: "overflow packing uint16"}
}
binary.BigEndian.PutUint16(msg[off:], i)
return off + 2, nil
}
func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
if off+4 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint32"}
}
return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
}
func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
if off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing uint32"}
}
binary.BigEndian.PutUint32(msg[off:], i)
return off + 4, nil
}
func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
if off+6 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
}
// Used in TSIG, where only the low 48 bits are used, so treat the value as a uint48 (6 bytes)
i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
uint64(msg[off+4])<<8 | uint64(msg[off+5])
off += 6
return i, off, nil
}
func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
if off+6 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64 as uint48"}
}
msg[off] = byte(i >> 40)
msg[off+1] = byte(i >> 32)
msg[off+2] = byte(i >> 24)
msg[off+3] = byte(i >> 16)
msg[off+4] = byte(i >> 8)
msg[off+5] = byte(i)
off += 6
return off, nil
}
func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
if off+8 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint64"}
}
return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
}
func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
if off+8 > len(msg) {
return len(msg), &Error{err: "overflow packing uint64"}
}
binary.BigEndian.PutUint64(msg[off:], i)
off += 8
return off, nil
}
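// Illustrative sketch, not part of the upstream file: round-tripping a value
// through the fixed-width helpers above. The buffer size and the value are arbitrary.
func exampleUint32RoundTrip() (uint32, error) {
buf := make([]byte, 4)
if _, err := packUint32(0xCAFEBABE, buf, 0); err != nil {
return 0, err
}
v, _, err := unpackUint32(buf, 0)
return v, err // v == 0xCAFEBABE
}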
func unpackString(msg []byte, off int) (string, int, error) {
if off+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
l := int(msg[off])
off++
if off+l > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
var s strings.Builder
consumed := 0
for i, b := range msg[off : off+l] {
switch {
case b == '"' || b == '\\':
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteByte('\\')
s.WriteByte(b)
consumed = i + 1
case b < ' ' || b > '~': // unprintable
if consumed == 0 {
s.Grow(l * 2)
}
s.Write(msg[off+consumed : off+i])
s.WriteString(escapeByte(b))
consumed = i + 1
}
}
if consumed == 0 { // no escaping needed
return string(msg[off : off+l]), off + l, nil
}
s.Write(msg[off+consumed : off+l])
return s.String(), off + l, nil
}
func packString(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packTxtString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base32"}
}
s := toBase32(msg[off:end])
return s, end, nil
}
func packStringBase32(s string, msg []byte, off int) (int, error) {
b32, err := fromBase32([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b32) > len(msg) {
return len(msg), &Error{err: "overflow packing base32"}
}
copy(msg[off:off+len(b32)], b32)
off += len(b32)
return off, nil
}
func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is base64 encoded value, so we don't need an explicit length
// to be set. Thus far all RRs that have base64 encoded fields have those as their
// last one. What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking base64"}
}
s := toBase64(msg[off:end])
return s, end, nil
}
func packStringBase64(s string, msg []byte, off int) (int, error) {
b64, err := fromBase64([]byte(s))
if err != nil {
return len(msg), err
}
if off+len(b64) > len(msg) {
return len(msg), &Error{err: "overflow packing base64"}
}
copy(msg[off:off+len(b64)], b64)
off += len(b64)
return off, nil
}
func unpackStringHex(msg []byte, off, end int) (string, int, error) {
// Rest of the RR is hex encoded value, so we don't need an explicit length
// to be set. NSEC and TSIG have hex fields with a length field.
// What we do need is the end of the RR!
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking hex"}
}
s := hex.EncodeToString(msg[off:end])
return s, end, nil
}
func packStringHex(s string, msg []byte, off int) (int, error) {
h, err := hex.DecodeString(s)
if err != nil {
return len(msg), err
}
if off+len(h) > len(msg) {
return len(msg), &Error{err: "overflow packing hex"}
}
copy(msg[off:off+len(h)], h)
off += len(h)
return off, nil
}
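// Illustrative sketch, not part of the upstream file: round-tripping a hex
// field with packStringHex/unpackStringHex. The value is arbitrary.
func exampleHexRoundTrip() (string, error) {
buf := make([]byte, 8)
off, err := packStringHex("0a1b2c3d", buf, 0)
if err != nil {
return "", err
}
s, _, err := unpackStringHex(buf, 0, off)
return s, err // "0a1b2c3d"
}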
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off)
if err != nil {
return nil, len(msg), err
}
return txt, off, nil
}
func packStringTxt(s []string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
off, err := packTxt(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0
Option:
var code uint16
if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
code = binary.BigEndian.Uint16(msg[off:])
off += 2
optlen := binary.BigEndian.Uint16(msg[off:])
off += 2
if off+int(optlen) > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"}
}
switch code {
case EDNS0NSID:
e := new(EDNS0_NSID)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0SUBNET:
e := new(EDNS0_SUBNET)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0COOKIE:
e := new(EDNS0_COOKIE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0EXPIRE:
e := new(EDNS0_EXPIRE)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0UL:
e := new(EDNS0_UL)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0LLQ:
e := new(EDNS0_LLQ)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DAU:
e := new(EDNS0_DAU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0DHU:
e := new(EDNS0_DHU)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0N3U:
e := new(EDNS0_N3U)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
case EDNS0PADDING:
e := new(EDNS0_PADDING)
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
default:
e := new(EDNS0_LOCAL)
e.Code = code
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
return nil, len(msg), err
}
edns = append(edns, e)
off += int(optlen)
}
if off < len(msg) {
goto Option
}
return edns, off, nil
}
func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
for _, el := range options {
b, err := el.pack()
if err != nil || off+4 > len(msg) {
return len(msg), &Error{err: "overflow packing opt"}
}
binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
off += 4
if off+len(b) > len(msg) {
copy(msg[off:], b)
off = len(msg)
continue
}
// Actual data
copy(msg[off:off+len(b)], b)
off += len(b)
}
return off, nil
}
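// Illustrative sketch, not part of the upstream file: packing a single EDNS0
// NSID option and unpacking it again. The NSID payload (hex for "example") is arbitrary.
func exampleOptRoundTrip() ([]EDNS0, error) {
nsid := &EDNS0_NSID{Code: EDNS0NSID, Nsid: "6578616d706c65"}
buf := make([]byte, 64)
off, err := packDataOpt([]EDNS0{nsid}, buf, 0)
if err != nil {
return nil, err
}
opts, _, err := unpackDataOpt(buf[:off], 0)
return opts, err // one *EDNS0_NSID with the same hex payload
}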
func unpackStringOctet(msg []byte, off int) (string, int, error) {
s := string(msg[off:])
return s, len(msg), nil
}
func packStringOctet(s string, msg []byte, off int) (int, error) {
txtTmp := make([]byte, 256*4+1)
off, err := packOctetString(s, msg, off, txtTmp)
if err != nil {
return len(msg), err
}
return off, nil
}
func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
var nsec []uint16
length, window, lastwindow := 0, 0, -1
for off < len(msg) {
if off+2 > len(msg) {
return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
}
window = int(msg[off])
length = int(msg[off+1])
off += 2
if window <= lastwindow {
// RFC 4034: Blocks are present in the NSEC RR RDATA in
// increasing numerical order.
return nsec, len(msg), &Error{err: "out of order NSEC block"}
}
if length == 0 {
// RFC 4034: Blocks with no types present MUST NOT be included.
return nsec, len(msg), &Error{err: "empty NSEC block"}
}
if length > 32 {
return nsec, len(msg), &Error{err: "NSEC block too long"}
}
if off+length > len(msg) {
return nsec, len(msg), &Error{err: "overflowing NSEC block"}
}
// Walk the bytes in the window and extract the type bits
for j, b := range msg[off : off+length] {
// Check the bits one by one, and set the type
if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0))
}
if b&0x40 == 0x40 {
nsec = append(nsec, uint16(window*256+j*8+1))
}
if b&0x20 == 0x20 {
nsec = append(nsec, uint16(window*256+j*8+2))
}
if b&0x10 == 0x10 {
nsec = append(nsec, uint16(window*256+j*8+3))
}
if b&0x8 == 0x8 {
nsec = append(nsec, uint16(window*256+j*8+4))
}
if b&0x4 == 0x4 {
nsec = append(nsec, uint16(window*256+j*8+5))
}
if b&0x2 == 0x2 {
nsec = append(nsec, uint16(window*256+j*8+6))
}
if b&0x1 == 0x1 {
nsec = append(nsec, uint16(window*256+j*8+7))
}
}
off += length
lastwindow = window
}
return nsec, off, nil
}
// typeBitMapLen is a helper function which computes the "maximum" length of
// the NSEC Type BitMap field.
func typeBitMapLen(bitmap []uint16) int {
var l int
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
l += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
// packDataNsec would return Error{err: "nsec bits out of order"} here, but
// when computing the length, we want to be liberal.
continue
}
lastwindow, lastlength = window, length
}
l += int(lastlength) + 2
return l
}
func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
if len(bitmap) == 0 {
return off, nil
}
var lastwindow, lastlength uint16
for _, t := range bitmap {
window := t / 256
length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
off += int(lastlength) + 2
lastlength = 0
}
if window < lastwindow || length < lastlength {
return len(msg), &Error{err: "nsec bits out of order"}
}
if off+2+int(length) > len(msg) {
return len(msg), &Error{err: "overflow packing nsec"}
}
// Setting the window #
msg[off] = byte(window)
// Setting the octets length
msg[off+1] = byte(length)
// Setting the bit value for the type in the right octet
msg[off+1+int(length)] |= byte(1 << (7 - t%8))
lastwindow, lastlength = window, length
}
off += int(lastlength) + 2
return off, nil
}
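// Illustrative sketch, not part of the upstream file: TypeA (1), TypeMX (15)
// and TypeAAAA (28) all live in window 0, so the packed bitmap is a window
// byte, a length byte (4) and four bitmap octets; unpackDataNsec recovers them.
func exampleNsecBitmapRoundTrip() ([]uint16, error) {
buf := make([]byte, 16)
off, err := packDataNsec([]uint16{TypeA, TypeMX, TypeAAAA}, buf, 0)
if err != nil {
return nil, err
}
types, _, err := unpackDataNsec(buf[:off], 0)
return types, err // []uint16{1, 15, 28}
}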
func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
var (
servers []string
s string
err error
)
if end > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking domain names"}
}
for off < end {
s, off, err = UnpackDomainName(msg, off)
if err != nil {
return servers, len(msg), err
}
servers = append(servers, s)
}
return servers, off, nil
}
func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error
for _, name := range names {
off, err = packDomainName(name, msg, off, compression, compress)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
var err error
for i := range data {
off, err = packDataAplPrefix(&data[i], msg, off)
if err != nil {
return len(msg), err
}
}
return off, nil
}
func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
if len(p.Network.IP) != len(p.Network.Mask) {
return len(msg), &Error{err: "address and mask lengths don't match"}
}
var err error
prefix, _ := p.Network.Mask.Size()
addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
switch len(p.Network.IP) {
case net.IPv4len:
off, err = packUint16(1, msg, off)
case net.IPv6len:
off, err = packUint16(2, msg, off)
default:
err = &Error{err: "unrecognized address family"}
}
if err != nil {
return len(msg), err
}
off, err = packUint8(uint8(prefix), msg, off)
if err != nil {
return len(msg), err
}
var n uint8
if p.Negation {
n = 0x80
}
adflen := uint8(len(addr)) & 0x7f
off, err = packUint8(n|adflen, msg, off)
if err != nil {
return len(msg), err
}
if off+len(addr) > len(msg) {
return len(msg), &Error{err: "overflow packing APL prefix"}
}
off += copy(msg[off:], addr)
return off, nil
}
func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
var result []APLPrefix
for off < len(msg) {
prefix, end, err := unpackDataAplPrefix(msg, off)
if err != nil {
return nil, len(msg), err
}
off = end
result = append(result, prefix)
}
return result, off, nil
}
func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
family, off, err := unpackUint16(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
prefix, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
nlen, off, err := unpackUint8(msg, off)
if err != nil {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
}
var ip []byte
switch family {
case 1:
ip = make([]byte, net.IPv4len)
case 2:
ip = make([]byte, net.IPv6len)
default:
return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
}
if int(prefix) > 8*len(ip) {
return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
}
afdlen := int(nlen & 0x7f)
if (int(prefix)+7)/8 != afdlen {
return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"}
}
if off+afdlen > len(msg) {
return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
}
off += copy(ip, msg[off:off+afdlen])
if prefix%8 > 0 {
last := ip[afdlen-1]
zero := uint8(0xff) >> (prefix % 8)
if last&zero > 0 {
return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
}
}
return APLPrefix{
Negation: (nlen & 0x80) != 0,
Network: net.IPNet{
IP: ip,
Mask: net.CIDRMask(int(prefix), 8*len(ip)),
},
}, off, nil
}
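// Illustrative sketch, not part of the upstream file: packing a single APL
// prefix for 192.0.2.0/24 (TEST-NET-1) and unpacking it again.
func exampleAplRoundTrip() (APLPrefix, error) {
_, ipnet, err := net.ParseCIDR("192.0.2.0/24")
if err != nil {
return APLPrefix{}, err
}
buf := make([]byte, 16)
off, err := packDataAplPrefix(&APLPrefix{Network: *ipnet}, buf, 0)
if err != nil {
return APLPrefix{}, err
}
p, _, err := unpackDataAplPrefix(buf[:off], 0)
return p, err // family 1 (IPv4), prefix length 24, no negation
}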

111
vendor/github.com/miekg/dns/msg_truncate.go generated vendored Normal file
View File

@ -0,0 +1,111 @@
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// records, adding as many as possible without exceeding the
// requested buffer size.
//
// The TC bit will be set if any records were excluded from the message.
// This indicates that the client should retry over TCP.
//
// According to RFC 2181, the TC bit should only be set if not all of the
// "required" RRs can be included in the response. Unfortunately, we have
// no way of knowing which RRs are required so we set the TC bit if any RR
// had to be omitted from the response.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that the payload size in an OPT record
// less than 512 bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < 512 {
size = 512
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
l, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// See the function documentation for when we set this.
dns.Truncated = len(dns.Answer) > numAnswer ||
len(dns.Ns) > numNS || len(dns.Extra) > numExtra
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
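// Illustrative sketch, not part of the upstream file: sizing a reply inside a
// handler. req, w and answers are hypothetical; MinMsgSize is used when the
// query carries no OPT record.
func exampleTruncate(w ResponseWriter, req *Msg, answers []RR) error {
reply := new(Msg)
reply.SetReply(req)
reply.Answer = answers
size := MinMsgSize
if opt := req.IsEdns0(); opt != nil {
size = int(opt.UDPSize())
}
reply.Truncate(size)
return w.WriteMsg(reply)
}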

95
vendor/github.com/miekg/dns/nsecx.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
package dns
import (
"crypto/sha1"
"encoding/hex"
"strings"
)
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string {
if ha != SHA1 {
return ""
}
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil {
return ""
}
wireSalt = wireSalt[:n]
name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil {
return ""
}
name = name[:off]
s := sha1.New()
// k = 0
s.Write(name)
s.Write(wireSalt)
nsec3 := s.Sum(nil)
// k > 0
for k := uint16(0); k < iter; k++ {
s.Reset()
s.Write(nsec3)
s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0])
}
return toBase32(nsec3)
}
// Cover returns true if a name is covered by the NSEC3 record
func (rr *NSEC3) Cover(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
nextHash := rr.NextDomain
// An empty interval (ownerHash == nextHash) covers every hash except ownerHash itself.
if ownerHash == nextHash && nameHash != ownerHash { // empty interval
return true
}
if ownerHash > nextHash { // end of zone
if nameHash > ownerHash { // covered since there is nothing after ownerHash
return true
}
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
}
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
return false
}
return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
}
// Match returns true if a name matches the NSEC3 record
func (rr *NSEC3) Match(name string) bool {
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
owner := strings.ToUpper(rr.Hdr.Name)
labelIndices := Split(owner)
if len(labelIndices) < 2 {
return false
}
ownerHash := owner[:labelIndices[1]-1]
ownerZone := owner[labelIndices[1]:]
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
return false
}
if ownerHash == nameHash {
return true
}
return false
}
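// Illustrative sketch, not part of the upstream file: an NSEC3 RR whose owner
// label is the hash of "a.example.org." matches that name. The zone, salt and
// iteration count are arbitrary.
func exampleNsec3Match() bool {
name, salt, iter := "a.example.org.", "ABCDEF", uint16(2)
rr := &NSEC3{
Hdr:        RR_Header{Name: strings.ToLower(HashName(name, SHA1, iter, salt)) + ".example.org.", Rrtype: TypeNSEC3, Class: ClassINET},
Hash:       SHA1,
Iterations: iter,
Salt:       salt,
}
return rr.Match(name) // true
}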

114
vendor/github.com/miekg/dns/privaterr.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package dns
import "strings"
// PrivateRdata is an interface used for implementing "Private Use" RR types, see
// RFC 6895. This allows one to experiment with new RR types, without requesting an
// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
type PrivateRdata interface {
// String returns the text presentation of the Rdata of the Private RR.
String() string
// Parse parses the Rdata of the private RR.
Parse([]string) error
// Pack is used when packing a private RR into a buffer.
Pack([]byte) (int, error)
// Unpack is used when unpacking a private RR from a buffer.
// TODO(miek): diff. signature than Pack, see edns0.go for instance.
Unpack([]byte) (int, error)
// Copy copies the Rdata into the PrivateRdata argument.
Copy(PrivateRdata) error
// Len returns the length in octets of the Rdata.
Len() int
}
// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
// It mocks normal RRs and implements the dns.RR interface.
type PrivateRR struct {
Hdr RR_Header
Data PrivateRdata
generator func() PrivateRdata // for copy
}
// Header returns the RR header of r.
func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR {
// make new RR like this:
rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
if err := r.Data.Copy(rr.Data); err != nil {
panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
}
return rr
}
func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
n, err := r.Data.Pack(msg[off:])
if err != nil {
return len(msg), err
}
off += n
return off, nil
}
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{"", err.Error(), l}
}
return nil
}
func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires
// the string and numeric representations of the private RR type and a generator function as arguments.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
rtypestr = strings.ToUpper(rtypestr)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype
}
// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
delete(TypeToRR, rtype)
delete(TypeToString, rtype)
delete(StringToType, rtypestr)
}
}
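// Illustrative sketch, not part of the upstream file: a minimal PrivateRdata
// implementation and its registration. The name ISBN and the code point 0xFF00
// (inside the RFC 6895 private-use range) are made up.
type exampleISBN struct{ s string }

func (r *exampleISBN) String() string           { return r.s }
func (r *exampleISBN) Parse(txt []string) error { r.s = strings.Join(txt, " "); return nil }
func (r *exampleISBN) Len() int                 { return len(r.s) }
func (r *exampleISBN) Pack(buf []byte) (int, error) {
if len(buf) < len(r.s) {
return 0, ErrBuf
}
return copy(buf, r.s), nil
}
func (r *exampleISBN) Unpack(buf []byte) (int, error) { r.s = string(buf); return len(buf), nil }
func (r *exampleISBN) Copy(dest PrivateRdata) error {
d, ok := dest.(*exampleISBN)
if !ok {
return ErrRdata
}
d.s = r.s
return nil
}

func examplePrivateHandle() {
PrivateHandle("ISBN", 0xFF00, func() PrivateRdata { return new(exampleISBN) })
defer PrivateHandleRemove(0xFF00)
// NewRR("example.org. 3600 IN ISBN 12-3456-789-0") would now parse into a *PrivateRR.
}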

52
vendor/github.com/miekg/dns/reverse.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package dns
// StringToType is the reverse of TypeToString, needed for string parsing.
var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)
// StringToOpcode is the reverse of OpcodeToString, mapping strings to opcodes.
var StringToOpcode = reverseInt(OpcodeToString)
// StringToRcode is the reverse of RcodeToString, mapping strings to rcodes.
var StringToRcode = reverseInt(RcodeToString)
func init() {
// Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
StringToRcode["NOTIMPL"] = RcodeNotImplemented
}
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt16(m map[uint16]string) map[string]uint16 {
n := make(map[string]uint16, len(m))
for u, s := range m {
n[s] = u
}
return n
}
func reverseInt(m map[int]string) map[string]int {
n := make(map[string]int, len(m))
for u, s := range m {
n[s] = u
}
return n
}

86
vendor/github.com/miekg/dns/sanitize.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
if m == nil {
m = make(map[string]RR)
}
// Save the keys, so we don't have to call normalizedString twice.
keys := make([]*string, 0, len(rrs))
for _, r := range rrs {
key := normalizedString(r)
keys = append(keys, &key)
if mr, ok := m[key]; ok {
// Shortest TTL wins.
rh, mrh := r.Header(), mr.Header()
if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
}
continue
}
m[key] = r
}
// If the length of the result map equals the number of RRs we got,
// it means they were all different. We can then just return the original rrset.
if len(m) == len(rrs) {
return rrs
}
j := 0
for i, r := range rrs {
// If keys[i] lives in the map, we should copy and remove it.
if _, ok := m[*keys[i]]; ok {
delete(m, *keys[i])
rrs[j] = r
j++
}
if len(m) == 0 {
break
}
}
return rrs[:j]
}
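// Illustrative sketch, not part of the upstream file: Dedup keeps the first of
// two identical A records and lowers its TTL to the smaller of the two. The
// records shown are arbitrary.
func exampleDedup() []RR {
a1, _ := NewRR("example.org. 300 IN A 192.0.2.1")
a2, _ := NewRR("example.org. 60 IN A 192.0.2.1")
return Dedup([]RR{a1, a2}, nil) // one RR, TTL 60
}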
// normalizedString returns a normalized string from r. The TTL
// is removed and the domain name is lowercased. We go from this:
// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
// lowercasename<TAB>CLASS<TAB>TYPE...
func normalizedString(r RR) string {
// A string Go DNS makes has: domainname<TAB>TTL<TAB>...
b := []byte(r.String())
// find the first non-escaped tab, then another, so we capture where the TTL lives.
esc := false
ttlStart, ttlEnd := 0, 0
for i := 0; i < len(b) && ttlEnd == 0; i++ {
switch {
case b[i] == '\\':
esc = !esc
case b[i] == '\t' && !esc:
if ttlStart == 0 {
ttlStart = i
continue
}
if ttlEnd == 0 {
ttlEnd = i
}
case b[i] >= 'A' && b[i] <= 'Z' && !esc:
b[i] += 32
default:
esc = false
}
}
// remove TTL.
copy(b[ttlStart:], b[ttlEnd:])
cut := ttlEnd - ttlStart
return string(b[:len(b)-cut])
}

1408
vendor/github.com/miekg/dns/scan.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

1764
vendor/github.com/miekg/dns/scan_rr.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

123
vendor/github.com/miekg/dns/serve_mux.go generated vendored Normal file
View File

@ -0,0 +1,123 @@
package dns
import (
"strings"
"sync"
)
// ServeMux is a DNS request multiplexer. It matches the zone name of
// each incoming request against a list of registered patterns and calls
// the handler for the pattern that most closely matches the zone name.
//
// ServeMux is DNSSEC aware, meaning that queries for the DS record are
// redirected to the parent zone (if that is also registered), otherwise
// the child gets the query.
//
// ServeMux is also safe for concurrent access from multiple goroutines.
//
// The zero ServeMux is empty and ready for use.
type ServeMux struct {
z map[string]Handler
m sync.RWMutex
}
// NewServeMux allocates and returns a new ServeMux.
func NewServeMux() *ServeMux {
return new(ServeMux)
}
// DefaultServeMux is the default ServeMux used by Serve.
var DefaultServeMux = NewServeMux()
func (mux *ServeMux) match(q string, t uint16) Handler {
mux.m.RLock()
defer mux.m.RUnlock()
if mux.z == nil {
return nil
}
q = strings.ToLower(q)
var handler Handler
for off, end := 0, false; !end; off, end = NextLabel(q, off) {
if h, ok := mux.z[q[off:]]; ok {
if t != TypeDS {
return h
}
// Continue for DS to see if we have a parent too, if so delegate to the parent
handler = h
}
}
// Wildcard match, if we have found nothing try the root zone as a last resort.
if h, ok := mux.z["."]; ok {
return h
}
return handler
}
// Handle adds a handler to the ServeMux for pattern.
func (mux *ServeMux) Handle(pattern string, handler Handler) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
if mux.z == nil {
mux.z = make(map[string]Handler)
}
mux.z[Fqdn(pattern)] = handler
mux.m.Unlock()
}
// HandleFunc adds a handler function to the ServeMux for pattern.
func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
mux.Handle(pattern, HandlerFunc(handler))
}
// HandleRemove deregisters the handler specific for pattern from the ServeMux.
func (mux *ServeMux) HandleRemove(pattern string) {
if pattern == "" {
panic("dns: invalid pattern " + pattern)
}
mux.m.Lock()
delete(mux.z, Fqdn(pattern))
mux.m.Unlock()
}
// ServeDNS dispatches the request to the handler whose pattern most
// closely matches the request message.
//
// ServeDNS is DNSSEC aware, meaning that queries for the DS record
// are redirected to the parent zone (if that is also registered),
// otherwise the child gets the query.
//
// If no handler is found, or there is no question, a standard SERVFAIL
// message is returned.
func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
var h Handler
if len(req.Question) >= 1 { // allow more than one question
h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
}
if h != nil {
h.ServeDNS(w, req)
} else {
HandleFailed(w, req)
}
}
// Handle registers the handler with the given pattern
// in the DefaultServeMux. The documentation for
// ServeMux explains how patterns are matched.
func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
// HandleRemove deregisters the handle with the given pattern
// in the DefaultServeMux.
func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
// HandleFunc registers the handler function with the given pattern
// in the DefaultServeMux.
func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
DefaultServeMux.HandleFunc(pattern, handler)
}
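// Illustrative sketch, not part of the upstream file: registering a per-zone
// handler on the default mux. The zone name and the empty reply are arbitrary;
// the most specific registered zone wins when several match.
func exampleHandleZone() {
HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetReply(r)
w.WriteMsg(m)
})
}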

764
vendor/github.com/miekg/dns/server.go generated vendored Normal file
View File

@ -0,0 +1,764 @@
// DNS server implementation.
package dns
import (
"context"
"crypto/tls"
"encoding/binary"
"errors"
"io"
"net"
"strings"
"sync"
"time"
)
// Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128
// aLongTimeAgo is a non-zero time, far in the past, used for
// immediate cancelation of network operations.
var aLongTimeAgo = time.Unix(1, 0)
// Handler is implemented by any value that implements ServeDNS.
type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as DNS handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Msg)
// ServeDNS calls f(w, r).
func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
f(w, r)
}
// A ResponseWriter interface is used by a DNS handler to
// construct a DNS response.
type ResponseWriter interface {
// LocalAddr returns the net.Addr of the server
LocalAddr() net.Addr
// RemoteAddr returns the net.Addr of the client that sent the current request.
RemoteAddr() net.Addr
// WriteMsg writes a reply back to the client.
WriteMsg(*Msg) error
// Write writes a raw buffer back to the client.
Write([]byte) (int, error)
// Close closes the connection.
Close() error
// TsigStatus returns the status of the Tsig.
TsigStatus() error
// TsigTimersOnly sets the tsig timers only boolean.
TsigTimersOnly(bool)
// Hijack lets the caller take over the connection.
// After a call to Hijack(), the DNS package will not do anything with the connection.
Hijack()
}
// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
// when available.
type ConnectionStater interface {
ConnectionState() *tls.ConnectionState
}
type response struct {
closed bool // connection has been closed
hijacked bool // connection has been hijacked by handler
tsigTimersOnly bool
tsigStatus error
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right
writer Writer // writer to output the raw DNS bits
}
// HandleFailed is a HandlerFunc that returns SERVFAIL for every request it gets.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
m.SetRcode(r, RcodeServerFailure)
// does not matter if this write fails
w.WriteMsg(m)
}
// ListenAndServe starts a server on the specified address and network and invokes handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
server := &Server{Addr: addr, Net: network, Handler: handler}
return server.ListenAndServe()
}
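// Illustrative sketch, not part of the upstream file: starting a UDP server on
// a local port with the default mux (nil Handler). The address is arbitrary.
func exampleListenAndServe() error {
return ListenAndServe("127.0.0.1:5353", "udp", nil)
}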
// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
// http://golang.org/pkg/net/http/#ListenAndServeTLS
func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return err
}
config := tls.Config{
Certificates: []tls.Certificate{cert},
}
server := &Server{
Addr: addr,
Net: "tcp-tls",
TLSConfig: &config,
Handler: handler,
}
return server.ListenAndServe()
}
// ActivateAndServe activates a server with a listener from systemd;
// l and p should not both be non-nil.
// If both l and p are non-nil, only p will be used.
// The handler is invoked for incoming queries.
func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
server := &Server{Listener: l, PacketConn: p, Handler: handler}
return server.ActivateAndServe()
}
// Writer writes raw DNS messages; each call to Write should send an entire message.
type Writer interface {
io.Writer
}
// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
type Reader interface {
// ReadTCP reads a raw message from a TCP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
// ReadUDP reads a raw message from a UDP connection. Implementations may alter
// connection properties, for example the read-deadline.
ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
}
// defaultReader is an adapter for the Server struct that implements the Reader interface
// using the readTCP and readUDP func of the embedded Server.
type defaultReader struct {
*Server
}
func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout)
}
func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout)
}
// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
// Implementations should never return a nil Reader.
type DecorateReader func(Reader) Reader
// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
// Implementations should never return a nil Writer.
type DecorateWriter func(Writer) Writer
// A Server defines parameters for running a DNS server.
type Server struct {
// Address to listen on, ":dns" if empty.
Addr string
// if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise a UDP one
Net string
// TCP Listener to use, this is to aid in systemd's socket activation.
Listener net.Listener
// TLS connection configuration
TLSConfig *tls.Config
// UDP "Listener" to use, this is to aid in systemd's socket activation.
PacketConn net.PacketConn
// Handler to invoke, dns.DefaultServeMux if nil.
Handler Handler
// Default buffer size to use to read incoming UDP messages. If not set
// it defaults to MinMsgSize (512 B).
UDPSize int
// The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
ReadTimeout time.Duration
// The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
WriteTimeout time.Duration
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
IdleTimeout func() time.Duration
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
TsigSecret map[string]string
// If NotifyStartedFunc is set it is called once the server has started listening.
NotifyStartedFunc func()
// DecorateReader is optional, allows customization of the process that reads raw DNS messages.
DecorateReader DecorateReader
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
DecorateWriter DecorateWriter
// Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
MaxTCPQueries int
// Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
// It is only supported on go1.11+ and when using ListenAndServe.
ReusePort bool
// MsgAcceptFunc will check the incoming message and will reject it early in the process.
// By default DefaultMsgAcceptFunc will be used.
MsgAcceptFunc MsgAcceptFunc
// Shutdown handling
lock sync.RWMutex
started bool
shutdown chan struct{}
conns map[net.Conn]struct{}
// A pool for UDP message buffers.
udpPool sync.Pool
}
func (srv *Server) isStarted() bool {
srv.lock.RLock()
started := srv.started
srv.lock.RUnlock()
return started
}
func makeUDPBuffer(size int) func() interface{} {
return func() interface{} {
return make([]byte, size)
}
}
func (srv *Server) init() {
srv.shutdown = make(chan struct{})
srv.conns = make(map[net.Conn]struct{})
if srv.UDPSize == 0 {
srv.UDPSize = MinMsgSize
}
if srv.MsgAcceptFunc == nil {
srv.MsgAcceptFunc = DefaultMsgAcceptFunc
}
if srv.Handler == nil {
srv.Handler = DefaultServeMux
}
srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
}
func unlockOnce(l sync.Locker) func() {
var once sync.Once
return func() { once.Do(l.Unlock) }
}
// ListenAndServe starts a nameserver on the configured address in *Server.
func (srv *Server) ListenAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
addr := srv.Addr
if addr == "" {
addr = ":domain"
}
srv.init()
switch srv.Net {
case "tcp", "tcp4", "tcp6":
l, err := listenTCP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "tcp-tls", "tcp4-tls", "tcp6-tls":
if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
return errors.New("dns: neither Certificates nor GetCertificate set in Config")
}
network := strings.TrimSuffix(srv.Net, "-tls")
l, err := listenTCP(network, addr, srv.ReusePort)
if err != nil {
return err
}
l = tls.NewListener(l, srv.TLSConfig)
srv.Listener = l
srv.started = true
unlock()
return srv.serveTCP(l)
case "udp", "udp4", "udp6":
l, err := listenUDP(srv.Net, addr, srv.ReusePort)
if err != nil {
return err
}
u := l.(*net.UDPConn)
if e := setUDPSocketOptions(u); e != nil {
return e
}
srv.PacketConn = l
srv.started = true
unlock()
return srv.serveUDP(u)
}
return &Error{err: "bad network"}
}
// ActivateAndServe starts a nameserver with the PacketConn or Listener
// configured in *Server. Its main use is to start a server from systemd.
func (srv *Server) ActivateAndServe() error {
unlock := unlockOnce(&srv.lock)
srv.lock.Lock()
defer unlock()
if srv.started {
return &Error{err: "server already started"}
}
srv.init()
pConn := srv.PacketConn
l := srv.Listener
if pConn != nil {
// Check that the PacketConn's concrete type is valid and its
// value is not nil
if t, ok := pConn.(*net.UDPConn); ok && t != nil {
if e := setUDPSocketOptions(t); e != nil {
return e
}
srv.started = true
unlock()
return srv.serveUDP(t)
}
}
if l != nil {
srv.started = true
unlock()
return srv.serveTCP(l)
}
return &Error{err: "bad listeners"}
}
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
// ActivateAndServe will return.
func (srv *Server) Shutdown() error {
return srv.ShutdownContext(context.Background())
}
// ShutdownContext shuts down a server. After a call to ShutdownContext,
// ListenAndServe and ActivateAndServe will return.
//
// A context.Context may be passed to limit how long to wait for connections
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.lock.Lock()
if !srv.started {
srv.lock.Unlock()
return &Error{err: "server not started"}
}
srv.started = false
if srv.PacketConn != nil {
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
if srv.Listener != nil {
srv.Listener.Close()
}
for rw := range srv.conns {
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
srv.lock.Unlock()
if testShutdownNotify != nil {
testShutdownNotify.Broadcast()
}
var ctxErr error
select {
case <-srv.shutdown:
case <-ctx.Done():
ctxErr = ctx.Err()
}
if srv.PacketConn != nil {
srv.PacketConn.Close()
}
return ctxErr
}
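// Illustrative sketch, not part of the upstream file: shutting a server down
// with a bounded wait. The five-second budget is arbitrary.
func exampleShutdown(srv *Server) error {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
return srv.ShutdownContext(ctx)
}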
var testShutdownNotify *sync.Cond
// getReadTimeout is a helper func that falls back to the default timeout if the server did not set one.
func (srv *Server) getReadTimeout() time.Duration {
if srv.ReadTimeout != 0 {
return srv.ReadTimeout
}
return dnsTimeout
}
// serveTCP starts a TCP listener for the server.
func (srv *Server) serveTCP(l net.Listener) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
for srv.isStarted() {
rw, err := l.Accept()
if err != nil {
if !srv.isStarted() {
return nil
}
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
continue
}
return err
}
srv.lock.Lock()
// Track the connection to allow unblocking reads on shutdown.
srv.conns[rw] = struct{}{}
srv.lock.Unlock()
wg.Add(1)
go srv.serveTCPConn(&wg, rw)
}
return nil
}
// serveUDP starts a UDP listener for the server.
func (srv *Server) serveUDP(l *net.UDPConn) error {
defer l.Close()
if srv.NotifyStartedFunc != nil {
srv.NotifyStartedFunc()
}
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
var wg sync.WaitGroup
defer func() {
wg.Wait()
close(srv.shutdown)
}()
rtimeout := srv.getReadTimeout()
// deadline is not used here
for srv.isStarted() {
m, s, err := reader.ReadUDP(l, rtimeout)
if err != nil {
if !srv.isStarted() {
return nil
}
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
continue
}
return err
}
if len(m) < headerSize {
if cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
continue
}
wg.Add(1)
go srv.serveUDPPacket(&wg, m, l, s)
}
return nil
}
// Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
w := &response{tsigSecret: srv.TsigSecret, tcp: rw}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader)
}
idleTimeout := tcpIdleTimeout
if srv.IdleTimeout != nil {
idleTimeout = srv.IdleTimeout()
}
timeout := srv.getReadTimeout()
limit := srv.MaxTCPQueries
if limit == 0 {
limit = maxTCPQueries
}
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
m, err := reader.ReadTCP(w.tcp, timeout)
if err != nil {
// TODO(tmthrgd): handle error
break
}
srv.serveDNS(m, w)
if w.closed {
break // Close() was called
}
if w.hijacked {
break // client will call Close() themselves
}
// The first read uses the read timeout, the rest use the
// idle timeout.
timeout = idleTimeout
}
if !w.hijacked {
w.Close()
}
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
wg.Done()
}
// Serve a new UDP request.
func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
srv.serveDNS(m, w)
wg.Done()
}
func (srv *Server) serveDNS(m []byte, w *response) {
dh, off, err := unpackMsgHdr(m, 0)
if err != nil {
// Let client hang, they are sending crap; any reply can be used to amplify.
return
}
req := new(Msg)
req.setHdr(dh)
switch action := srv.MsgAcceptFunc(dh); action {
case MsgAccept:
if req.unpack(dh, m, off) == nil {
break
}
fallthrough
case MsgReject, MsgRejectNotImplemented:
opcode := req.Opcode
req.SetRcodeFormatError(req)
req.Zero = false
if action == MsgRejectNotImplemented {
req.Opcode = opcode
req.Rcode = RcodeNotImplemented
}
// Are we allowed to delete any OPT records here?
req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req)
fallthrough
case MsgIgnore:
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
return
}
w.tsigStatus = nil
if w.tsigSecret != nil {
if t := req.IsTsig(); t != nil {
if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
w.tsigStatus = TsigVerify(m, secret, "", false)
} else {
w.tsigStatus = ErrSecret
}
w.tsigTimersOnly = false
w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
}
}
if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
srv.Handler.ServeDNS(w, req) // Writes back to the client
}
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
// If we race with ShutdownContext, the read deadline may
// have been set in the distant past to unblock the read
// below. We must not override it, otherwise we may block
// ShutdownContext.
srv.lock.RLock()
if srv.started {
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
var length uint16
if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
return nil, err
}
m := make([]byte, length)
if _, err := io.ReadFull(conn, m); err != nil {
return nil, err
}
return m, nil
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
srv.lock.RLock()
if srv.started {
// See the comment in readTCP above.
conn.SetReadDeadline(time.Now().Add(timeout))
}
srv.lock.RUnlock()
m := srv.udpPool.Get().([]byte)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil {
srv.udpPool.Put(m)
return nil, nil, err
}
m = m[:n]
return m, s, nil
}
// WriteMsg implements the ResponseWriter.WriteMsg method.
func (w *response) WriteMsg(m *Msg) (err error) {
if w.closed {
return &Error{err: "WriteMsg called after Close"}
}
var data []byte
if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
if t := m.IsTsig(); t != nil {
data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
}
data, err = m.Pack()
if err != nil {
return err
}
_, err = w.writer.Write(data)
return err
}
// Write implements the ResponseWriter.Write method.
func (w *response) Write(m []byte) (int, error) {
if w.closed {
return 0, &Error{err: "Write called after Close"}
}
switch {
case w.udp != nil:
return WriteToSessionUDP(w.udp, m, w.udpSession)
case w.tcp != nil:
if len(m) > MaxMsgSize {
return 0, &Error{err: "message too large"}
}
l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(m)))
n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
return int(n), err
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
switch {
case w.udp != nil:
return w.udp.LocalAddr()
case w.tcp != nil:
return w.tcp.LocalAddr()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr {
switch {
case w.udpSession != nil:
return w.udpSession.RemoteAddr()
case w.tcp != nil:
return w.tcp.RemoteAddr()
default:
panic("dns: internal error: udpSession and tcp both nil")
}
}
// TsigStatus implements the ResponseWriter.TsigStatus method.
func (w *response) TsigStatus() error { return w.tsigStatus }
// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
// Hijack implements the ResponseWriter.Hijack method.
func (w *response) Hijack() { w.hijacked = true }
// Close implements the ResponseWriter.Close method
func (w *response) Close() error {
if w.closed {
return &Error{err: "connection already closed"}
}
w.closed = true
switch {
case w.udp != nil:
// Can't close the udp conn, as that is actually the listener.
return nil
case w.tcp != nil:
return w.tcp.Close()
default:
panic("dns: internal error: udp and tcp both nil")
}
}
// ConnectionState implements the ConnectionStater.ConnectionState method.
func (w *response) ConnectionState() *tls.ConnectionState {
type tlsConnectionStater interface {
ConnectionState() tls.ConnectionState
}
if v, ok := w.tcp.(tlsConnectionStater); ok {
t := v.ConnectionState()
return &t
}
return nil
}

209
vendor/github.com/miekg/dns/sig0.go generated vendored Normal file
View File

@ -0,0 +1,209 @@
package dns
import (
"crypto"
"crypto/dsa"
"crypto/ecdsa"
"crypto/rsa"
"encoding/binary"
"math/big"
"strings"
"time"
)
// Sign signs a dns.Msg. It fills the signature with the appropriate data.
// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
// and Expiration set.
func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if k == nil {
return nil, ErrPrivKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return nil, ErrKey
}
rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
buf := make([]byte, m.Len()+Len(rr))
mbuf, err := m.PackBuffer(buf)
if err != nil {
return nil, err
}
if &buf[0] != &mbuf[0] {
return nil, ErrBuf
}
off, err := PackRR(rr, buf, len(mbuf), nil, false)
if err != nil {
return nil, err
}
buf = buf[:off:cap(buf)]
hash, ok := AlgorithmToHash[rr.Algorithm]
if !ok {
return nil, ErrAlg
}
hasher := hash.New()
// Write SIG rdata
hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
// Write message
hasher.Write(buf[:len(mbuf)])
signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
if err != nil {
return nil, err
}
rr.Signature = toBase64(signature)
buf = append(buf, signature...)
if len(buf) > int(^uint16(0)) {
return nil, ErrBuf
}
// Adjust sig data length
rdoff := len(mbuf) + 1 + 2 + 2 + 4
rdlen := binary.BigEndian.Uint16(buf[rdoff:])
rdlen += uint16(len(signature))
binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
// Adjust additional count
adc := binary.BigEndian.Uint16(buf[10:])
adc++
binary.BigEndian.PutUint16(buf[10:], adc)
return buf, nil
}
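// Illustrative sketch, not part of the upstream file: SIG(0)-signing a message
// with a caller-supplied crypto.Signer whose public half is published as a KEY
// RR. The signer name and key tag are hypothetical.
func exampleSig0Sign(k crypto.Signer, m *Msg) ([]byte, error) {
sig := new(SIG)
sig.Algorithm = ECDSAP256SHA256
sig.SignerName = "key.example.org."
sig.KeyTag = 1234
now := uint32(time.Now().Unix())
sig.Inception, sig.Expiration = now-300, now+300
return sig.Sign(k, m)
}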
// Verify validates the message buf using the key k.
// It's assumed that buf is a valid message from which rr was unpacked.
func (rr *SIG) Verify(k *KEY, buf []byte) error {
if k == nil {
return ErrKey
}
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return ErrKey
}
var hash crypto.Hash
switch rr.Algorithm {
case DSA, RSASHA1:
hash = crypto.SHA1
case RSASHA256, ECDSAP256SHA256:
hash = crypto.SHA256
case ECDSAP384SHA384:
hash = crypto.SHA384
case RSASHA512:
hash = crypto.SHA512
default:
return ErrAlg
}
hasher := hash.New()
buflen := len(buf)
qdc := binary.BigEndian.Uint16(buf[4:])
anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:])
offset := headerSize
var err error
for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type and Class
offset += 2 + 2
}
for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip past Type, Class and TTL
offset += 2 + 2 + 4
if offset+1 >= buflen {
continue
}
rdlen := binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
if offset >= buflen {
return &Error{err: "overflowing unpacking signed message"}
}
// offset should be just prior to SIG
bodyend := offset
// owner name SHOULD be root
_, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// Skip Type, Class, TTL, RDLen
offset += 2 + 2 + 4 + 2
sigstart := offset
// Skip Type Covered, Algorithm, Labels, Original TTL
offset += 2 + 1 + 1 + 4
if offset+4+4 >= buflen {
return &Error{err: "overflow unpacking signed message"}
}
expire := binary.BigEndian.Uint32(buf[offset:])
offset += 4
incept := binary.BigEndian.Uint32(buf[offset:])
offset += 4
now := uint32(time.Now().Unix())
if now < incept || now > expire {
return ErrTime
}
// Skip key tag
offset += 2
var signername string
signername, offset, err = UnpackDomainName(buf, offset)
if err != nil {
return err
}
// If the key has come from the DNS, name compression might
// have mangled the case of the name
if !strings.EqualFold(signername, k.Header().Name) {
return &Error{err: "signer name doesn't match key name"}
}
sigend := offset
hasher.Write(buf[sigstart:sigend])
hasher.Write(buf[:10])
hasher.Write([]byte{
byte((adc - 1) << 8),
byte(adc - 1),
})
hasher.Write(buf[12:bodyend])
hashed := hasher.Sum(nil)
sig := buf[sigend:]
switch k.Algorithm {
case DSA:
pk := k.publicKeyDSA()
sig = sig[1:]
r := new(big.Int).SetBytes(sig[:len(sig)/2])
s := new(big.Int).SetBytes(sig[len(sig)/2:])
if pk != nil {
if dsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
case RSASHA1, RSASHA256, RSASHA512:
pk := k.publicKeyRSA()
if pk != nil {
return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
}
case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyECDSA()
r := new(big.Int).SetBytes(sig[:len(sig)/2])
s := new(big.Int).SetBytes(sig[len(sig)/2:])
if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) {
return nil
}
return ErrSig
}
}
return ErrKeyAlg
}

61
vendor/github.com/miekg/dns/singleinflight.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted for dns package usage by Miek Gieben.
package dns
import "sync"
import "time"
// call is an in-flight or completed singleflight.Do call
type call struct {
wg sync.WaitGroup
val *Msg
rtt time.Duration
err error
dups int
}
// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
sync.Mutex // protects m
m map[string]*call // lazily initialized
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
}
// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
g.Lock()
if g.m == nil {
g.m = make(map[string]*call)
}
if c, ok := g.m[key]; ok {
c.dups++
g.Unlock()
c.wg.Wait()
return c.val, c.rtt, c.err, true
}
c := new(call)
c.wg.Add(1)
g.m[key] = c
g.Unlock()
c.val, c.rtt, c.err = fn()
c.wg.Done()
if !g.dontDeleteForTesting {
g.Lock()
delete(g.m, key)
g.Unlock()
}
return c.val, c.rtt, c.err, c.dups > 0
}
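// Illustrative sketch, not part of the upstream file: coalescing identical
// lookups through singleflight. The key and the stub exchange function are arbitrary.
func exampleSingleflight(g *singleflight) (*Msg, bool) {
v, _, _, shared := g.Do("example.org.|A", func() (*Msg, time.Duration, error) {
m := new(Msg)
m.SetQuestion("example.org.", TypeA)
return m, 0, nil
})
return v, shared
}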

44
vendor/github.com/miekg/dns/smimea.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
package dns
import (
"crypto/sha256"
"crypto/x509"
"encoding/hex"
)
// Sign creates a SMIMEA record from an SSL certificate.
func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeSMIMEA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
return err
}
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *SMIMEA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// SMIMEAName returns the owner name of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Sections 2 and 3.
func SMIMEAName(email, domain string) (string, error) {
hasher := sha256.New()
hasher.Write([]byte(email))
// RFC Section 3: "The local-part is hashed using the SHA2-256
// algorithm with the hash truncated to 28 octets and
// represented in its hexadecimal representation to become the
// left-most label in the prepared domain name"
return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
}
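// Illustrative sketch (not part of this vendored file): deriving the SMIMEA
// owner name for a hypothetical mailbox alice@example.com. SMIMEAName hashes
// the string it is given, so per the RFC text quoted above the caller passes
// the local-part only.
func smimeaOwnerExample() (string, error) {
	return SMIMEAName("alice", "example.com.")
}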

44
vendor/github.com/miekg/dns/tlsa.go generated vendored Normal file

@ -0,0 +1,44 @@
package dns
import (
"crypto/x509"
"net"
"strconv"
)
// Sign creates a TLSA record from an SSL certificate.
func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
r.Hdr.Rrtype = TypeTLSA
r.Usage = uint8(usage)
r.Selector = uint8(selector)
r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
return err
}
// Verify verifies a TLSA record against an SSL certificate. If it is OK
// a nil error is returned.
func (r *TLSA) Verify(cert *x509.Certificate) error {
c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil {
return err // Not also ErrSig?
}
if r.Certificate == c {
return nil
}
return ErrSig // ErrSig, really?
}
// TLSAName returns the owner name of a TLSA resource record as per the
// rules specified in RFC 6698, Section 3.
func TLSAName(name, service, network string) (string, error) {
if !IsFqdn(name) {
return "", ErrFqdn
}
p, err := net.LookupPort(network, service)
if err != nil {
return "", err
}
return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
}
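// Illustrative sketch (not part of this vendored file): building the TLSA
// owner name for a hypothetical HTTPS endpoint and preparing a query for it.
func tlsaQueryExample() (*Msg, error) {
	owner, err := TLSAName("www.example.com.", "https", "tcp")
	if err != nil {
		return nil, err
	}
	q := new(Msg)
	q.SetQuestion(owner, TypeTLSA)
	return q, nil
}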

389
vendor/github.com/miekg/dns/tsig.go generated vendored Normal file

@ -0,0 +1,389 @@
package dns
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
"hash"
"strconv"
"strings"
"time"
)
// HMAC hashing codes. These are transmitted as domain names.
const (
HmacMD5 = "hmac-md5.sig-alg.reg.int."
HmacSHA1 = "hmac-sha1."
HmacSHA256 = "hmac-sha256."
HmacSHA512 = "hmac-sha512."
)
// TSIG is the RR that holds the transaction signature of a message.
// See RFC 2845 and RFC 4635.
type TSIG struct {
Hdr RR_Header
Algorithm string `dns:"domain-name"`
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
MACSize uint16
MAC string `dns:"size-hex:MACSize"`
OrigId uint16
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex:OtherLen"`
}
// TSIG has no official presentation format, but this will suffice.
func (rr *TSIG) String() string {
s := "\n;; TSIG PSEUDOSECTION:\n; " // add another semi-colon to signify TSIG does not have a presentation format
s += rr.Hdr.String() +
" " + rr.Algorithm +
" " + tsigTimeToString(rr.TimeSigned) +
" " + strconv.Itoa(int(rr.Fudge)) +
" " + strconv.Itoa(int(rr.MACSize)) +
" " + strings.ToUpper(rr.MAC) +
" " + strconv.Itoa(int(rr.OrigId)) +
" " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
" " + strconv.Itoa(int(rr.OtherLen)) +
" " + rr.OtherData
return s
}
func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
panic("dns: internal error: parse should never be called on TSIG")
}
// The following values must be put in wireformat, so that the MAC can be calculated.
// RFC 2845, section 3.4.2. TSIG Variables.
type tsigWireFmt struct {
// From RR_Header
Name string `dns:"domain-name"`
Class uint16
Ttl uint32
// Rdata of the TSIG
Algorithm string `dns:"domain-name"`
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
// MACSize, MAC and OrigId excluded
Error uint16
OtherLen uint16
OtherData string `dns:"size-hex:OtherLen"`
}
// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC
type macWireFmt struct {
MACSize uint16
MAC string `dns:"size-hex:MACSize"`
}
// 3.3. Time values used in TSIG calculations
type timerWireFmt struct {
TimeSigned uint64 `dns:"uint48"`
Fudge uint16
}
// TsigGenerate fills out the TSIG record attached to the message.
// The message should contain a "stub" TSIG RR with the algorithm, key name
// (owner name of the RR), time fudge (defaults to 300 seconds) and the
// current time. The TSIG MAC is saved in that TSIG RR.
// When TsigGenerate is called for the first time requestMAC is set to the empty string and
// timersOnly is false.
// If something goes wrong an error is returned, otherwise it is nil.
func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
if m.IsTsig() == nil {
panic("dns: TSIG not last RR in additional")
}
// If we barf here, the caller is to blame
rawsecret, err := fromBase64([]byte(secret))
if err != nil {
return nil, "", err
}
rr := m.Extra[len(m.Extra)-1].(*TSIG)
m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
mbuf, err := m.Pack()
if err != nil {
return nil, "", err
}
buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
t := new(TSIG)
var h hash.Hash
switch strings.ToLower(rr.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, rawsecret)
case HmacSHA1:
h = hmac.New(sha1.New, rawsecret)
case HmacSHA256:
h = hmac.New(sha256.New, rawsecret)
case HmacSHA512:
h = hmac.New(sha512.New, rawsecret)
default:
return nil, "", ErrKeyAlg
}
h.Write(buf)
t.MAC = hex.EncodeToString(h.Sum(nil))
t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
t.Fudge = rr.Fudge
t.TimeSigned = rr.TimeSigned
t.Algorithm = rr.Algorithm
t.OrigId = m.Id
tbuf := make([]byte, Len(t))
off, err := PackRR(t, tbuf, 0, nil, false)
if err != nil {
return nil, "", err
}
mbuf = append(mbuf, tbuf[:off]...)
// Update the ArCount directly in the buffer.
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
return mbuf, t.MAC, nil
}
// TsigVerify verifies the TSIG on a message.
// If the signature does not validate err contains the
// error, otherwise it is nil.
func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
rawsecret, err := fromBase64([]byte(secret))
if err != nil {
return err
}
// Strip the TSIG from the incoming msg
stripped, tsig, err := stripTsig(msg)
if err != nil {
return err
}
msgMAC, err := hex.DecodeString(tsig.MAC)
if err != nil {
return err
}
buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
// Fudge factor works both ways. A message can arrive before it was signed because
// of clock skew.
now := uint64(time.Now().Unix())
ti := now - tsig.TimeSigned
if now < tsig.TimeSigned {
ti = tsig.TimeSigned - now
}
if uint64(tsig.Fudge) < ti {
return ErrTime
}
var h hash.Hash
switch strings.ToLower(tsig.Algorithm) {
case HmacMD5:
h = hmac.New(md5.New, rawsecret)
case HmacSHA1:
h = hmac.New(sha1.New, rawsecret)
case HmacSHA256:
h = hmac.New(sha256.New, rawsecret)
case HmacSHA512:
h = hmac.New(sha512.New, rawsecret)
default:
return ErrKeyAlg
}
h.Write(buf)
if !hmac.Equal(h.Sum(nil), msgMAC) {
return ErrSig
}
return nil
}
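// Illustrative sketch (not part of this vendored file): attaching a TSIG RR to
// a query on the client side. The key name and base64 secret are hypothetical;
// with Client.TsigSecret set, TsigGenerate and TsigVerify are applied to the
// outgoing and incoming messages.
func tsigExchangeExample(server string) (*Msg, error) {
	c := new(Client)
	c.TsigSecret = map[string]string{"tsig-key.example.com.": "c2VjcmV0c2VjcmV0c2VjcmV0"}
	q := new(Msg)
	q.SetQuestion("example.com.", TypeSOA)
	q.SetTsig("tsig-key.example.com.", HmacSHA256, 300, time.Now().Unix())
	r, _, err := c.Exchange(q, server)
	return r, err
}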
// Create a wiredata buffer for the MAC calculation.
func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
var buf []byte
if rr.TimeSigned == 0 {
rr.TimeSigned = uint64(time.Now().Unix())
}
if rr.Fudge == 0 {
rr.Fudge = 300 // Standard (RFC) default.
}
// Replace message ID in header with original ID from TSIG
binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)
if requestMAC != "" {
m := new(macWireFmt)
m.MACSize = uint16(len(requestMAC) / 2)
m.MAC = requestMAC
buf = make([]byte, len(requestMAC)) // long enough
n, _ := packMacWire(m, buf)
buf = buf[:n]
}
tsigvar := make([]byte, DefaultMsgSize)
if timersOnly {
tsig := new(timerWireFmt)
tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge
n, _ := packTimerWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
} else {
tsig := new(tsigWireFmt)
tsig.Name = strings.ToLower(rr.Hdr.Name)
tsig.Class = ClassANY
tsig.Ttl = rr.Hdr.Ttl
tsig.Algorithm = strings.ToLower(rr.Algorithm)
tsig.TimeSigned = rr.TimeSigned
tsig.Fudge = rr.Fudge
tsig.Error = rr.Error
tsig.OtherLen = rr.OtherLen
tsig.OtherData = rr.OtherData
n, _ := packTsigWire(tsig, tsigvar)
tsigvar = tsigvar[:n]
}
if requestMAC != "" {
x := append(buf, msgbuf...)
buf = append(x, tsigvar...)
} else {
buf = append(msgbuf, tsigvar...)
}
return buf
}
// Strip the TSIG from the raw message.
func stripTsig(msg []byte) ([]byte, *TSIG, error) {
// Copied from msg.go's Unpack() Header, but modified.
var (
dh Header
err error
)
off, tsigoff := 0, 0
if dh, off, err = unpackMsgHdr(msg, off); err != nil {
return nil, nil, err
}
if dh.Arcount == 0 {
return nil, nil, ErrNoSig
}
// Rcode, see msg.go Unpack()
if int(dh.Bits&0xF) == RcodeNotAuth {
return nil, nil, ErrAuth
}
for i := 0; i < int(dh.Qdcount); i++ {
_, off, err = unpackQuestion(msg, off)
if err != nil {
return nil, nil, err
}
}
_, off, err = unpackRRslice(int(dh.Ancount), msg, off)
if err != nil {
return nil, nil, err
}
_, off, err = unpackRRslice(int(dh.Nscount), msg, off)
if err != nil {
return nil, nil, err
}
rr := new(TSIG)
var extra RR
for i := 0; i < int(dh.Arcount); i++ {
tsigoff = off
extra, off, err = UnpackRR(msg, off)
if err != nil {
return nil, nil, err
}
if extra.Header().Rrtype == TypeTSIG {
rr = extra.(*TSIG)
// Adjust Arcount.
arcount := binary.BigEndian.Uint16(msg[10:])
binary.BigEndian.PutUint16(msg[10:], arcount-1)
break
}
}
if rr == nil {
return nil, nil, ErrNoSig
}
return msg[:tsigoff], rr, nil
}
// Translate the TSIG time signed into a date. There is no
// need for RFC1982 calculations as this date is 48 bits.
func tsigTimeToString(t uint64) string {
ti := time.Unix(int64(t), 0).UTC()
return ti.Format("20060102150405")
}
func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
// copied from zmsg.go TSIG packing
// RR_Header
off, err := PackDomainName(tw.Name, msg, 0, nil, false)
if err != nil {
return off, err
}
off, err = packUint16(tw.Class, msg, off)
if err != nil {
return off, err
}
off, err = packUint32(tw.Ttl, msg, off)
if err != nil {
return off, err
}
off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
if err != nil {
return off, err
}
off, err = packUint48(tw.TimeSigned, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.Error, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(tw.OtherLen, msg, off)
if err != nil {
return off, err
}
off, err = packStringHex(tw.OtherData, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
off, err := packUint16(mw.MACSize, msg, 0)
if err != nil {
return off, err
}
off, err = packStringHex(mw.MAC, msg, off)
if err != nil {
return off, err
}
return off, nil
}
func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
off, err := packUint48(tw.TimeSigned, msg, 0)
if err != nil {
return off, err
}
off, err = packUint16(tw.Fudge, msg, off)
if err != nil {
return off, err
}
return off, nil
}

1527
vendor/github.com/miekg/dns/types.go generated vendored Normal file

File diff suppressed because it is too large

102
vendor/github.com/miekg/dns/udp.go generated vendored Normal file

@ -0,0 +1,102 @@
// +build !windows
package dns
import (
"net"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// This is the required size of the OOB buffer to pass to ReadMsgUDP.
var udpOOBSize = func() int {
// We can't know whether we'll get an IPv4 control message or an
// IPv6 control message ahead of time. To get around this, we size
// the buffer equal to the largest of the two.
oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface)
oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface)
if len(oob4) > len(oob6) {
return len(oob4)
}
return len(oob6)
}()
// SessionUDP holds the remote address and the associated
// out-of-band data.
type SessionUDP struct {
raddr *net.UDPAddr
context []byte
}
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
oob := make([]byte, udpOOBSize)
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
if err != nil {
return n, nil, err
}
return n, &SessionUDP{raddr, oob[:oobn]}, err
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
oob := correctSource(session.context)
n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
return n, err
}
func setUDPSocketOptions(conn *net.UDPConn) error {
// Try setting the flags for both families and ignore the errors unless they
// both error.
err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
if err6 != nil && err4 != nil {
return err4
}
return nil
}
// parseDstFromOOB takes oob data and returns the destination IP.
func parseDstFromOOB(oob []byte) net.IP {
// Start with IPv6 and then fallback to IPv4
// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
// the lvl of the header for a 0 or 41 isn't cross-platform.
cm6 := new(ipv6.ControlMessage)
if cm6.Parse(oob) == nil && cm6.Dst != nil {
return cm6.Dst
}
cm4 := new(ipv4.ControlMessage)
if cm4.Parse(oob) == nil && cm4.Dst != nil {
return cm4.Dst
}
return nil
}
// correctSource takes oob data and returns new oob data with the Src equal to the Dst
func correctSource(oob []byte) []byte {
dst := parseDstFromOOB(oob)
if dst == nil {
return nil
}
// If the dst is definitely an IPv6, then use ipv6's ControlMessage to
// respond otherwise use ipv4's because ipv6's marshal ignores ipv4
// addresses.
if dst.To4() == nil {
cm := new(ipv6.ControlMessage)
cm.Src = dst
oob = cm.Marshal()
} else {
cm := new(ipv4.ControlMessage)
cm.Src = dst
oob = cm.Marshal()
}
return oob
}
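// Illustrative sketch (not part of this vendored file): reading one datagram
// and replying from the same local address it arrived on, via the session
// helpers above.
func replyFromSameAddress(conn *net.UDPConn) error {
	if err := setUDPSocketOptions(conn); err != nil {
		return err
	}
	buf := make([]byte, MaxMsgSize)
	n, session, err := ReadFromSessionUDP(conn, buf)
	if err != nil {
		return err
	}
	// Echo the payload back; WriteToSessionUDP rewrites the source address
	// using the OOB data captured in the session.
	_, err = WriteToSessionUDP(conn, buf[:n], session)
	return err
}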

35
vendor/github.com/miekg/dns/udp_windows.go generated vendored Normal file

@ -0,0 +1,35 @@
// +build windows
package dns
import "net"
// SessionUDP holds the remote address
type SessionUDP struct {
raddr *net.UDPAddr
}
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
n, raddr, err := conn.ReadFrom(b)
if err != nil {
return n, nil, err
}
return n, &SessionUDP{raddr.(*net.UDPAddr)}, err
}
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
return conn.WriteTo(b, session.raddr)
}
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
// use the standard method in udp.go for these.
func setUDPSocketOptions(*net.UDPConn) error { return nil }
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }

110
vendor/github.com/miekg/dns/update.go generated vendored Normal file

@ -0,0 +1,110 @@
package dns
// NameUsed sets the RRs in the prereq section to
// "Name is in use" RRs. RFC 2136 section 2.4.4.
func (u *Msg) NameUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
}
}
// NameNotUsed sets the RRs in the prereq section to
// "Name is in not use" RRs. RFC 2136 section 2.4.5.
func (u *Msg) NameNotUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
}
}
// Used sets the RRs in the prereq section to
// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
func (u *Msg) Used(rr []RR) {
if len(u.Question) == 0 {
panic("dns: empty question section")
}
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
r.Header().Class = u.Question[0].Qclass
u.Answer = append(u.Answer, r)
}
}
// RRsetUsed sets the RRs in the prereq section to
// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
func (u *Msg) RRsetUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
h := r.Header()
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
}
}
// RRsetNotUsed sets the RRs in the prereq section to
// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
func (u *Msg) RRsetNotUsed(rr []RR) {
if u.Answer == nil {
u.Answer = make([]RR, 0, len(rr))
}
for _, r := range rr {
h := r.Header()
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
}
}
// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
func (u *Msg) Insert(rr []RR) {
if len(u.Question) == 0 {
panic("dns: empty question section")
}
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
r.Header().Class = u.Question[0].Qclass
u.Ns = append(u.Ns, r)
}
}
// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
func (u *Msg) RemoveRRset(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
h := r.Header()
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
}
}
// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3
func (u *Msg) RemoveName(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
}
}
// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
func (u *Msg) Remove(rr []RR) {
if u.Ns == nil {
u.Ns = make([]RR, 0, len(rr))
}
for _, r := range rr {
h := r.Header()
h.Class = ClassNONE
h.Ttl = 0
u.Ns = append(u.Ns, r)
}
}
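// Illustrative sketch (not part of this vendored file): composing a dynamic
// update that inserts one A record into a hypothetical zone.
func buildInsertExample() (*Msg, error) {
	rr, err := NewRR("host.example.org. 300 IN A 192.0.2.10")
	if err != nil {
		return nil, err
	}
	m := new(Msg)
	m.SetUpdate("example.org.")
	m.Insert([]RR{rr})
	return m, nil
}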

15
vendor/github.com/miekg/dns/version.go generated vendored Normal file

@ -0,0 +1,15 @@
package dns
import "fmt"
// Version is current version of this library.
var Version = V{1, 1, 27}
// V holds the version of this library.
type V struct {
Major, Minor, Patch int
}
func (v V) String() string {
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}

266
vendor/github.com/miekg/dns/xfr.go generated vendored Normal file

@ -0,0 +1,266 @@
package dns
import (
"fmt"
"time"
)
// Envelope is used when doing a zone transfer with a remote server.
type Envelope struct {
RR []RR // The set of RRs in the answer section of the xfr reply message.
Error error // If something went wrong, this contains the error.
}
// A Transfer defines parameters that are used during a zone transfer.
type Transfer struct {
*Conn
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
tsigTimersOnly bool
}
// Think we need a way to stop the transfer.
// In performs an incoming transfer with the server in a.
// If you would like to set the source IP, or some other attribute
// of a Dialer for a Transfer, you can do so by specifying the attributes
// in the Transfer.Conn:
//
// d := net.Dialer{LocalAddr: transfer_source}
// con, err := d.Dial("tcp", master)
// dnscon := &dns.Conn{Conn:con}
// transfer = &dns.Transfer{Conn: dnscon}
// channel, err := transfer.In(message, master)
//
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
switch q.Question[0].Qtype {
case TypeAXFR, TypeIXFR:
default:
return nil, &Error{"unsupported question type"}
}
timeout := dnsTimeout
if t.DialTimeout != 0 {
timeout = t.DialTimeout
}
if t.Conn == nil {
t.Conn, err = DialTimeout("tcp", a, timeout)
if err != nil {
return nil, err
}
}
if err := t.WriteMsg(q); err != nil {
return nil, err
}
env = make(chan *Envelope)
switch q.Question[0].Qtype {
case TypeAXFR:
go t.inAxfr(q, env)
case TypeIXFR:
go t.inIxfr(q, env)
}
return env, nil
}
func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
first := true
defer t.Close()
defer close(c)
timeout := dnsTimeout
if t.ReadTimeout != 0 {
timeout = t.ReadTimeout
}
for {
t.Conn.SetReadDeadline(time.Now().Add(timeout))
in, err := t.ReadMsg()
if err != nil {
c <- &Envelope{nil, err}
return
}
if q.Id != in.Id {
c <- &Envelope{in.Answer, ErrId}
return
}
if first {
if in.Rcode != RcodeSuccess {
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
return
}
if !isSOAFirst(in) {
c <- &Envelope{in.Answer, ErrSoa}
return
}
first = !first
// only one answer that is SOA, receive more
if len(in.Answer) == 1 {
t.tsigTimersOnly = true
c <- &Envelope{in.Answer, nil}
continue
}
}
if !first {
t.tsigTimersOnly = true // Subsequent envelopes use this.
if isSOALast(in) {
c <- &Envelope{in.Answer, nil}
return
}
c <- &Envelope{in.Answer, nil}
}
}
}
func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
var serial uint32 // The first serial seen is the current server serial
axfr := true
n := 0
qser := q.Ns[0].(*SOA).Serial
defer t.Close()
defer close(c)
timeout := dnsTimeout
if t.ReadTimeout != 0 {
timeout = t.ReadTimeout
}
for {
t.SetReadDeadline(time.Now().Add(timeout))
in, err := t.ReadMsg()
if err != nil {
c <- &Envelope{nil, err}
return
}
if q.Id != in.Id {
c <- &Envelope{in.Answer, ErrId}
return
}
if in.Rcode != RcodeSuccess {
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
return
}
if n == 0 {
// Check if the returned answer is ok
if !isSOAFirst(in) {
c <- &Envelope{in.Answer, ErrSoa}
return
}
// This serial is important
serial = in.Answer[0].(*SOA).Serial
// Check if there are no changes in zone
if qser >= serial {
c <- &Envelope{in.Answer, nil}
return
}
}
// Now we need to check each message for SOA records, to see what we need to do
t.tsigTimersOnly = true
for _, rr := range in.Answer {
if v, ok := rr.(*SOA); ok {
if v.Serial == serial {
n++
// quit if it's a full axfr or the server's SOA is repeated a third time
if axfr && n == 2 || n == 3 {
c <- &Envelope{in.Answer, nil}
return
}
} else if axfr {
// it's an ixfr
axfr = false
}
}
}
c <- &Envelope{in.Answer, nil}
}
}
// Out performs an outgoing transfer with the client connecting in w.
// Basic use pattern:
//
// ch := make(chan *dns.Envelope)
// tr := new(dns.Transfer)
// var wg sync.WaitGroup
// go func() {
// tr.Out(w, r, ch)
// wg.Done()
// }()
// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
// close(ch)
// wg.Wait() // wait until everything is written out
// w.Close() // close connection
//
// The server is responsible for sending the correct sequence of RRs through the channel ch.
func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
for x := range ch {
r := new(Msg)
// Compress?
r.SetReply(q)
r.Authoritative = true
// assume it fits TODO(miek): fix
r.Answer = append(r.Answer, x.RR...)
if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil {
r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix())
}
if err := w.WriteMsg(r); err != nil {
return err
}
w.TsigTimersOnly(true)
}
return nil
}
// ReadMsg reads a message from the transfer connection t.
func (t *Transfer) ReadMsg() (*Msg, error) {
m := new(Msg)
p := make([]byte, MaxMsgSize)
n, err := t.Read(p)
if err != nil && n == 0 {
return nil, err
}
p = p[:n]
if err := m.Unpack(p); err != nil {
return nil, err
}
if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
return m, ErrSecret
}
// Need to work on the original message p, as that was used to calculate the tsig.
err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
t.tsigRequestMAC = ts.MAC
}
return m, err
}
// WriteMsg writes a message through the transfer connection t.
func (t *Transfer) WriteMsg(m *Msg) (err error) {
var out []byte
if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
return ErrSecret
}
out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
} else {
out, err = m.Pack()
}
if err != nil {
return err
}
_, err = t.Write(out)
return err
}
func isSOAFirst(in *Msg) bool {
return len(in.Answer) > 0 &&
in.Answer[0].Header().Rrtype == TypeSOA
}
func isSOALast(in *Msg) bool {
return len(in.Answer) > 0 &&
in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
}
const errXFR = "bad xfr rcode: %d"

1157
vendor/github.com/miekg/dns/zduplicate.go generated vendored Normal file

File diff suppressed because it is too large

2741
vendor/github.com/miekg/dns/zmsg.go generated vendored Normal file

File diff suppressed because it is too large

898
vendor/github.com/miekg/dns/ztypes.go generated vendored Normal file

@ -0,0 +1,898 @@
// Code generated by "go run types_generate.go"; DO NOT EDIT.
package dns
import (
"encoding/base64"
"net"
)
// TypeToRR is a map of constructors for each RR type.
var TypeToRR = map[uint16]func() RR{
TypeA: func() RR { return new(A) },
TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) },
TypeANY: func() RR { return new(ANY) },
TypeAPL: func() RR { return new(APL) },
TypeAVC: func() RR { return new(AVC) },
TypeCAA: func() RR { return new(CAA) },
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
TypeCDS: func() RR { return new(CDS) },
TypeCERT: func() RR { return new(CERT) },
TypeCNAME: func() RR { return new(CNAME) },
TypeCSYNC: func() RR { return new(CSYNC) },
TypeDHCID: func() RR { return new(DHCID) },
TypeDLV: func() RR { return new(DLV) },
TypeDNAME: func() RR { return new(DNAME) },
TypeDNSKEY: func() RR { return new(DNSKEY) },
TypeDS: func() RR { return new(DS) },
TypeEID: func() RR { return new(EID) },
TypeEUI48: func() RR { return new(EUI48) },
TypeEUI64: func() RR { return new(EUI64) },
TypeGID: func() RR { return new(GID) },
TypeGPOS: func() RR { return new(GPOS) },
TypeHINFO: func() RR { return new(HINFO) },
TypeHIP: func() RR { return new(HIP) },
TypeKEY: func() RR { return new(KEY) },
TypeKX: func() RR { return new(KX) },
TypeL32: func() RR { return new(L32) },
TypeL64: func() RR { return new(L64) },
TypeLOC: func() RR { return new(LOC) },
TypeLP: func() RR { return new(LP) },
TypeMB: func() RR { return new(MB) },
TypeMD: func() RR { return new(MD) },
TypeMF: func() RR { return new(MF) },
TypeMG: func() RR { return new(MG) },
TypeMINFO: func() RR { return new(MINFO) },
TypeMR: func() RR { return new(MR) },
TypeMX: func() RR { return new(MX) },
TypeNAPTR: func() RR { return new(NAPTR) },
TypeNID: func() RR { return new(NID) },
TypeNIMLOC: func() RR { return new(NIMLOC) },
TypeNINFO: func() RR { return new(NINFO) },
TypeNS: func() RR { return new(NS) },
TypeNSAPPTR: func() RR { return new(NSAPPTR) },
TypeNSEC: func() RR { return new(NSEC) },
TypeNSEC3: func() RR { return new(NSEC3) },
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
TypeNULL: func() RR { return new(NULL) },
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
TypeOPT: func() RR { return new(OPT) },
TypePTR: func() RR { return new(PTR) },
TypePX: func() RR { return new(PX) },
TypeRKEY: func() RR { return new(RKEY) },
TypeRP: func() RR { return new(RP) },
TypeRRSIG: func() RR { return new(RRSIG) },
TypeRT: func() RR { return new(RT) },
TypeSIG: func() RR { return new(SIG) },
TypeSMIMEA: func() RR { return new(SMIMEA) },
TypeSOA: func() RR { return new(SOA) },
TypeSPF: func() RR { return new(SPF) },
TypeSRV: func() RR { return new(SRV) },
TypeSSHFP: func() RR { return new(SSHFP) },
TypeTA: func() RR { return new(TA) },
TypeTALINK: func() RR { return new(TALINK) },
TypeTKEY: func() RR { return new(TKEY) },
TypeTLSA: func() RR { return new(TLSA) },
TypeTSIG: func() RR { return new(TSIG) },
TypeTXT: func() RR { return new(TXT) },
TypeUID: func() RR { return new(UID) },
TypeUINFO: func() RR { return new(UINFO) },
TypeURI: func() RR { return new(URI) },
TypeX25: func() RR { return new(X25) },
}
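// Illustrative sketch (not part of this generated file): constructing an empty
// RR for a type code read off the wire, falling back to RFC3597 for unknown
// types.
func newRRForType(t uint16) RR {
	if mk, ok := TypeToRR[t]; ok {
		return mk()
	}
	return new(RFC3597)
}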
// TypeToString is a map of strings for each RR type.
var TypeToString = map[uint16]string{
TypeA: "A",
TypeAAAA: "AAAA",
TypeAFSDB: "AFSDB",
TypeANY: "ANY",
TypeAPL: "APL",
TypeATMA: "ATMA",
TypeAVC: "AVC",
TypeAXFR: "AXFR",
TypeCAA: "CAA",
TypeCDNSKEY: "CDNSKEY",
TypeCDS: "CDS",
TypeCERT: "CERT",
TypeCNAME: "CNAME",
TypeCSYNC: "CSYNC",
TypeDHCID: "DHCID",
TypeDLV: "DLV",
TypeDNAME: "DNAME",
TypeDNSKEY: "DNSKEY",
TypeDS: "DS",
TypeEID: "EID",
TypeEUI48: "EUI48",
TypeEUI64: "EUI64",
TypeGID: "GID",
TypeGPOS: "GPOS",
TypeHINFO: "HINFO",
TypeHIP: "HIP",
TypeISDN: "ISDN",
TypeIXFR: "IXFR",
TypeKEY: "KEY",
TypeKX: "KX",
TypeL32: "L32",
TypeL64: "L64",
TypeLOC: "LOC",
TypeLP: "LP",
TypeMAILA: "MAILA",
TypeMAILB: "MAILB",
TypeMB: "MB",
TypeMD: "MD",
TypeMF: "MF",
TypeMG: "MG",
TypeMINFO: "MINFO",
TypeMR: "MR",
TypeMX: "MX",
TypeNAPTR: "NAPTR",
TypeNID: "NID",
TypeNIMLOC: "NIMLOC",
TypeNINFO: "NINFO",
TypeNS: "NS",
TypeNSEC: "NSEC",
TypeNSEC3: "NSEC3",
TypeNSEC3PARAM: "NSEC3PARAM",
TypeNULL: "NULL",
TypeNXT: "NXT",
TypeNone: "None",
TypeOPENPGPKEY: "OPENPGPKEY",
TypeOPT: "OPT",
TypePTR: "PTR",
TypePX: "PX",
TypeRKEY: "RKEY",
TypeRP: "RP",
TypeRRSIG: "RRSIG",
TypeRT: "RT",
TypeReserved: "Reserved",
TypeSIG: "SIG",
TypeSMIMEA: "SMIMEA",
TypeSOA: "SOA",
TypeSPF: "SPF",
TypeSRV: "SRV",
TypeSSHFP: "SSHFP",
TypeTA: "TA",
TypeTALINK: "TALINK",
TypeTKEY: "TKEY",
TypeTLSA: "TLSA",
TypeTSIG: "TSIG",
TypeTXT: "TXT",
TypeUID: "UID",
TypeUINFO: "UINFO",
TypeUNSPEC: "UNSPEC",
TypeURI: "URI",
TypeX25: "X25",
TypeNSAPPTR: "NSAP-PTR",
}
func (rr *A) Header() *RR_Header { return &rr.Hdr }
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
func (rr *APL) Header() *RR_Header { return &rr.Hdr }
func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr }
func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *DS) Header() *RR_Header { return &rr.Hdr }
func (rr *EID) Header() *RR_Header { return &rr.Hdr }
func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
func (rr *GID) Header() *RR_Header { return &rr.Hdr }
func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
func (rr *KX) Header() *RR_Header { return &rr.Hdr }
func (rr *L32) Header() *RR_Header { return &rr.Hdr }
func (rr *L64) Header() *RR_Header { return &rr.Hdr }
func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
func (rr *LP) Header() *RR_Header { return &rr.Hdr }
func (rr *MB) Header() *RR_Header { return &rr.Hdr }
func (rr *MD) Header() *RR_Header { return &rr.Hdr }
func (rr *MF) Header() *RR_Header { return &rr.Hdr }
func (rr *MG) Header() *RR_Header { return &rr.Hdr }
func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *MR) Header() *RR_Header { return &rr.Hdr }
func (rr *MX) Header() *RR_Header { return &rr.Hdr }
func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
func (rr *NID) Header() *RR_Header { return &rr.Hdr }
func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *NS) Header() *RR_Header { return &rr.Hdr }
func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
func (rr *NULL) Header() *RR_Header { return &rr.Hdr }
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
func (rr *PX) Header() *RR_Header { return &rr.Hdr }
func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *RP) Header() *RR_Header { return &rr.Hdr }
func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
func (rr *RT) Header() *RR_Header { return &rr.Hdr }
func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr }
func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
func (rr *TA) Header() *RR_Header { return &rr.Hdr }
func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
func (rr *UID) Header() *RR_Header { return &rr.Hdr }
func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
func (rr *URI) Header() *RR_Header { return &rr.Hdr }
func (rr *X25) Header() *RR_Header { return &rr.Hdr }
// len() functions
func (rr *A) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
if len(rr.A) != 0 {
l += net.IPv4len
}
return l
}
func (rr *AAAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
if len(rr.AAAA) != 0 {
l += net.IPv6len
}
return l
}
func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Subtype
l += domainNameLen(rr.Hostname, off+l, compression, false)
return l
}
func (rr *ANY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
return l
}
func (rr *APL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Prefixes {
l += x.len()
}
return l
}
func (rr *AVC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *CAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Flag
l += len(rr.Tag) + 1
l += len(rr.Value)
return l
}
func (rr *CERT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Type
l += 2 // KeyTag
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l
}
func (rr *CNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Target, off+l, compression, true)
return l
}
func (rr *DHCID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.Digest))
return l
}
func (rr *DNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Target, off+l, compression, false)
return l
}
func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Flags
l++ // Protocol
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *DS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // KeyTag
l++ // Algorithm
l++ // DigestType
l += len(rr.Digest) / 2
return l
}
func (rr *EID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Endpoint) / 2
return l
}
func (rr *EUI48) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 6 // Address
return l
}
func (rr *EUI64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 8 // Address
return l
}
func (rr *GID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 4 // Gid
return l
}
func (rr *GPOS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Longitude) + 1
l += len(rr.Latitude) + 1
l += len(rr.Altitude) + 1
return l
}
func (rr *HINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Cpu) + 1
l += len(rr.Os) + 1
return l
}
func (rr *HIP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // HitLength
l++ // PublicKeyAlgorithm
l += 2 // PublicKeyLength
l += len(rr.Hit) / 2
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
for _, x := range rr.RendezvousServers {
l += domainNameLen(x, off+l, compression, false)
}
return l
}
func (rr *KX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Exchanger, off+l, compression, false)
return l
}
func (rr *L32) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
if len(rr.Locator32) != 0 {
l += net.IPv4len
}
return l
}
func (rr *L64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += 8 // Locator64
return l
}
func (rr *LOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Version
l++ // Size
l++ // HorizPre
l++ // VertPre
l += 4 // Latitude
l += 4 // Longitude
l += 4 // Altitude
return l
}
func (rr *LP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Fqdn, off+l, compression, false)
return l
}
func (rr *MB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mb, off+l, compression, true)
return l
}
func (rr *MD) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Md, off+l, compression, true)
return l
}
func (rr *MF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mf, off+l, compression, true)
return l
}
func (rr *MG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mg, off+l, compression, true)
return l
}
func (rr *MINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Rmail, off+l, compression, true)
l += domainNameLen(rr.Email, off+l, compression, true)
return l
}
func (rr *MR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mr, off+l, compression, true)
return l
}
func (rr *MX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Mx, off+l, compression, true)
return l
}
func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Order
l += 2 // Preference
l += len(rr.Flags) + 1
l += len(rr.Service) + 1
l += len(rr.Regexp) + 1
l += domainNameLen(rr.Replacement, off+l, compression, false)
return l
}
func (rr *NID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += 8 // NodeID
return l
}
func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Locator) / 2
return l
}
func (rr *NINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.ZSData {
l += len(x) + 1
}
return l
}
func (rr *NS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ns, off+l, compression, true)
return l
}
func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ptr, off+l, compression, false)
return l
}
func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Hash
l++ // Flags
l += 2 // Iterations
l++ // SaltLength
l += len(rr.Salt) / 2
return l
}
func (rr *NULL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Data)
return l
}
func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *PTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ptr, off+l, compression, true)
return l
}
func (rr *PX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Map822, off+l, compression, false)
l += domainNameLen(rr.Mapx400, off+l, compression, false)
return l
}
func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Rdata) / 2
return l
}
func (rr *RKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Flags
l++ // Protocol
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *RP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Mbox, off+l, compression, false)
l += domainNameLen(rr.Txt, off+l, compression, false)
return l
}
func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // TypeCovered
l++ // Algorithm
l++ // Labels
l += 4 // OrigTtl
l += 4 // Expiration
l += 4 // Inception
l += 2 // KeyTag
l += domainNameLen(rr.SignerName, off+l, compression, false)
l += base64.StdEncoding.DecodedLen(len(rr.Signature))
return l
}
func (rr *RT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Preference
l += domainNameLen(rr.Host, off+l, compression, false)
return l
}
func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Usage
l++ // Selector
l++ // MatchingType
l += len(rr.Certificate) / 2
return l
}
func (rr *SOA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Ns, off+l, compression, true)
l += domainNameLen(rr.Mbox, off+l, compression, true)
l += 4 // Serial
l += 4 // Refresh
l += 4 // Retry
l += 4 // Expire
l += 4 // Minttl
return l
}
func (rr *SPF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *SRV) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Priority
l += 2 // Weight
l += 2 // Port
l += domainNameLen(rr.Target, off+l, compression, false)
return l
}
func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Algorithm
l++ // Type
l += len(rr.FingerPrint) / 2
return l
}
func (rr *TA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // KeyTag
l++ // Algorithm
l++ // DigestType
l += len(rr.Digest) / 2
return l
}
func (rr *TALINK) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.PreviousName, off+l, compression, false)
l += domainNameLen(rr.NextName, off+l, compression, false)
return l
}
func (rr *TKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Algorithm, off+l, compression, false)
l += 4 // Inception
l += 4 // Expiration
l += 2 // Mode
l += 2 // Error
l += 2 // KeySize
l += len(rr.Key) / 2
l += 2 // OtherLen
l += len(rr.OtherData) / 2
return l
}
func (rr *TLSA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l++ // Usage
l++ // Selector
l++ // MatchingType
l += len(rr.Certificate) / 2
return l
}
func (rr *TSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.Algorithm, off+l, compression, false)
l += 6 // TimeSigned
l += 2 // Fudge
l += 2 // MACSize
l += len(rr.MAC) / 2
l += 2 // OrigId
l += 2 // Error
l += 2 // OtherLen
l += len(rr.OtherData) / 2
return l
}
func (rr *TXT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *UID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 4 // Uid
return l
}
func (rr *UINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.Uinfo) + 1
return l
}
func (rr *URI) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += 2 // Priority
l += 2 // Weight
l += len(rr.Target)
return l
}
func (rr *X25) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += len(rr.PSDNAddress) + 1
return l
}
// copy() functions
func (rr *A) copy() RR {
return &A{rr.Hdr, copyIP(rr.A)}
}
func (rr *AAAA) copy() RR {
return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
}
func (rr *AFSDB) copy() RR {
return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
}
func (rr *ANY) copy() RR {
return &ANY{rr.Hdr}
}
func (rr *APL) copy() RR {
Prefixes := make([]APLPrefix, len(rr.Prefixes))
for i := range rr.Prefixes {
Prefixes[i] = rr.Prefixes[i].copy()
}
return &APL{rr.Hdr, Prefixes}
}
func (rr *AVC) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &AVC{rr.Hdr, Txt}
}
func (rr *CAA) copy() RR {
return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
}
func (rr *CERT) copy() RR {
return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
}
func (rr *CNAME) copy() RR {
return &CNAME{rr.Hdr, rr.Target}
}
func (rr *CSYNC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
}
func (rr *DHCID) copy() RR {
return &DHCID{rr.Hdr, rr.Digest}
}
func (rr *DNAME) copy() RR {
return &DNAME{rr.Hdr, rr.Target}
}
func (rr *DNSKEY) copy() RR {
return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
}
func (rr *DS) copy() RR {
return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
}
func (rr *EID) copy() RR {
return &EID{rr.Hdr, rr.Endpoint}
}
func (rr *EUI48) copy() RR {
return &EUI48{rr.Hdr, rr.Address}
}
func (rr *EUI64) copy() RR {
return &EUI64{rr.Hdr, rr.Address}
}
func (rr *GID) copy() RR {
return &GID{rr.Hdr, rr.Gid}
}
func (rr *GPOS) copy() RR {
return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
}
func (rr *HINFO) copy() RR {
return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
}
func (rr *HIP) copy() RR {
RendezvousServers := make([]string, len(rr.RendezvousServers))
copy(RendezvousServers, rr.RendezvousServers)
return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
}
func (rr *KX) copy() RR {
return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
}
func (rr *L32) copy() RR {
return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
}
func (rr *L64) copy() RR {
return &L64{rr.Hdr, rr.Preference, rr.Locator64}
}
func (rr *LOC) copy() RR {
return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
}
func (rr *LP) copy() RR {
return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
}
func (rr *MB) copy() RR {
return &MB{rr.Hdr, rr.Mb}
}
func (rr *MD) copy() RR {
return &MD{rr.Hdr, rr.Md}
}
func (rr *MF) copy() RR {
return &MF{rr.Hdr, rr.Mf}
}
func (rr *MG) copy() RR {
return &MG{rr.Hdr, rr.Mg}
}
func (rr *MINFO) copy() RR {
return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
}
func (rr *MR) copy() RR {
return &MR{rr.Hdr, rr.Mr}
}
func (rr *MX) copy() RR {
return &MX{rr.Hdr, rr.Preference, rr.Mx}
}
func (rr *NAPTR) copy() RR {
return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
}
func (rr *NID) copy() RR {
return &NID{rr.Hdr, rr.Preference, rr.NodeID}
}
func (rr *NIMLOC) copy() RR {
return &NIMLOC{rr.Hdr, rr.Locator}
}
func (rr *NINFO) copy() RR {
ZSData := make([]string, len(rr.ZSData))
copy(ZSData, rr.ZSData)
return &NINFO{rr.Hdr, ZSData}
}
func (rr *NS) copy() RR {
return &NS{rr.Hdr, rr.Ns}
}
func (rr *NSAPPTR) copy() RR {
return &NSAPPTR{rr.Hdr, rr.Ptr}
}
func (rr *NSEC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
}
func (rr *NSEC3) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
}
func (rr *NSEC3PARAM) copy() RR {
return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
}
func (rr *NULL) copy() RR {
return &NULL{rr.Hdr, rr.Data}
}
func (rr *OPENPGPKEY) copy() RR {
return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
}
func (rr *OPT) copy() RR {
Option := make([]EDNS0, len(rr.Option))
for i, e := range rr.Option {
Option[i] = e.copy()
}
return &OPT{rr.Hdr, Option}
}
func (rr *PTR) copy() RR {
return &PTR{rr.Hdr, rr.Ptr}
}
func (rr *PX) copy() RR {
return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
}
func (rr *RFC3597) copy() RR {
return &RFC3597{rr.Hdr, rr.Rdata}
}
func (rr *RKEY) copy() RR {
return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
}
func (rr *RP) copy() RR {
return &RP{rr.Hdr, rr.Mbox, rr.Txt}
}
func (rr *RRSIG) copy() RR {
return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
}
func (rr *RT) copy() RR {
return &RT{rr.Hdr, rr.Preference, rr.Host}
}
func (rr *SMIMEA) copy() RR {
return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
}
func (rr *SOA) copy() RR {
return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
}
func (rr *SPF) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &SPF{rr.Hdr, Txt}
}
func (rr *SRV) copy() RR {
return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
}
func (rr *SSHFP) copy() RR {
return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
}
func (rr *TA) copy() RR {
return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
}
func (rr *TALINK) copy() RR {
return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
}
func (rr *TKEY) copy() RR {
return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
}
func (rr *TLSA) copy() RR {
return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
}
func (rr *TSIG) copy() RR {
return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
}
func (rr *TXT) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &TXT{rr.Hdr, Txt}
}
func (rr *UID) copy() RR {
return &UID{rr.Hdr, rr.Uid}
}
func (rr *UINFO) copy() RR {
return &UINFO{rr.Hdr, rr.Uinfo}
}
func (rr *URI) copy() RR {
return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
}
func (rr *X25) copy() RR {
return &X25{rr.Hdr, rr.PSDNAddress}
}

24
vendor/github.com/syndtr/goleveldb/LICENSE generated vendored Normal file

@ -0,0 +1,24 @@
Copyright 2012 Suryandaru Triandana <syndtr@gmail.com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

349
vendor/github.com/syndtr/goleveldb/leveldb/batch.go generated vendored Normal file

@ -0,0 +1,349 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package leveldb
import (
"encoding/binary"
"fmt"
"io"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/memdb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
// ErrBatchCorrupted records reason of batch corruption. This error will be
// wrapped with errors.ErrCorrupted.
type ErrBatchCorrupted struct {
Reason string
}
func (e *ErrBatchCorrupted) Error() string {
return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
}
func newErrBatchCorrupted(reason string) error {
return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
}
const (
batchHeaderLen = 8 + 4
batchGrowRec = 3000
batchBufioSize = 16
)
// BatchReplay wraps basic batch operations.
type BatchReplay interface {
Put(key, value []byte)
Delete(key []byte)
}
type batchIndex struct {
keyType keyType
keyPos, keyLen int
valuePos, valueLen int
}
func (index batchIndex) k(data []byte) []byte {
return data[index.keyPos : index.keyPos+index.keyLen]
}
func (index batchIndex) v(data []byte) []byte {
if index.valueLen != 0 {
return data[index.valuePos : index.valuePos+index.valueLen]
}
return nil
}
func (index batchIndex) kv(data []byte) (key, value []byte) {
return index.k(data), index.v(data)
}
// Batch is a write batch.
type Batch struct {
data []byte
index []batchIndex
// internalLen is the sum of the key/value pair lengths plus the 8-byte internal key overhead.
internalLen int
}
func (b *Batch) grow(n int) {
o := len(b.data)
if cap(b.data)-o < n {
div := 1
if len(b.index) > batchGrowRec {
div = len(b.index) / batchGrowRec
}
ndata := make([]byte, o, o+n+o/div)
copy(ndata, b.data)
b.data = ndata
}
}
func (b *Batch) appendRec(kt keyType, key, value []byte) {
n := 1 + binary.MaxVarintLen32 + len(key)
if kt == keyTypeVal {
n += binary.MaxVarintLen32 + len(value)
}
b.grow(n)
index := batchIndex{keyType: kt}
o := len(b.data)
data := b.data[:o+n]
data[o] = byte(kt)
o++
o += binary.PutUvarint(data[o:], uint64(len(key)))
index.keyPos = o
index.keyLen = len(key)
o += copy(data[o:], key)
if kt == keyTypeVal {
o += binary.PutUvarint(data[o:], uint64(len(value)))
index.valuePos = o
index.valueLen = len(value)
o += copy(data[o:], value)
}
b.data = data[:o]
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
}
// Put appends 'put operation' of the given key/value pair to the batch.
// It is safe to modify the contents of the argument after Put returns but not
// before.
func (b *Batch) Put(key, value []byte) {
b.appendRec(keyTypeVal, key, value)
}
// Delete appends a 'delete operation' for the given key to the batch.
// It is safe to modify the contents of the argument after Delete returns but
// not before.
func (b *Batch) Delete(key []byte) {
b.appendRec(keyTypeDel, key, nil)
}
// Dump dumps the batch contents. The returned slice can be loaded into a
// batch using the Load method.
// The returned slice is not a copy, so its contents should not be
// modified.
func (b *Batch) Dump() []byte {
return b.data
}
// Load loads the given slice into the batch. Previous contents of the batch
// will be discarded.
// The given slice will not be copied and will be used as the batch buffer, so
// it is not safe to modify its contents afterwards.
func (b *Batch) Load(data []byte) error {
return b.decode(data, -1)
}
// Replay replays batch contents.
func (b *Batch) Replay(r BatchReplay) error {
for _, index := range b.index {
switch index.keyType {
case keyTypeVal:
r.Put(index.k(b.data), index.v(b.data))
case keyTypeDel:
r.Delete(index.k(b.data))
}
}
return nil
}
// Len returns number of records in the batch.
func (b *Batch) Len() int {
return len(b.index)
}
// Reset resets the batch.
func (b *Batch) Reset() {
b.data = b.data[:0]
b.index = b.index[:0]
b.internalLen = 0
}
func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
for i, index := range b.index {
if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) append(p *Batch) {
ob := len(b.data)
oi := len(b.index)
b.data = append(b.data, p.data...)
b.index = append(b.index, p.index...)
b.internalLen += p.internalLen
// Updating index offset.
if ob != 0 {
for ; oi < len(b.index); oi++ {
index := &b.index[oi]
index.keyPos += ob
if index.valueLen != 0 {
index.valuePos += ob
}
}
}
}
func (b *Batch) decode(data []byte, expectedLen int) error {
b.data = data
b.index = b.index[:0]
b.internalLen = 0
err := decodeBatch(data, func(i int, index batchIndex) error {
b.index = append(b.index, index)
b.internalLen += index.keyLen + index.valueLen + 8
return nil
})
if err != nil {
return err
}
if expectedLen >= 0 && len(b.index) != expectedLen {
return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
}
return nil
}
func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(b.data)); err != nil {
return err
}
}
return nil
}
func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
var ik []byte
for i, index := range b.index {
ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
if err := mdb.Delete(ik); err != nil {
return err
}
}
return nil
}
func newBatch() interface{} {
return &Batch{}
}
func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
var index batchIndex
for i, o := 0, 0; o < len(data); i++ {
// Key type.
index.keyType = keyType(data[o])
if index.keyType > keyTypeVal {
return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
}
o++
// Key.
x, n := binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid key length")
}
index.keyPos = o
index.keyLen = int(x)
o += index.keyLen
// Value.
if index.keyType == keyTypeVal {
x, n = binary.Uvarint(data[o:])
o += n
if n <= 0 || o+int(x) > len(data) {
return newErrBatchCorrupted("bad record: invalid value length")
}
index.valuePos = o
index.valueLen = int(x)
o += index.valueLen
} else {
index.valuePos = 0
index.valueLen = 0
}
if err := fn(i, index); err != nil {
return err
}
}
return nil
}
func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
seq, batchLen, err = decodeBatchHeader(data)
if err != nil {
return 0, 0, err
}
if seq < expectSeq {
return 0, 0, newErrBatchCorrupted("invalid sequence number")
}
data = data[batchHeaderLen:]
var ik []byte
var decodedLen int
err = decodeBatch(data, func(i int, index batchIndex) error {
if i >= batchLen {
return newErrBatchCorrupted("invalid records length")
}
ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
if err := mdb.Put(ik, index.v(data)); err != nil {
return err
}
decodedLen++
return nil
})
if err == nil && decodedLen != batchLen {
err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
}
return
}
func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
dst = ensureBuffer(dst, batchHeaderLen)
binary.LittleEndian.PutUint64(dst, seq)
binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
return dst
}
func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
if len(data) < batchHeaderLen {
return 0, 0, newErrBatchCorrupted("too short")
}
seq = binary.LittleEndian.Uint64(data)
batchLen = int(binary.LittleEndian.Uint32(data[8:]))
if batchLen < 0 {
return 0, 0, newErrBatchCorrupted("invalid records length")
}
return
}
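// For reference, the batch header handled by the two functions above is
// batchHeaderLen (12) bytes: an 8-byte little-endian sequence number followed
// by a 4-byte little-endian record count.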
func batchesLen(batches []*Batch) int {
batchLen := 0
for _, batch := range batches {
batchLen += batch.Len()
}
return batchLen
}
func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
return err
}
for _, batch := range batches {
if _, err := wr.Write(batch.data); err != nil {
return err
}
}
return nil
}
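As a quick illustration of the Batch API defined in this file, the following sketch (not part of the vendored sources) builds a batch, replays it through a custom BatchReplay implementation, and round-trips it with Dump/Load; the printReplay type and the example keys are hypothetical.

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb"
)

// printReplay implements leveldb.BatchReplay and just prints each operation.
type printReplay struct{}

func (printReplay) Put(key, value []byte) { fmt.Printf("PUT %s=%s\n", key, value) }
func (printReplay) Delete(key []byte)     { fmt.Printf("DEL %s\n", key) }

func main() {
    b := new(leveldb.Batch)
    b.Put([]byte("ads.example.com"), []byte("blocked"))
    b.Delete([]byte("stale.example.org"))
    fmt.Println("records:", b.Len()) // 2

    // Replay walks the records in insertion order.
    if err := b.Replay(printReplay{}); err != nil {
        fmt.Println("replay failed:", err)
    }

    // Dump/Load round-trip the raw batch buffer without copying it.
    var c leveldb.Batch
    if err := c.Load(b.Dump()); err != nil {
        fmt.Println("load failed:", err)
    }
    fmt.Println("copy records:", c.Len()) // 2
}

Such a batch would normally be applied atomically with DB.Write, which is defined elsewhere in the package (not shown in this excerpt).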

704
vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go generated vendored Normal file
View File

@ -0,0 +1,704 @@
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package cache provides the interface and implementation of cache algorithms.
package cache
import (
"sync"
"sync/atomic"
"unsafe"
"github.com/syndtr/goleveldb/leveldb/util"
)
// Cacher provides an interface for implementing caching functionality.
// An implementation must be safe for concurrent use.
type Cacher interface {
// Capacity returns cache capacity.
Capacity() int
// SetCapacity sets cache capacity.
SetCapacity(capacity int)
// Promote promotes the 'cache node'.
Promote(n *Node)
// Ban evicts the 'cache node' and prevents subsequent 'promote'.
Ban(n *Node)
// Evict evicts the 'cache node'.
Evict(n *Node)
// EvictNS evicts the 'cache nodes' in the given namespace.
EvictNS(ns uint64)
// EvictAll evicts all 'cache nodes'.
EvictAll()
// Close closes the 'cache tree'.
Close() error
}
// Value is a 'cacheable object'. It may implement util.Releaser; if
// so, the Release method will be called once the object is released.
type Value interface{}
// NamespaceGetter provides a convenient wrapper around a namespace.
type NamespaceGetter struct {
Cache *Cache
NS uint64
}
// Get simply calls the Cache.Get method.
func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
return g.Cache.Get(g.NS, key, setFunc)
}
// The hash tables implementation is based on:
// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
// Kunlong Zhang, and Michael Spear.
// ACM Symposium on Principles of Distributed Computing, Jul 2014.
const (
mInitialSize = 1 << 4
mOverflowThreshold = 1 << 5
mOverflowGrowThreshold = 1 << 7
)
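// For reference, the hash map below resizes itself as follows: the bucket
// table is doubled when the node count reaches growThreshold (bucket count ×
// mOverflowThreshold), or when buckets overflow past mOverflowThreshold often
// enough to reach mOverflowGrowThreshold; it is halved when the node count
// drops below shrinkThreshold and the table is larger than mInitialSize. A
// resize CASes resizeInProgess, swaps in a new mNode as the head, and the new
// buckets are populated lazily from the predecessor table by initBucket.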
type mBucket struct {
mu sync.Mutex
node []*Node
frozen bool
}
func (b *mBucket) freeze() []*Node {
b.mu.Lock()
defer b.mu.Unlock()
if !b.frozen {
b.frozen = true
}
return b.node
}
func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
for _, n := range b.node {
if n.hash == hash && n.ns == ns && n.key == key {
atomic.AddInt32(&n.ref, 1)
b.mu.Unlock()
return true, false, n
}
}
// Get only.
if noset {
b.mu.Unlock()
return true, false, nil
}
// Create node.
n = &Node{
r: r,
hash: hash,
ns: ns,
key: key,
ref: 1,
}
// Add node to bucket.
b.node = append(b.node, n)
bLen := len(b.node)
b.mu.Unlock()
// Update counter.
grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
if bLen > mOverflowThreshold {
grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
}
// Grow.
if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) << 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
return true, true, n
}
func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
b.mu.Lock()
if b.frozen {
b.mu.Unlock()
return
}
// Scan the node.
var (
n *Node
bLen int
)
for i := range b.node {
n = b.node[i]
if n.ns == ns && n.key == key {
if atomic.LoadInt32(&n.ref) == 0 {
deleted = true
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Remove node from bucket.
b.node = append(b.node[:i], b.node[i+1:]...)
bLen = len(b.node)
}
break
}
}
b.mu.Unlock()
if deleted {
// Call OnDel.
for _, f := range n.onDel {
f()
}
// Update counter.
atomic.AddInt32(&r.size, int32(n.size)*-1)
shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
if bLen >= mOverflowThreshold {
atomic.AddInt32(&h.overflow, -1)
}
// Shrink.
if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
nhLen := len(h.buckets) >> 1
nh := &mNode{
buckets: make([]unsafe.Pointer, nhLen),
mask: uint32(nhLen) - 1,
pred: unsafe.Pointer(h),
growThreshold: int32(nhLen * mOverflowThreshold),
shrinkThreshold: int32(nhLen >> 1),
}
ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
if !ok {
panic("BUG: failed swapping head")
}
go nh.initBuckets()
}
}
return true, deleted
}
type mNode struct {
buckets []unsafe.Pointer // []*mBucket
mask uint32
pred unsafe.Pointer // *mNode
resizeInProgess int32
overflow int32
growThreshold int32
shrinkThreshold int32
}
func (n *mNode) initBucket(i uint32) *mBucket {
if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
return b
}
p := (*mNode)(atomic.LoadPointer(&n.pred))
if p != nil {
var node []*Node
if n.mask > p.mask {
// Grow.
pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
if pb == nil {
pb = p.initBucket(i & p.mask)
}
m := pb.freeze()
// Split nodes.
for _, x := range m {
if x.hash&n.mask == i {
node = append(node, x)
}
}
} else {
// Shrink.
pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
if pb0 == nil {
pb0 = p.initBucket(i)
}
pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
if pb1 == nil {
pb1 = p.initBucket(i + uint32(len(n.buckets)))
}
m0 := pb0.freeze()
m1 := pb1.freeze()
// Merge nodes.
node = make([]*Node, 0, len(m0)+len(m1))
node = append(node, m0...)
node = append(node, m1...)
}
b := &mBucket{node: node}
if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
if len(node) > mOverflowThreshold {
atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
}
return b
}
}
return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
}
func (n *mNode) initBuckets() {
for i := range n.buckets {
n.initBucket(uint32(i))
}
atomic.StorePointer(&n.pred, nil)
}
// Cache is a 'cache map'.
type Cache struct {
mu sync.RWMutex
mHead unsafe.Pointer // *mNode
nodes int32
size int32
cacher Cacher
closed bool
}
// NewCache creates a new 'cache map'. The cacher is optional and
// may be nil.
func NewCache(cacher Cacher) *Cache {
h := &mNode{
buckets: make([]unsafe.Pointer, mInitialSize),
mask: mInitialSize - 1,
growThreshold: int32(mInitialSize * mOverflowThreshold),
shrinkThreshold: 0,
}
for i := range h.buckets {
h.buckets[i] = unsafe.Pointer(&mBucket{})
}
r := &Cache{
mHead: unsafe.Pointer(h),
cacher: cacher,
}
return r
}
func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
h := (*mNode)(atomic.LoadPointer(&r.mHead))
i := hash & h.mask
b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
if b == nil {
b = h.initBucket(i)
}
return h, b
}
func (r *Cache) delete(n *Node) bool {
for {
h, b := r.getBucket(n.hash)
done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
if done {
return deleted
}
}
}
// Nodes returns the number of 'cache nodes' in the map.
func (r *Cache) Nodes() int {
return int(atomic.LoadInt32(&r.nodes))
}
// Size returns the sum of 'cache node' sizes in the map.
func (r *Cache) Size() int {
return int(atomic.LoadInt32(&r.size))
}
// Capacity returns cache capacity.
func (r *Cache) Capacity() int {
if r.cacher == nil {
return 0
}
return r.cacher.Capacity()
}
// SetCapacity sets cache capacity.
func (r *Cache) SetCapacity(capacity int) {
if r.cacher != nil {
r.cacher.SetCapacity(capacity)
}
}
// Get gets the 'cache node' with the given namespace and key.
// If the 'cache node' is not found and setFunc is not nil, Get atomically creates
// the 'cache node' by calling setFunc. Otherwise Get returns nil.
//
// The returned 'cache handle' should be released after use by calling its Release
// method.
func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return nil
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
if done {
if n != nil {
n.mu.Lock()
if n.value == nil {
if setFunc == nil {
n.mu.Unlock()
n.unref()
return nil
}
n.size, n.value = setFunc()
if n.value == nil {
n.size = 0
n.mu.Unlock()
n.unref()
return nil
}
atomic.AddInt32(&r.size, int32(n.size))
}
n.mu.Unlock()
if r.cacher != nil {
r.cacher.Promote(n)
}
return &Handle{unsafe.Pointer(n)}
}
break
}
}
return nil
}
// Delete removes and bans the 'cache node' with the given namespace and key.
// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
// applies only to that particular 'cache node', so when a 'cache node' with the
// same namespace and key is recreated it will not be banned.
//
// If onDel is not nil, it will be executed if no such 'cache node' exists
// or once the 'cache node' is released.
//
// Delete returns true if such a 'cache node' exists.
func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if onDel != nil {
n.mu.Lock()
n.onDel = append(n.onDel, onDel)
n.mu.Unlock()
}
if r.cacher != nil {
r.cacher.Ban(n)
}
n.unref()
return true
}
break
}
}
if onDel != nil {
onDel()
}
return false
}
// Evict evicts the 'cache node' with the given namespace and key. This will
// simply call Cacher.Evict.
//
// Evict returns true if such a 'cache node' exists.
func (r *Cache) Evict(ns, key uint64) bool {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return false
}
hash := murmur32(ns, key, 0xf00)
for {
h, b := r.getBucket(hash)
done, _, n := b.get(r, h, hash, ns, key, true)
if done {
if n != nil {
if r.cacher != nil {
r.cacher.Evict(n)
}
n.unref()
return true
}
break
}
}
return false
}
// EvictNS evicts the 'cache nodes' in the given namespace. This will
// simply call Cacher.EvictNS.
func (r *Cache) EvictNS(ns uint64) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if r.cacher != nil {
r.cacher.EvictNS(ns)
}
}
// EvictAll evicts all 'cache nodes'. This will simply call Cacher.EvictAll.
func (r *Cache) EvictAll() {
r.mu.RLock()
defer r.mu.RUnlock()
if r.closed {
return
}
if r.cacher != nil {
r.cacher.EvictAll()
}
}
// Close closes the 'cache map' and forcefully releases all 'cache nodes'.
func (r *Cache) Close() error {
r.mu.Lock()
if !r.closed {
r.closed = true
h := (*mNode)(r.mHead)
h.initBuckets()
for i := range h.buckets {
b := (*mBucket)(h.buckets[i])
for _, n := range b.node {
// Call releaser.
if n.value != nil {
if r, ok := n.value.(util.Releaser); ok {
r.Release()
}
n.value = nil
}
// Call OnDel.
for _, f := range n.onDel {
f()
}
n.onDel = nil
}
}
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}
// CloseWeak closes the 'cache map' and evicts all 'cache nodes' from the cacher,
// but unlike Close it doesn't forcefully release the 'cache nodes'.
func (r *Cache) CloseWeak() error {
r.mu.Lock()
if !r.closed {
r.closed = true
}
r.mu.Unlock()
// Avoid deadlock.
if r.cacher != nil {
r.cacher.EvictAll()
if err := r.cacher.Close(); err != nil {
return err
}
}
return nil
}
// Node is a 'cache node'.
type Node struct {
r *Cache
hash uint32
ns, key uint64
mu sync.Mutex
size int
value Value
ref int32
onDel []func()
CacheData unsafe.Pointer
}
// NS returns this 'cache node' namespace.
func (n *Node) NS() uint64 {
return n.ns
}
// Key returns this 'cache node' key.
func (n *Node) Key() uint64 {
return n.key
}
// Size returns this 'cache node' size.
func (n *Node) Size() int {
return n.size
}
// Value returns this 'cache node' value.
func (n *Node) Value() Value {
return n.value
}
// Ref returns this 'cache node' ref counter.
func (n *Node) Ref() int32 {
return atomic.LoadInt32(&n.ref)
}
// GetHandle returns a handle for this 'cache node'.
func (n *Node) GetHandle() *Handle {
if atomic.AddInt32(&n.ref, 1) <= 1 {
panic("BUG: Node.GetHandle on zero ref")
}
return &Handle{unsafe.Pointer(n)}
}
func (n *Node) unref() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.delete(n)
}
}
func (n *Node) unrefLocked() {
if atomic.AddInt32(&n.ref, -1) == 0 {
n.r.mu.RLock()
if !n.r.closed {
n.r.delete(n)
}
n.r.mu.RUnlock()
}
}
// Handle is a 'cache handle' of a 'cache node'.
type Handle struct {
n unsafe.Pointer // *Node
}
// Value returns the value of the 'cache node'.
func (h *Handle) Value() Value {
n := (*Node)(atomic.LoadPointer(&h.n))
if n != nil {
return n.value
}
return nil
}
// Release releases this 'cache handle'.
// It is safe to call Release multiple times.
func (h *Handle) Release() {
nPtr := atomic.LoadPointer(&h.n)
if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
n := (*Node)(nPtr)
n.unrefLocked()
}
}
func murmur32(ns, key uint64, seed uint32) uint32 {
const (
m = uint32(0x5bd1e995)
r = 24
)
k1 := uint32(ns >> 32)
k2 := uint32(ns)
k3 := uint32(key >> 32)
k4 := uint32(key)
k1 *= m
k1 ^= k1 >> r
k1 *= m
k2 *= m
k2 ^= k2 >> r
k2 *= m
k3 *= m
k3 ^= k3 >> r
k3 *= m
k4 *= m
k4 ^= k4 >> r
k4 *= m
h := seed
h *= m
h ^= k1
h *= m
h ^= k2
h *= m
h ^= k3
h *= m
h ^= k4
h ^= h >> 13
h *= m
h ^= h >> 15
return h
}
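To make the cache API above concrete, here is a minimal sketch (again, not part of the vendored sources) that exercises Cache, Handle, and Delete with a nil Cacher, so nodes live only while handles reference them; the namespace, key, and strings are arbitrary. A real Cacher, such as the LRU implementation defined elsewhere in this package (not shown in this excerpt), could be passed to NewCache instead.

package main

import (
    "fmt"

    "github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
    c := cache.NewCache(nil) // no Cacher: nodes live only while referenced

    // Get creates the node (ns=1, key=42) via setFunc on a miss.
    h1 := c.Get(1, 42, func() (int, cache.Value) {
        return 16, "cached payload" // declared size and value
    })
    fmt.Println(h1.Value()) // "cached payload"

    // A second Get with a nil setFunc is a pure lookup; it hits while h1
    // still holds a reference.
    if h2 := c.Get(1, 42, nil); h2 != nil {
        fmt.Println("hit:", h2.Value())
        h2.Release()
    }

    // Delete registers onDel on the node (and would ban it in a Cacher);
    // onDel fires once the last handle is released and the node is dropped.
    c.Delete(1, 42, func() { fmt.Println("node released") })

    h1.Release() // last reference gone; the node is now removed from the map
    fmt.Println("nodes:", c.Nodes(), "size:", c.Size())

    _ = c.Close()
}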

Some files were not shown because too many files have changed in this diff.