diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e3b906d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,10 @@
+zabov
+killfile
+killfile/*
+db/zabov.db
+binaries
+binaries/*
+build.sh
+.vscode
+.vscode/*
+
diff --git a/00.database.go b/00.database.go
new file mode 100644
index 0000000..3f5e82a
--- /dev/null
+++ b/00.database.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/syndtr/goleveldb/leveldb"
+)
+
+//MyZabovKDB is the storage where we'll put domains to block
+var MyZabovKDB *leveldb.DB
+
+//MyZabovCDB is the storage where we'll put domains to cache
+var MyZabovCDB *leveldb.DB
+
+func init() {
+
+ var err error
+
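+	// Start from a clean slate at every boot: both the killfile and the
+	// cache DB are wiped and rebuilt from scratch.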
+ os.RemoveAll("./db")
+
+ os.MkdirAll("./db", 0755)
+
+ MyZabovKDB, err = leveldb.OpenFile("./db/killfile", nil)
+ if err != nil {
+ fmt.Println("Cannot create Killfile db: ", err.Error())
+ } else {
+ fmt.Println("Killfile DB created")
+ }
+
+ MyZabovCDB, err = leveldb.OpenFile("./db/cache", nil)
+ if err != nil {
+ fmt.Println("Cannot create Cache db: ", err.Error())
+ } else {
+ fmt.Println("Cache DB created")
+ }
+
+}
diff --git a/00.memory.go b/00.memory.go
new file mode 100644
index 0000000..762ac9f
--- /dev/null
+++ b/00.memory.go
@@ -0,0 +1,26 @@
+package main
+
+import (
+ "fmt"
+ "runtime"
+ "time"
+)
+
+func init() {
+
+ fmt.Println("Garbage Collector Thread Starting")
+
+ go memoryCleanerThread()
+
+}
+
+func memoryCleanerThread() {
+
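+	// Force a garbage-collection cycle every 10 minutes, presumably to keep
+	// heap usage low on small devices; runtime.GC() blocks until GC completes.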
+ for {
+ time.Sleep(10 * time.Minute)
+ fmt.Println("Time to clean memory...")
+ runtime.GC()
+ fmt.Println("Garbage Collection done.")
+ }
+
+}
diff --git a/01.conf.go b/01.conf.go
new file mode 100644
index 0000000..45d1cf8
--- /dev/null
+++ b/01.conf.go
@@ -0,0 +1,69 @@
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+
+ "github.com/miekg/dns"
+)
+
+func init() {
+
+ //ZabovConf describes the Json we use for configuration
+ type ZabovConf struct {
+ Zabov struct {
+ Port string `json:"port"`
+ Proto string `json:"proto"`
+ Ipaddr string `json:"ipaddr"`
+ Upstream string `json:"upstream"`
+ Cachettl int `json:"cachettl"`
+ Killfilettl int `json:"killfilettl"`
+ Singlefilters string `json:"singlefilters"`
+ Doublefilters string `json:"doublefilters"`
+ Blackholeip string `json:"blackholeip"`
+ Hostsfile string `json:"hostsfile"`
+ } `json:"zabov"`
+ }
+
+ var MyConf ZabovConf
+
+ file, err := ioutil.ReadFile("config.json")
+
+ if err != nil {
+ log.Println("Cannot open config file", err.Error())
+ os.Exit(1)
+ }
+
+	err = json.Unmarshal(file, &MyConf)
+
+ if err != nil {
+ log.Println("Cannot marshal json: ", err.Error())
+ os.Exit(1)
+ }
+
+	// the configuration is parsed: now we move the values into our globals
+	fmt.Println("Applying configuration...")
+
+ ZabovPort := MyConf.Zabov.Port
+ ZabovType := MyConf.Zabov.Proto
+ ZabovAddr := MyConf.Zabov.Ipaddr
+ ZabovUpDNS = MyConf.Zabov.Upstream
+ ZabovSingleBL = MyConf.Zabov.Singlefilters
+ ZabovDoubleBL = MyConf.Zabov.Doublefilters
+ ZabovAddBL = MyConf.Zabov.Blackholeip
+ ZabovCacheTTL = MyConf.Zabov.Cachettl
+ ZabovKillTTL = MyConf.Zabov.Killfilettl
+ ZabovHostsFile = MyConf.Zabov.Hostsfile
+
+ zabovString := ZabovAddr + ":" + ZabovPort
+
+ MyDNS = new(dns.Server)
+ MyDNS.Addr = zabovString
+ MyDNS.Net = ZabovType
+
+ ZabovDNSArray = fileByLines(ZabovUpDNS)
+
+}
diff --git a/01.dnscheck.go b/01.dnscheck.go
new file mode 100644
index 0000000..b8579ad
--- /dev/null
+++ b/01.dnscheck.go
@@ -0,0 +1,37 @@
+package main
+
+import (
+ "fmt"
+ "net/http"
+ "time"
+)
+
+//NetworkUp tells the system if the network is up or not
+var NetworkUp bool
+
+func checkNetworkUp() bool {
+	// RFC 2606 reserved domain: should always be reachable, unless the network is down.
+	resp, err := http.Get("http://example.com")
+	if err != nil {
+		return false
+	}
+	resp.Body.Close()
+	return true
+}
+
+func checkNetworkUpThread() {
+
+ ticker := time.NewTicker(2 * time.Minute)
+
+ for range ticker.C {
+ NetworkUp = checkNetworkUp()
+ }
+
+}
+
+func init() {
+
+ fmt.Println("Network Checker starting....")
+
+ go checkNetworkUpThread()
+
+}
diff --git a/01.killfile.go b/01.killfile.go
new file mode 100644
index 0000000..b5d914b
--- /dev/null
+++ b/01.killfile.go
@@ -0,0 +1,79 @@
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
+var zabovKbucket = []byte("killfile")
+
+type killfileItem struct {
+ Kdomain string
+ Ksource string
+}
+
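+// bChannel decouples blacklist parsing from disk writes: DomainKill only
+// enqueues items, and bWriteThread is the single consumer writing to LevelDB.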
+var bChannel chan killfileItem
+
+func init() {
+
+ bChannel = make(chan killfileItem, 1024)
+ fmt.Println("Initializing kill channel engine.")
+
+ go bWriteThread()
+
+}
+
+func bWriteThread() {
+
+ for item := range bChannel {
+
+ writeInKillfile(item.Kdomain, item.Ksource)
+ incrementStats("BL domains from "+item.Ksource, 1)
+ incrementStats("TOTAL", 1)
+
+ }
+
+}
+
+//DomainKill stores a domain name inside the killfile
+func DomainKill(s, durl string) {
+
+ if len(s) > 2 {
+
+ s = strings.ToLower(s)
+
+ var k killfileItem
+
+ k.Kdomain = s
+ k.Ksource = durl
+
+ bChannel <- k
+
+ }
+
+}
+
+func writeInKillfile(key, value string) {
+
+ stK := []byte(key)
+ stV := []byte(value)
+
+ err := MyZabovKDB.Put(stK, stV, nil)
+ if err != nil {
+ fmt.Println("Cannot write to Killfile DB: ", err.Error())
+ }
+
+}
+
+func domainInKillfile(domain string) bool {
+
+ s := strings.ToLower(domain)
+
+ has, err := MyZabovKDB.Has([]byte(s), nil)
+ if err != nil {
+ fmt.Println("Cannot read from Killfile DB: ", err.Error())
+ }
+
+ return has
+
+}
diff --git a/01.stats.go b/01.stats.go
new file mode 100644
index 0000000..61e2584
--- /dev/null
+++ b/01.stats.go
@@ -0,0 +1,100 @@
+package main
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+type send struct {
+ Payload string
+ Number int64
+ Operation string
+}
+
+//ZabovStats is used to keep statistics to print
+var ZabovStats map[string]int64
+
+var stats chan send
+
+func init() {
+
+ stats = make(chan send, 1024)
+
+ ZabovStats = make(map[string]int64)
+
+ fmt.Println("Initializing stats engine.")
+ go reportPrintThread()
+ go statsThread()
+}
+
+func statsPrint() {
+ fmt.Println()
+ stat, _ := json.Marshal(ZabovStats)
+ fmt.Println(jsonPrettyPrint(string(stat)))
+ fmt.Println()
+}
+
+func incrementStats(key string, value int64) {
+
+ var s send
+
+ s.Payload = key
+ s.Number = value
+ s.Operation = "INC"
+
+ stats <- s
+
+}
+
+func setstatsvalue(key string, value int64) {
+
+ var s send
+
+ s.Payload = key
+ s.Number = value
+ s.Operation = "SET"
+
+ stats <- s
+
+}
+
+func reportPrintThread() {
+ for {
+ var s send
+ s.Operation = "PRI"
+ s.Payload = "-"
+ s.Number = 0
+ stats <- s
+ time.Sleep(2 * time.Minute)
+ }
+}
+
+func statsThread() {
+
+ fmt.Println("Starting Statistical Collection Thread")
+
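+	// Every update to ZabovStats flows through this one goroutine, so the map
+	// needs no locking: INC/SET/PRI messages are serialized by the channel.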
+ for item := range stats {
+
+ switch item.Operation {
+ case "INC":
+ ZabovStats[item.Payload] += item.Number
+ case "SET":
+ ZabovStats[item.Payload] = item.Number
+ case "PRI":
+ statsPrint()
+ }
+
+ }
+
+}
+
+func jsonPrettyPrint(in string) string {
+ var out bytes.Buffer
+ err := json.Indent(&out, []byte(in), "", "\t")
+ if err != nil {
+ return in
+ }
+ return out.String()
+}
diff --git a/02.cache.go b/02.cache.go
new file mode 100644
index 0000000..7c4e1b5
--- /dev/null
+++ b/02.cache.go
@@ -0,0 +1,104 @@
+package main
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "time"
+
+ "github.com/miekg/dns"
+)
+
+
+type cacheItem struct {
+ Query []byte
+ Date time.Time
+}
+
+//DomainCache stores a DNS response for a domain inside the cache
+func DomainCache(s string, resp *dns.Msg) {
+
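+	// The response is packed to DNS wire format and gob-encoded together with
+	// a timestamp, so reads can expire entries older than ZabovCacheTTL hours.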
+ var domain2cache cacheItem
+ var err error
+ var dom2 bytes.Buffer
+ enc := gob.NewEncoder(&dom2)
+
+ domain2cache.Query, err = resp.Pack()
+ if err != nil {
+ fmt.Println("Problems packing the response: ", err.Error())
+ }
+ domain2cache.Date = time.Now()
+
+ err = enc.Encode(domain2cache)
+
+ if err != nil {
+ fmt.Println("Cannot GOB the domain to cache: ", err.Error())
+ }
+
+ cacheDomain(s, dom2.Bytes())
+
+}
+
+func cacheDomain(key string, domain []byte) {
+
+ err := MyZabovCDB.Put([]byte(key), domain, nil)
+ if err != nil {
+ fmt.Println("Cannot write to Cache DB: ", err.Error())
+ }
+
+}
+
+//GetDomainFromCache retrieves a cached DNS response, or nil if missing or expired
+func GetDomainFromCache(s string) *dns.Msg {
+
+ ret := new(dns.Msg)
+ var cache bytes.Buffer
+ dec := gob.NewDecoder(&cache)
+ var record cacheItem
+ var conf []byte
+ var errDB error
+
+	if !domainInCache(s) {
+ return nil
+ }
+
+	conf, errDB = MyZabovCDB.Get([]byte(s), nil)
+	if errDB != nil {
+		fmt.Println("Cannot read from Cache DB: ", errDB.Error())
+		return nil
+	}
+
+ cache.Write(conf)
+
+ err := dec.Decode(&record)
+ if err != nil {
+ fmt.Println("Decode error :", err.Error())
+ return nil
+ }
+
+ if time.Since(record.Date) > (time.Duration(ZabovCacheTTL) * time.Hour) {
+ return nil
+ }
+
+ err = ret.Unpack(record.Query)
+ if err != nil {
+ fmt.Println("Problem unpacking response: ", err.Error())
+ return nil
+ }
+
+ return ret
+
+}
+
+func domainInCache(domain string) bool {
+
+ has, err := MyZabovCDB.Has([]byte(domain), nil)
+ if err != nil {
+ fmt.Println("Cannot search Cache DB: ", err.Error())
+ return false
+ }
+
+ return has
+
+}
diff --git a/Dockerfile.amd64 b/Dockerfile.amd64
new file mode 100644
index 0000000..0632b7b
--- /dev/null
+++ b/Dockerfile.amd64
@@ -0,0 +1,18 @@
+FROM golang:1.14.1 AS builder
+RUN apt update && apt install -y git
+RUN mkdir -p /go/src/zabov
+RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
+WORKDIR /go/src/zabov
+ENV GO111MODULE=auto
+RUN go get ; go build -mod=vendor
+
+FROM debian:latest
+RUN apt update
+RUN apt upgrade -y
+RUN apt install ca-certificates -y
+RUN mkdir -p /opt/zabov
+WORKDIR /opt/zabov
+COPY --from=builder /go/src/zabov /opt/zabov
+EXPOSE 53/udp
+ENTRYPOINT ["/opt/zabov/zabov"]
+
diff --git a/Dockerfile.arm32v7 b/Dockerfile.arm32v7
new file mode 100644
index 0000000..606e3d4
--- /dev/null
+++ b/Dockerfile.arm32v7
@@ -0,0 +1,17 @@
+FROM arm32v7/golang:1.14.1 AS builder
+RUN apt update && apt install -y git
+RUN mkdir -p /go/src/zabov
+RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
+WORKDIR /go/src/zabov
+ENV GO111MODULE=auto
+RUN go get ; go build -mod=vendor
+
+FROM arm32v7/debian:latest
+RUN apt update
+RUN apt upgrade -y
+RUN apt install ca-certificates -y
+RUN mkdir -p /opt/zabov
+WORKDIR /opt/zabov
+COPY --from=builder /go/src/zabov /opt/zabov
+EXPOSE 53/udp
+ENTRYPOINT ["/opt/zabov/zabov"]
diff --git a/Dockerfile.arm64v8 b/Dockerfile.arm64v8
new file mode 100644
index 0000000..aa716eb
--- /dev/null
+++ b/Dockerfile.arm64v8
@@ -0,0 +1,17 @@
+FROM arm64v8/golang:1.14.1 AS builder
+RUN apt update && apt install -y git
+RUN mkdir -p /go/src/zabov
+RUN git clone https://git.keinpfusch.net/loweel/zabov /go/src/zabov
+WORKDIR /go/src/zabov
+ENV GO111MODULE=auto
+RUN go get ; go build -mod=vendor
+
+FROM arm64v8/debian:latest
+RUN apt update
+RUN apt upgrade -y
+RUN apt install ca-certificates -y
+RUN mkdir -p /opt/zabov
+WORKDIR /opt/zabov
+COPY --from=builder /go/src/zabov /opt/zabov
+EXPOSE 53/udp
+ENTRYPOINT ["/opt/zabov/zabov"]
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..fd3b63f
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,14 @@
+ Copyright (C) 2020 loweel@keinpfusch.net
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..6a84604
--- /dev/null
+++ b/README.md
@@ -0,0 +1,91 @@
+# zabov
+
+A tiny replacement for the Pi-hole DNS filter.
+
+Still a work in progress, but usable.
+
+The idea is to produce a very simple, no-web-interface DNS blocker.
+
+# INSTALL
+
+Zabov requires golang 1.13 or later.
+
+```
+git clone https://git.keinpfusch.net/Loweel/zabov.git
+cd zabov
+go get
+go build -mod=vendor
+```
+
+
+Then edit config.json: note that config.json must be in the same folder as the executable you run.
+
+
+Just a few words about "singlefilters" and "doublefilters":
+
+Data must be downloaded from the URLs of blacklist maintainers. They may come in different formats.
+
+There are two kinds of blacklists:
+
+One is the format zabov calls "singlefilter", where we find a single column full of domains:
+
+```
+domain1.com
+domain2.com
+domain3.com
+```
+
+The second is the format zabov calls "doublefilter" (a file in "/etc/hosts" format, to be precise), where there is an IP, usually localhost or 0.0.0.0, and then the domain:
+
+```
+127.0.0.1 domain1.com
+127.0.0.1 domain2.com
+127.0.0.1 domain3.com
+```
+
+This is why the configuration file has two separate items.
+
+The config file should look like:
+
+```
+{
+  "zabov": {
+    "port":"53",
+    "proto":"udp",
+    "ipaddr":"127.0.0.1",
+    "upstream":"./dns-upstream.txt",
+    "cachettl": 4,
+    "killfilettl": 12,
+    "singlefilters":"./urls-domains.txt",
+    "doublefilters":"./urls-hosts.txt",
+    "blackholeip":"127.0.0.1",
+    "hostsfile":"./urls-local.txt"
+  }
+}
+```
+
+Where:
+
+- port is the port number. Usually it is 53; you can change it for docker, if you like
+- proto is the protocol. Choices are "udp", "tcp", "tcp/udp"
+- ipaddr is the IP address to listen on. May be empty (which results in listening on 0.0.0.0), to avoid issues with docker
+- upstream: file containing all the DNS servers we want to query, one per line, in IP:PORT format (see the example below)
+- cachettl: amount of time DNS records are kept in cache (in hours)
+- killfilettl: refresh time for the _killfiles_ (in hours)
+- singlefilters: name of the file listing blacklist URLs that follow the "singlefilter" schema (one URL per line)
+- doublefilters: name of the file listing blacklist URLs that follow the "doublefilter" schema (one URL per line)
+- blackholeip: IP address to return for blocked domains. This is configurable because you may want to avoid MX issues, mail loops on localhost, or a clash with a web server running on localhost
+- hostsfile: path of your local blacklist file: despite the name, this is in the "singlefilter" format, meaning one domain per line, unlike a hosts file
+
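+For reference, here are the first few lines of the shipped dns-upstream.txt:
+
+```
+194.150.168.168:53
+194.25.0.68:53
+141.1.1.1:53
+```
+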
+# DOCKER
+Multistage Dockerfiles are provided for AMD64, ARMv7 and ARM64v8.
+
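+To build and run one of them, something like the following should work (a sketch, not a tested recipe; adjust the image tag and port mapping to your setup):
+
+```
+docker build -t zabov -f Dockerfile.amd64 .
+docker run -d -p 53:53/udp zabov
+```
+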
+# TODO:
+
+- ~~caching~~
+- monitoring port
+
+
diff --git a/adlist_hosts.go b/adlist_hosts.go
new file mode 100644
index 0000000..5e9150d
--- /dev/null
+++ b/adlist_hosts.go
@@ -0,0 +1,96 @@
+package main
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+
+ "strings"
+ "time"
+)
+
+func init() {
+ go downloadDoubleThread()
+}
+
+//DoubleIndexFilter downloads a "doublefilter" (hosts-format) blacklist and feeds its domains to the killfile
+func DoubleIndexFilter(durl string) error {
+
+ fmt.Println("Retrieving HostFile from: ", durl)
+
+ var err error
+
+ // Get the data
+ resp, err := http.Get(durl)
+ if err != nil {
+ fmt.Println("HTTP problem: ", err)
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 200 { // OK
+ fmt.Println(durl + " Response: OK")
+ } else {
+ fmt.Println("Server <"+durl+"> returned status code: ", resp.StatusCode)
+ return errors.New("Server <" + durl + "> returned status code: " + resp.Status)
+ }
+
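+	// Hosts-format lines look like "0.0.0.0 ads.example.com": we split on spaces
+	// and tabs, and keep the domain only if the first field parses as an IP.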
+ scanner := bufio.NewScanner(resp.Body)
+ splitter := func(c rune) bool {
+ return c == ' ' || c == '\t'
+ }
+
+ var numLines int64
+
+ for scanner.Scan() {
+
+ line := scanner.Text()
+
+ h := strings.FieldsFunc(line, splitter)
+
+ if h == nil {
+ continue
+ }
+
+ if len(h) < 2 {
+ continue
+ }
+
+ if net.ParseIP(h[0]) != nil {
+ DomainKill(h[1], durl)
+
+ // fmt.Println("MATCH: ", h[1])
+ numLines++
+ } else {
+ incrementStats("Malformed HostLines "+durl, 1)
+ // fmt.Println("Malformed line: <" + line + ">")
+ }
+
+ }
+
+ fmt.Println("Finished to parse: "+durl+" ,number of lines", numLines)
+
+ return err
+
+}
+
+func getDoubleFilters() {
+
+ s := fileByLines(ZabovDoubleBL)
+
+ for _, a := range s {
+ DoubleIndexFilter(a)
+ }
+
+}
+
+func downloadDoubleThread() {
+ fmt.Println("Starting updater of DOUBLE lists, each (hours):", ZabovKillTTL)
+ for {
+ getDoubleFilters()
+ time.Sleep(time.Duration(ZabovKillTTL) * time.Hour)
+ }
+
+}
diff --git a/adlist_single.go b/adlist_single.go
new file mode 100644
index 0000000..6945c5e
--- /dev/null
+++ b/adlist_single.go
@@ -0,0 +1,93 @@
+package main
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+)
+
+func init() {
+ go downloadThread()
+}
+
+//SingleIndexFilter downloads a "singlefilter" (one domain per line) blacklist and feeds its domains to the killfile
+func SingleIndexFilter(durl string) error {
+
+ fmt.Println("Retrieving DomainFile from: ", durl)
+
+ var err error
+
+ // Get the data
+ resp, err := http.Get(durl)
+ if err != nil {
+ fmt.Println("HTTP Problem: ", err)
+ return err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode == 200 { // OK
+ fmt.Println(durl + " Response: OK")
+ } else {
+ fmt.Println("Server <"+durl+"> returned status code: ", resp.StatusCode)
+ return errors.New("Server <" + durl + "> returned status code: " + resp.Status)
+ }
+
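+	// Domain-format lists carry one bare domain per line; any line containing
+	// "#" is counted as malformed and skipped.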
+ scanner := bufio.NewScanner(resp.Body)
+ splitter := func(c rune) bool {
+ return c == ' ' || c == '\t'
+ }
+
+ var numLines int64
+
+ for scanner.Scan() {
+
+ line := scanner.Text()
+
+ h := strings.FieldsFunc(line, splitter)
+
+ if h == nil {
+ continue
+ }
+
+ if len(h) < 1 {
+ continue
+ }
+
+ if !strings.Contains(h[0], "#") {
+ DomainKill(h[0], durl)
+ // fmt.Println("MATCH: ", h[1])
+ numLines++
+ } else {
+ incrementStats("Malformed DomainLines "+durl, 1)
+ // fmt.Println("Malformed line: <" + line + ">")
+ }
+
+ }
+
+ fmt.Println("Finished to parse: "+durl+" ,number of lines", numLines)
+
+ return err
+
+}
+
+func getSingleFilters() {
+
+ s := fileByLines(ZabovSingleBL)
+
+ for _, a := range s {
+ SingleIndexFilter(a)
+ }
+
+}
+
+func downloadThread() {
+ fmt.Println("Starting updater of SINGLE lists, each (hours): ", ZabovKillTTL)
+ for {
+ getSingleFilters()
+ time.Sleep(time.Duration(ZabovKillTTL) * time.Hour)
+ }
+
+}
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..79394d9
--- /dev/null
+++ b/config.json
@@ -0,0 +1,15 @@
+{
+ "zabov": {
+ "port":"53",
+ "proto":"udp",
+ "ipaddr":"0.0.0.0",
+ "upstream":"./dns-upstream.txt" ,
+ "cachettl": 1,
+ "killfilettl": 12,
+ "singlefilters":"./urls-domains.txt" ,
+ "doublefilters":"./urls-hosts.txt",
+ "blackholeip":"127.0.0.1",
+ "hostsfile":"./urls-local.txt"
+ }
+
+}
diff --git a/dns-upstream.txt b/dns-upstream.txt
new file mode 100644
index 0000000..83a6424
--- /dev/null
+++ b/dns-upstream.txt
@@ -0,0 +1,387 @@
+194.150.168.168:53
+194.25.0.68:53
+141.1.1.1:53
+213.239.204.35:53
+194.25.0.52:53
+212.211.132.4:53
+213.68.194.51:53
+195.243.214.4:53
+141.1.27.249:53
+80.237.197.14:53
+217.28.98.62:53
+82.96.64.2:53
+82.96.65.2:53
+194.25.0.60:53
+193.101.111.10:53
+193.101.111.20:53
+192.76.144.66:53
+217.69.169.25:53
+85.88.19.10:53
+85.88.19.11:53
+85.214.20.141:53
+194.169.239.10:53
+194.172.160.4:53
+212.102.225.2:53
+212.51.16.1:53
+212.51.17.1:53
+212.66.129.98:53
+212.89.130.180:53
+213.209.122.11:53
+213.23.108.129:53
+91.204.4.133:53
+84.200.69.80:53
+62.146.63.211:53
+212.77.178.83:53
+78.46.58.246:53
+85.214.102.25:53
+87.106.62.128:53
+81.20.87.84:53
+81.20.87.181:53
+217.5.159.227:53
+185.38.9.99:53
+62.146.25.130:53
+37.59.218.50:53
+212.184.191.193:53
+213.23.143.154:53
+178.32.187.10:53
+62.154.214.86:53
+85.214.151.164:53
+5.175.225.2:53
+193.105.38.142:53
+145.253.183.21:53
+178.15.146.43:53
+62.245.233.22:53
+212.224.71.71:53
+213.136.78.213:53
+80.156.196.196:53
+193.158.99.67:53
+194.95.75.230:53
+212.184.191.2:53
+213.138.38.22:53
+195.145.80.150:53
+139.18.25.34:53
+79.143.180.116:53
+213.240.172.200:53
+217.160.238.238:53
+213.61.185.238:53
+84.201.0.34:53
+82.194.105.219:53
+62.157.89.178:53
+46.189.26.123:53
+85.214.208.8:53
+87.239.128.130:53
+78.111.65.40:53
+85.214.69.126:53
+109.75.29.1:53
+80.81.19.226:53
+81.169.162.74:53
+217.14.164.35:53
+5.9.172.92:53
+62.225.102.180:53
+217.7.71.203:53
+217.6.71.61:53
+62.154.138.43:53
+146.0.38.140:53
+78.111.67.10:53
+217.6.110.20:53
+87.245.18.221:53
+62.225.66.19:53
+81.169.212.52:53
+178.162.205.123:53
+212.227.83.183:53
+139.18.25.33:53
+193.29.2.4:53
+212.91.246.11:53
+62.153.141.15:53
+148.251.120.228:53
+62.154.253.226:53
+194.25.218.2:53
+194.174.73.36:53
+62.245.226.182:53
+87.234.222.68:53
+194.25.169.130:53
+62.225.15.253:53
+176.94.20.4:53
+188.40.115.29:53
+188.40.115.22:53
+194.187.240.10:53
+80.150.109.197:53
+217.86.149.109:53
+91.208.193.1:53
+195.243.99.35:53
+62.225.102.177:53
+178.210.102.9:53
+80.228.113.125:53
+178.210.102.12:53
+130.255.121.9:53
+212.204.56.218:53
+37.59.218.151:53
+80.148.52.109:53
+194.30.174.222:53
+5.199.141.5:53
+94.135.173.22:53
+88.79.208.11:53
+141.16.180.9:53
+82.193.241.125:53
+212.8.216.37:53
+109.75.29.2:53
+78.46.17.82:53
+81.169.185.49:53
+217.244.13.14:53
+93.104.209.27:53
+79.143.182.174:53
+81.20.82.131:53
+213.136.68.181:53
+213.136.68.189:53
+193.107.145.233:53
+195.145.241.3:53
+80.242.182.182:53
+193.159.181.250:53
+195.243.124.75:53
+62.159.104.102:53
+92.222.202.244:53
+85.214.254.13:53
+85.114.128.115:53
+145.253.176.50:53
+217.7.63.1:53
+78.35.40.149:53
+81.169.187.253:53
+94.249.192.20:53
+85.214.43.157:53
+80.149.83.60:53
+178.210.102.225:53
+178.210.102.193:53
+62.154.236.126:53
+213.183.185.50:53
+212.60.229.242:53
+80.146.192.66:53
+79.133.62.62:53
+178.33.33.219:53
+62.245.225.225:53
+46.38.235.212:53
+213.136.88.31:53
+212.66.135.250:53
+194.231.138.26:53
+62.225.1.33:53
+80.148.34.131:53
+94.23.163.114:53
+80.64.189.94:53
+81.169.241.28:53
+212.38.26.132:53
+62.91.19.67:53
+87.239.128.25:53
+212.185.196.10:53
+89.221.2.171:53
+217.243.239.11:53
+213.136.69.214:53
+213.138.56.75:53
+212.122.52.11:53
+46.4.166.113:53
+77.37.30.12:53
+194.187.242.10:53
+188.40.132.212:53
+194.150.168.169:53
+85.25.105.193:53
+185.93.180.131:53
+109.234.249.10:53
+109.234.248.10:53
+138.201.120.250:53
+81.3.27.54:53
+78.46.231.161:53
+78.46.231.162:53
+212.51.16.197:53
+212.28.34.65:53
+148.251.24.48:53
+212.75.32.4:53
+91.103.112.150:53
+217.69.169.26:53
+195.63.103.144:53
+213.209.121.30:53
+88.79.149.4:53
+185.194.143.243:53
+46.182.19.48:53
+217.111.24.246:53
+62.96.37.74:53
+213.61.64.174:53
+213.61.65.226:53
+62.96.190.134:53
+217.111.123.166:53
+213.61.176.118:53
+185.216.33.82:53
+185.220.70.50:53
+188.138.57.95:53
+195.145.137.164:53
+195.167.223.164:53
+195.226.69.82:53
+195.37.174.194:53
+195.4.138.12:53
+195.63.61.189:53
+212.124.35.25:53
+212.184.191.100:53
+212.38.2.130:53
+212.51.16.193:53
+212.66.129.107:53
+212.8.216.41:53
+212.89.128.28:53
+213.133.116.14:53
+213.166.247.100:53
+217.243.173.82:53
+217.5.182.118:53
+217.7.80.40:53
+217.7.81.136:53
+217.9.50.199:53
+46.237.220.2:53
+5.189.179.105:53
+52.28.79.14:53
+52.29.2.17:53
+62.146.202.2:53
+62.146.2.48:53
+62.153.122.2:53
+62.153.237.200:53
+62.153.237.201:53
+62.154.139.99:53
+62.154.159.12:53
+62.154.159.5:53
+62.154.160.3:53
+62.209.40.75:53
+62.217.61.162:53
+62.245.225.55:53
+78.111.224.224:53
+78.111.226.226:53
+78.138.80.42:53
+80.149.112.139:53
+80.156.6.209:53
+80.190.209.218:53
+80.228.231.122:53
+80.228.231.48:53
+80.245.65.100:53
+81.14.182.169:53
+81.27.162.100:53
+83.97.23.178:53
+83.97.23.226:53
+84.16.240.43:53
+85.214.98.185:53
+89.19.228.52:53
+89.200.168.203:53
+91.217.86.4:53
+93.104.195.2:53
+94.247.43.254:53
+109.234.248.8:53
+131.220.20.199:53
+131.220.23.123:53
+144.76.173.169:53
+144.76.83.104:53
+148.251.92.241:53
+176.9.136.236:53
+195.10.195.195:53
+173.212.249.41:53
+85.214.41.155:53
+54.37.75.2:53
+194.55.13.75:53
+5.189.138.153:53
+159.69.51.18:53
+51.75.77.179:53
+138.201.169.84:53
+138.201.239.66:53
+138.68.106.109:53
+145.253.109.162:53
+159.69.68.181:53
+167.86.78.56:53
+173.212.208.116:53
+173.212.218.206:53
+173.212.219.129:53
+173.212.239.87:53
+173.212.242.89:53
+173.212.244.78:53
+173.249.41.233:53
+173.249.48.6:53
+176.9.233.171:53
+176.9.58.218:53
+178.162.199.27:53
+178.162.208.135:53
+178.238.230.127:53
+178.238.235.218:53
+18.195.121.224:53
+185.139.98.100:53
+185.40.135.11:53
+185.53.169.22:53
+185.90.131.194:53
+188.40.239.99:53
+188.68.35.145:53
+192.162.85.48:53
+193.159.232.5:53
+194.77.237.31:53
+194.77.253.32:53
+195.201.192.29:53
+195.202.52.30:53
+195.243.101.5:53
+207.180.203.42:53
+207.180.243.200:53
+207.180.247.212:53
+213.136.71.68:53
+213.136.77.39:53
+213.144.24.234:53
+217.147.96.210:53
+217.182.198.203:53
+217.6.131.248:53
+217.6.247.237:53
+217.6.64.5:53
+217.79.177.220:53
+46.163.119.155:53
+46.228.199.116:53
+5.175.26.208:53
+51.77.65.15:53
+5.189.133.151:53
+5.189.141.216:53
+5.189.186.154:53
+5.189.186.93:53
+5.189.187.34:53
+5.199.141.30:53
+5.45.96.220:53
+62.138.20.211:53
+62.144.82.252:53
+62.153.165.107:53
+62.153.201.91:53
+62.154.214.84:53
+62.157.242.85:53
+79.143.177.243:53
+79.143.183.45:53
+80.156.198.146:53
+80.156.6.206:53
+80.237.207.100:53
+80.82.223.94:53
+81.169.215.29:53
+81.169.223.126:53
+81.169.230.157:53
+81.20.80.79:53
+83.236.183.211:53
+84.16.240.224:53
+85.214.224.76:53
+85.214.238.190:53
+85.214.246.133:53
+85.214.62.160:53
+85.93.91.101:53
+87.106.63.208:53
+87.118.126.225:53
+88.198.37.146:53
+88.99.66.18:53
+89.163.150.209:53
+89.163.220.114:53
+89.19.236.152:53
+93.104.213.74:53
+93.186.196.137:53
+93.190.71.172:53
+94.177.246.221:53
+80.241.218.68:53
+172.105.81.90:53
+172.105.81.92:53
+84.200.70.40:53
+94.16.114.254:53
+93.90.207.192:53
+93.90.201.211:53
+144.91.68.146:53
+176.9.37.132:53
+176.9.93.198:53
+176.9.1.117:53
+144.91.115.47:53
+91.237.100.4:53
\ No newline at end of file
diff --git a/dns_client.go b/dns_client.go
new file mode 100644
index 0000000..6aa97a0
--- /dev/null
+++ b/dns_client.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "math/rand"
+ "strings"
+
+ "github.com/miekg/dns"
+)
+
+//ForwardQuery forwards the query to a randomly chosen upstream server,
+//retrying until one answers; successful answers are cached
+func ForwardQuery(query *dns.Msg) *dns.Msg {
+
+ go incrementStats("ForwardQueries", 1)
+
+ r := new(dns.Msg)
+ r.SetReply(query)
+ r.Authoritative = true
+
+ fqdn := strings.TrimRight(query.Question[0].Name, ".")
+
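+	// The cache key prefixes the numeric query type to the fqdn ("1.example.com"
+	// for an A query), so different record types are cached independently.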
+ lfqdn := fmt.Sprintf("%d", query.Question[0].Qtype) + "." + fqdn
+ if cached := GetDomainFromCache(lfqdn); cached != nil {
+ go incrementStats("CacheHit", 1)
+ cached.SetReply(query)
+ cached.Authoritative = true
+ return cached
+
+ }
+
+ c := new(dns.Client)
+
+ c.ReadTimeout = 500 * time.Millisecond
+ c.WriteTimeout = 500 * time.Millisecond
+
+ for {
+ // round robin with retry
+
+ if !NetworkUp {
+ time.Sleep(10 * time.Second)
+ go incrementStats("Network Problems ", 1)
+ continue
+ }
+
+ d := oneTimeDNS()
+
+ in, _, err := c.Exchange(query, d)
+ if err != nil {
+ fmt.Printf("Problem with DNS %s : %s\n", d, err.Error())
+ go incrementStats("DNS Problems "+d, 1)
+ continue
+ } else {
+ go incrementStats(d, 1)
+ in.SetReply(query)
+ in.Authoritative = true
+ go DomainCache(lfqdn, in)
+ return in
+
+ }
+
+ }
+
+}
+
+func init() {
+
+ fmt.Println("DNS client engine starting")
+ NetworkUp = checkNetworkUp()
+
+ if NetworkUp {
+ fmt.Println("[OK]: Network is UP")
+ } else {
+ fmt.Println("[KO] Network is DOWN: system will check again in 2 minutes")
+ }
+
+}
+
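+// oneTimeDNS picks one upstream resolver at random for each query, spreading
+// the load across every server listed in the upstream file.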
+func oneTimeDNS() (upstream string) {
+
+	upl := ZabovDNSArray
+
+	if len(upl) < 1 {
+		fmt.Println("No DNS defined, using default 127.0.0.53:53. Hope it works!")
+		return "127.0.0.53:53"
+	}
+
+	upstream = upl[rand.Intn(len(upl))]
+
+	return
+
+}
diff --git a/dns_handler.go b/dns_handler.go
new file mode 100644
index 0000000..9cf0fb1
--- /dev/null
+++ b/dns_handler.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+ "net"
+ "strings"
+
+ "github.com/miekg/dns"
+)
+
+func (mydns *handler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
+ go incrementStats("TotalQueries", 1)
+
+	remIP, _, e := net.SplitHostPort(w.RemoteAddr().String())
+	if e == nil {
+		go incrementStats("CLIENT: "+remIP, 1)
+	}
+
+ msg := dns.Msg{}
+ msg.SetReply(r)
+
+	switch r.Question[0].Qtype {
+	case dns.TypeA:
+		msg.Authoritative = true
+		domain := msg.Question[0].Name
+		fqdn := strings.TrimRight(domain, ".")
+
+		if domainInKillfile(fqdn) {
+			go incrementStats("Killed", 1)
+
+			// Answer with the configured blackhole IP and a short TTL.
+			msg.Answer = append(msg.Answer, &dns.A{
+				Hdr: dns.RR_Header{Name: domain, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+				A:   net.ParseIP(ZabovAddBL),
+			})
+			w.WriteMsg(&msg)
+		} else {
+			ret := ForwardQuery(r)
+			w.WriteMsg(ret)
+		}
+	default:
+		ret := ForwardQuery(r)
+		w.WriteMsg(ret)
+	}
+
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..ea3296a
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,8 @@
+module zabov
+
+go 1.13
+
+require (
+ github.com/miekg/dns v1.1.27
+ github.com/syndtr/goleveldb v1.0.0
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..ddc346b
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,36 @@
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
+github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/hostfile.go b/hostfile.go
new file mode 100644
index 0000000..51e4508
--- /dev/null
+++ b/hostfile.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+)
+
+func init() {
+
+ fmt.Println("Ingesting local hosts file")
+ ingestLocalBlacklist()
+
+}
+
+func ingestLocalBlacklist() {
+
+	file, err := os.Open(ZabovHostsFile)
+	if err != nil {
+		fmt.Println(err.Error())
+		return
+	}
+	defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ d := scanner.Text()
+ DomainKill(d, ZabovHostsFile)
+ incrementStats("Blacklist", 1)
+
+ }
+
+ if err := scanner.Err(); err != nil {
+ fmt.Println(err.Error())
+ }
+
+}
+
+func fileByLines(filename string) (blurls []string) {
+
+	file, err := os.Open(filename)
+	if err != nil {
+		fmt.Println(err.Error())
+		return
+	}
+	defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ d := scanner.Text()
+ blurls = append(blurls, d)
+
+ }
+
+ if err := scanner.Err(); err != nil {
+ fmt.Println(err.Error())
+ }
+
+ return
+
+}
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..aaf1ebc
--- /dev/null
+++ b/main.go
@@ -0,0 +1,46 @@
+package main
+
+import (
+ "log"
+
+ "github.com/miekg/dns"
+)
+
+//MyDNS is my dns server
+var MyDNS *dns.Server
+
+//ZabovUpDNS is the path of the file listing the upstream DNS servers
+var ZabovUpDNS string
+
+//ZabovSingleBL is the path of the file listing URLs of blacklists in "singlefilter" format (one domain per line)
+var ZabovSingleBL string
+
+//ZabovDoubleBL is the path of the file listing URLs of blacklists in "doublefilter" format (IP followed by domain)
+var ZabovDoubleBL string
+
+//ZabovAddBL is the IP we answer with for blocked domains. Usually it is 127.0.0.1
+var ZabovAddBL string
+
+//ZabovCacheTTL is the number of hours we keep DNS records in cache
+var ZabovCacheTTL int
+
+//ZabovKillTTL is the number of hours between killfile refreshes
+var ZabovKillTTL int
+
+//ZabovHostsFile is the path of the file holding our local blacklist
+var ZabovHostsFile string
+
+//ZabovDNSArray holds all the upstream DNS servers we can query
+var ZabovDNSArray []string
+
+type handler struct{}
+
+func main() {
+
+	MyDNS.Handler = &handler{}
+	log.Printf("Starting listener on %s (%s)\n", MyDNS.Addr, MyDNS.Net)
+	if err := MyDNS.ListenAndServe(); err != nil {
+		log.Printf("Failed to start listener: %s\n", err.Error())
+	}
+}
diff --git a/urls-domains.txt b/urls-domains.txt
new file mode 100644
index 0000000..579b04a
--- /dev/null
+++ b/urls-domains.txt
@@ -0,0 +1,37 @@
+https://mirror1.malwaredomains.com/files/justdomains
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/adaway.org/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/adblock-nocoin-list/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/adguard-simplified/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/anudeepnd-adservers/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-ad/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-malvertising/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-malware/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/disconnect.me-tracking/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/easylist/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/easyprivacy/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/eth-phishing-detect/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.2o7net/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.dead/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.risk/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/fademind-add.spam/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/kadhosts/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomainlist.com/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomains.com-immortaldomains/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/malwaredomains.com-justdomains/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/matomo.org-spammers/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/mitchellkrogza-badd-boyz-hosts/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/pgl.yoyo.org/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/ransomwaretracker.abuse.ch/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/someonewhocares.org/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/spam404.com/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/stevenblack/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/winhelp2002.mvps.org/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/zerodot1-coinblockerlists-browser/list.txt
+https://raw.githubusercontent.com/hectorm/hmirror/master/data/zeustracker.abuse.ch/list.txt
+https://raw.githubusercontent.com/CHEF-KOCH/Audio-fingerprint-pages/master/AudioFp.txt
+https://raw.githubusercontent.com/CHEF-KOCH/Canvas-fingerprinting-pages/master/Canvas.txt
+https://raw.githubusercontent.com/CHEF-KOCH/WebRTC-tracking/master/WebRTC.txt
+https://raw.githubusercontent.com/CHEF-KOCH/CKs-FilterList/master/Anti-Corp/hosts/NSABlocklist.txt
+https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-blocklist.txt
+https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt
+https://www.stopforumspam.com/downloads/toxic_domains_whole.txt
diff --git a/urls-hosts.txt b/urls-hosts.txt
new file mode 100644
index 0000000..5218eaa
--- /dev/null
+++ b/urls-hosts.txt
@@ -0,0 +1,12 @@
+http://sysctl.org/cameleon/hosts
+https://www.malwaredomainlist.com/hostslist/hosts.txt
+https://adaway.org/hosts.txt
+https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
+https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/fakenews/hosts
+https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/gambling/hosts
+https://someonewhocares.org/hosts/hosts
+https://getadhell.com/standard-package.txt
+https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt
+https://raw.githubusercontent.com/notracking/hosts-blocklists/master/hostnames.txt
+https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt
+https://raw.githubusercontent.com/anudeepND/blacklist/master/facebook.txt
\ No newline at end of file
diff --git a/urls-local.txt b/urls-local.txt
new file mode 100644
index 0000000..910276b
--- /dev/null
+++ b/urls-local.txt
@@ -0,0 +1,8 @@
+blc.vodafone.com
+gab.com
+gab.ai
+freespeechextremist.com
+neckbeard.xyz
+funkwhale.it
+social.byoblu.com
+
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ //   [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ //     [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
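
The pattern-expansion commentary above is easier to follow in Go than in asm. The sketch below (editorial, not part of this diff) implements the byte-by-byte forward copy that verySlowForwardCopy performs; because it always runs forwards, an overlapping copy re-reads bytes it just wrote and replicates a short pattern:

```go
package main

import "fmt"

// forwardCopy mirrors the semantics of verySlowForwardCopy: copy length
// bytes into dst[d:] from offset bytes back, strictly front to back. When
// offset < length the ranges overlap, so freshly written bytes are re-read
// and the offset-byte pattern repeats.
func forwardCopy(dst []byte, d, offset, length int) {
	for i := 0; i < length; i++ {
		dst[d+i] = dst[d+i-offset]
	}
}

func main() {
	buf := make([]byte, 12)
	copy(buf, "ab")
	forwardCopy(buf, 2, 2, 10)
	fmt.Printf("%s\n", buf) // abababababab
}
```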
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
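
To see the tag layout in action, here is a hand-assembled block fed through the package's exported Decode (defined in decode.go, which reads the varint length header before calling the decode function above). A minimal sketch, assuming the vendored github.com/golang/snappy import path:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// 0x05: varint-encoded decoded length (5).
	// 0x10: literal tag with x = 4 (4<<2 | tagLiteral), so 5 literal bytes follow.
	src := []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}
	dst, err := snappy.Decode(nil, src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", dst) // hello
}
```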
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
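
A typical usage sketch (editorial, not part of this diff): sizing dst with MaxEncodedLen up front means Encode writes into the caller's buffer instead of allocating a new one on every call:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	data := []byte("hello hello hello hello hello hello")

	// Pre-size the buffer so Encode never needs to allocate; the returned
	// slice is a sub-slice of buf.
	buf := make([]byte, snappy.MaxEncodedLen(len(data)))
	enc := snappy.Encode(buf, data)
	fmt.Printf("%d -> %d bytes\n", len(data), len(enc))
}
```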
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
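
As a sanity check of that formula: srcLen = 65536 (maxBlockSize, defined in snappy.go later in this diff) gives 32 + 65536 + 65536/6 = 32 + 65536 + 10922 = 76490, which is exactly the hard-coded maxEncodedLenOfMaxBlockSize constant confirmed by TestMaxEncodedLenOfMaxBlockSize.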
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for a few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
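
A minimal usage sketch of the buffered writer (editorial, not part of this diff); Close is what guarantees the final buffered chunk reaches the underlying io.Writer:

```go
package main

import (
	"bytes"
	"log"

	"github.com/golang/snappy"
)

func main() {
	var network bytes.Buffer

	w := snappy.NewBufferedWriter(&network)
	if _, err := w.Write([]byte("many ")); err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("small writes")); err != nil {
		log.Fatal(err)
	}
	// Without Close (or a final Flush) the last chunk stays in ibuf.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d framed bytes", network.Len())
}
```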
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
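
A worked instance of the 64±4 reasoning: with offset = 10 and length = 67, the >= 68 loop is skipped; the length > 64 branch emits a length-60 tagCopy2 (0xee 0x0a 0x00), leaving length = 7; and since 7 < 12 and 10 < 2048, the remainder becomes a 2-byte tagCopy1 (0x0d 0x0a). That is five bytes total, versus six for a 64 + 3 split, where the length-3 remainder falls below tagCopy1's 4-byte minimum and would itself need a 3-byte tagCopy2.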
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
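
To make the table sizing concrete: for a 4096-byte src, the loop doubles tableSize from 256 to 4096 while decrementing shift from 24 to 20, so hash returns the top 12 bits of u * 0x1e35a7bd and indexes only the first 4096 of the table's 16384 slots.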
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
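
Reading one tag byte by hand: 0x0d has l = 0x0d & 3 = 1 (tagCopy1) and m = 0x0d >> 2 = 3, so the length is 4 + (m & 7) = 7 and m >> 3 = 0 supplies offset bits 8-10; the following byte supplies offset bits 0-7.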
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml
new file mode 100644
index 0000000..f91e5c1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/.codecov.yml
@@ -0,0 +1,8 @@
+coverage:
+ status:
+ project:
+ default:
+ target: 40%
+ threshold: null
+ patch: false
+ changes: false
diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore
new file mode 100644
index 0000000..776cd95
--- /dev/null
+++ b/vendor/github.com/miekg/dns/.gitignore
@@ -0,0 +1,4 @@
+*.6
+tags
+test.out
+a.out
diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml
new file mode 100644
index 0000000..8eaa064
--- /dev/null
+++ b/vendor/github.com/miekg/dns/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+sudo: false
+
+go:
+ - "1.12.x"
+ - "1.13.x"
+ - tip
+
+env:
+ - GO111MODULE=on
+
+script:
+ - go generate ./... && test `git ls-files --modified | wc -l` = 0
+ - go test -race -v -bench=. -coverprofile=coverage.txt -covermode=atomic ./...
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS
new file mode 100644
index 0000000..1965683
--- /dev/null
+++ b/vendor/github.com/miekg/dns/AUTHORS
@@ -0,0 +1 @@
+Miek Gieben
diff --git a/vendor/github.com/miekg/dns/CODEOWNERS b/vendor/github.com/miekg/dns/CODEOWNERS
new file mode 100644
index 0000000..e091703
--- /dev/null
+++ b/vendor/github.com/miekg/dns/CODEOWNERS
@@ -0,0 +1 @@
+* @miekg @tmthrgd
diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS
new file mode 100644
index 0000000..5903779
--- /dev/null
+++ b/vendor/github.com/miekg/dns/CONTRIBUTORS
@@ -0,0 +1,10 @@
+Alex A. Skinner
+Andrew Tunnell-Jones
+Ask Bjørn Hansen
+Dave Cheney
+Dusty Wilson
+Marek Majkowski
+Peter van Dijk
+Omri Bahumi
+Alex Sergeyev
+James Hartig
diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT
new file mode 100644
index 0000000..35702b1
--- /dev/null
+++ b/vendor/github.com/miekg/dns/COPYRIGHT
@@ -0,0 +1,9 @@
+Copyright 2009 The Go Authors. All rights reserved. Use of this source code
+is governed by a BSD-style license that can be found in the LICENSE file.
+Extensions of the original work are copyright (c) 2011 Miek Gieben
+
+Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
+
+Copyright 2014 CloudFlare. All rights reserved. Use of this source code is
+governed by a BSD-style license that can be found in the LICENSE file.
diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE
new file mode 100644
index 0000000..55f12ab
--- /dev/null
+++ b/vendor/github.com/miekg/dns/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+As this is fork of the official Go code the same license applies.
+Extensions of the original work are copyright (c) 2011 Miek Gieben
diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz
new file mode 100644
index 0000000..dc158c4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/Makefile.fuzz
@@ -0,0 +1,33 @@
+# Makefile for fuzzing
+#
+# Uses go-fuzz and needs the tools installed.
+# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/
+#
+# Installing go-fuzz:
+# $ make -f Makefile.fuzz get
+# Installs:
+# * github.com/dvyukov/go-fuzz/go-fuzz
+# * github.com/dvyukov/go-fuzz/go-fuzz-build
+
+all: build
+
+.PHONY: build
+build:
+ go-fuzz-build -tags fuzz github.com/miekg/dns
+
+.PHONY: build-newrr
+build-newrr:
+ go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns
+
+.PHONY: fuzz
+fuzz:
+ go-fuzz -bin=dns-fuzz.zip -workdir=fuzz
+
+.PHONY: get
+get:
+ go get github.com/dvyukov/go-fuzz/go-fuzz
+ go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+.PHONY: clean
+clean:
+ rm *-fuzz.zip
diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release
new file mode 100644
index 0000000..8fb748e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/Makefile.release
@@ -0,0 +1,52 @@
+# Makefile for releasing.
+#
+# The release is controlled from version.go. The version found there is
+# used to tag the git repo; we're not building any artifacts, so there is nothing
+# to upload to github.
+#
+# * Up the version in version.go
+# * Run: make -f Makefile.release release
+# * will *commit* your change with 'Release $VERSION'
+# * push to github
+#
+
+define GO
+//+build ignore
+
+package main
+
+import (
+ "fmt"
+
+ "github.com/miekg/dns"
+)
+
+func main() {
+ fmt.Println(dns.Version.String())
+}
+endef
+
+$(file > version_release.go,$(GO))
+VERSION:=$(shell go run version_release.go)
+TAG="v$(VERSION)"
+
+all:
+ @echo Use the \'release\' target to start a release $(VERSION)
+ rm -f version_release.go
+
+.PHONY: release
+release: commit push
+ @echo Released $(VERSION)
+ rm -f version_release.go
+
+.PHONY: commit
+commit:
+ @echo Committing release $(VERSION)
+ git commit -am"Release $(VERSION)"
+ git tag $(TAG)
+
+.PHONY: push
+push:
+ @echo Pushing release $(VERSION) to master
+ git push --tags
+ git push
diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md
new file mode 100644
index 0000000..126fe62
--- /dev/null
+++ b/vendor/github.com/miekg/dns/README.md
@@ -0,0 +1,175 @@
+[Build Status](https://travis-ci.org/miekg/dns)
+[Code Coverage](https://codecov.io/github/miekg/dns?branch=master)
+[Go Report Card](https://goreportcard.com/report/miekg/dns)
+[GoDoc](https://godoc.org/github.com/miekg/dns)
+
+# Alternative (more granular) approach to a DNS library
+
+> Less is more.
+
+Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types.
+It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer, there
+isn't a convenience function for it. Server-side and client-side programming are supported, i.e. you
+can build servers and resolvers with it.
+
+We try to keep the "master" branch as sane as possible and at the bleeding edge of standards,
+avoiding breaking changes wherever reasonable. We support the last two versions of Go.
+
+# Goals
+
+* KISS;
+* Fast;
+* Small API. If it's easy to code in Go, don't make a function for it.
+
+# Users
+
+A not-so-up-to-date-list-that-may-be-actually-current:
+
+* https://github.com/coredns/coredns
+* https://cloudflare.com
+* https://github.com/abh/geodns
+* http://www.statdns.com/
+* http://www.dnsinspect.com/
+* https://github.com/chuangbo/jianbing-dictionary-dns
+* http://www.dns-lg.com/
+* https://github.com/fcambus/rrda
+* https://github.com/kenshinx/godns
+* https://github.com/skynetservices/skydns
+* https://github.com/hashicorp/consul
+* https://github.com/DevelopersPL/godnsagent
+* https://github.com/duedil-ltd/discodns
+* https://github.com/StalkR/dns-reverse-proxy
+* https://github.com/tianon/rawdns
+* https://mesosphere.github.io/mesos-dns/
+* https://pulse.turbobytes.com/
+* https://github.com/fcambus/statzone
+* https://github.com/benschw/dns-clb-go
+* https://github.com/corny/dnscheck
+* https://namesmith.io
+* https://github.com/miekg/unbound
+* https://github.com/miekg/exdns
+* https://dnslookup.org
+* https://github.com/looterz/grimd
+* https://github.com/phamhongviet/serf-dns
+* https://github.com/mehrdadrad/mylg
+* https://github.com/bamarni/dockness
+* https://github.com/fffaraz/microdns
+* http://kelda.io
+* https://github.com/ipdcode/hades
+* https://github.com/StackExchange/dnscontrol/
+* https://www.dnsperf.com/
+* https://dnssectest.net/
+* https://dns.apebits.com
+* https://github.com/oif/apex
+* https://github.com/jedisct1/dnscrypt-proxy
+* https://github.com/jedisct1/rpdns
+* https://github.com/xor-gate/sshfp
+* https://github.com/rs/dnstrace
+* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss))
+* https://github.com/semihalev/sdns
+* https://render.com
+* https://github.com/peterzen/goresolver
+* https://github.com/folbricht/routedns
+
+Send a pull request if you want to be listed here.
+
+# Features
+
+* UDP/TCP queries, IPv4 and IPv6
+* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE are supported for all record types)
+* Fast
+* Server side programming (mimicking the net/http package)
+* Client side programming
+* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519
+* EDNS0, NSID, Cookies
+* AXFR/IXFR
+* TSIG, SIG(0)
+* DNS over TLS (DoT): encrypted connection between client and server over TCP
+* DNS name compression
+
+Have fun!
+
+Miek Gieben - 2010-2012
+DNS Authors 2012-
+
+# Building
+
+This library uses Go modules and semantic versioning. Building is done with the `go` tool, so
+the following should work:
+
+ go get github.com/miekg/dns
+ go build github.com/miekg/dns
+
+## Examples
+
+A short "how to use the API" is at the beginning of doc.go (this also will show when you call `godoc
+github.com/miekg/dns`).
+
+Example programs can be found in the `github.com/miekg/exdns` repository.
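+
+As a minimal sketch of the client API (the resolver address is illustrative and
+error handling is elided):
+
+ m := new(dns.Msg)
+ m.SetQuestion(dns.Fqdn("example.org"), dns.TypeA)
+ c := new(dns.Client)
+ in, rtt, err := c.Exchange(m, "8.8.8.8:53")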
+
+## Supported RFCs
+
+*all of them*
+
+* 103{4,5} - DNS standard
+* 1348 - NSAP record (removed the record)
+* 1982 - Serial Arithmetic
+* 1876 - LOC record
+* 1995 - IXFR
+* 1996 - DNS notify
+* 2136 - DNS Update (dynamic updates)
+* 2181 - RRset definition - there is no RRset type though, just []RR
+* 2537 - RSAMD5 DNS keys
+* 2065 - DNSSEC (updated in later RFCs)
+* 2671 - EDNS record
+* 2782 - SRV record
+* 2845 - TSIG record
+* 2915 - NAPTR record
+* 2929 - DNS IANA Considerations
+* 3110 - RSASHA1 DNS keys
+* 3123 - APL record
+* 3225 - DO bit (DNSSEC OK)
+* 340{1,2,3} - NAPTR record
+* 3445 - Limiting the scope of (DNS)KEY
+* 3597 - Unknown RRs
+* 403{3,4,5} - DNSSEC + validation functions
+* 4255 - SSHFP record
+* 4343 - Case insensitivity
+* 4408 - SPF record
+* 4509 - SHA256 Hash in DS
+* 4592 - Wildcards in the DNS
+* 4635 - HMAC SHA TSIG
+* 4701 - DHCID
+* 4892 - id.server
+* 5001 - NSID
+* 5155 - NSEC3 record
+* 5205 - HIP record
+* 5702 - SHA2 in the DNS
+* 5936 - AXFR
+* 5966 - TCP implementation recommendations
+* 6605 - ECDSA
+* 6725 - IANA Registry Update
+* 6742 - ILNP DNS
+* 6840 - Clarifications and Implementation Notes for DNS Security
+* 6844 - CAA record
+* 6891 - EDNS0 update
+* 6895 - DNS IANA considerations
+* 6944 - DNSSEC DNSKEY Algorithm Status
+* 6975 - Algorithm Understanding in DNSSEC
+* 7043 - EUI48/EUI64 records
+* 7314 - DNS (EDNS) EXPIRE Option
+* 7477 - CSYNC RR
+* 7828 - edns-tcp-keepalive EDNS0 Option
+* 7553 - URI record
+* 7858 - DNS over TLS: Initiation and Performance Considerations
+* 7871 - EDNS0 Client Subnet
+* 7873 - Domain Name System (DNS) Cookies
+* 8080 - EdDSA for DNSSEC
+* 8499 - DNS Terminology
+
+## Loosely Based Upon
+
+* ldns
+* NSD
+* Net::DNS
+* GRONG
diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go
new file mode 100644
index 0000000..825617f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/acceptfunc.go
@@ -0,0 +1,61 @@
+package dns
+
+// MsgAcceptFunc is used early in the server code to accept or reject a message with RcodeFormatError.
+// It returns a MsgAcceptAction to indicate what should happen with the message.
+type MsgAcceptFunc func(dh Header) MsgAcceptAction
+
+// DefaultMsgAcceptFunc checks the request and will reject if:
+//
+// * isn't a request (don't respond in that case)
+//
+// * opcode isn't OpcodeQuery or OpcodeNotify
+//
+// * Zero bit isn't zero
+//
+// * has more than 1 question in the question section
+//
+// * has more than 1 RR in the Answer section
+//
+// * has more than 0 RRs in the Authority section
+//
+// * has more than 2 RRs in the Additional section
+//
+var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
+
+// MsgAcceptAction represents the action to be taken.
+type MsgAcceptAction int
+
+const (
+ MsgAccept MsgAcceptAction = iota // Accept the message
+ MsgReject // Reject the message with a RcodeFormatError
+ MsgIgnore // Ignore the error and send nothing back.
+ MsgRejectNotImplemented // Reject the message with a RcodeNotImplemented
+)
+
+func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
+ if isResponse := dh.Bits&_QR != 0; isResponse {
+ return MsgIgnore
+ }
+
+ // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs.
+ opcode := int(dh.Bits>>11) & 0xF
+ if opcode != OpcodeQuery && opcode != OpcodeNotify {
+ return MsgRejectNotImplemented
+ }
+
+ if dh.Qdcount != 1 {
+ return MsgReject
+ }
+ // NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
+ if dh.Ancount > 1 {
+ return MsgReject
+ }
+ // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
+ if dh.Nscount > 1 {
+ return MsgReject
+ }
+ if dh.Arcount > 2 {
+ return MsgReject
+ }
+ return MsgAccept
+}
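+
+// A server may install a custom MsgAcceptFunc to change this policy. A
+// minimal sketch (hypothetical policy, for illustration only):
+//
+// srv := &dns.Server{Addr: ":8053", Net: "udp"}
+// srv.MsgAcceptFunc = func(dh dns.Header) dns.MsgAcceptAction {
+// 	if dh.Qdcount != 1 {
+// 		return dns.MsgReject // still insist on exactly one question
+// 	}
+// 	return dns.MsgAccept // accept everything else
+// }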
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
new file mode 100644
index 0000000..db2761d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/client.go
@@ -0,0 +1,415 @@
+package dns
+
+// A client implementation.
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "time"
+)
+
+const (
+ dnsTimeout time.Duration = 2 * time.Second
+ tcpIdleTimeout time.Duration = 8 * time.Second
+)
+
+// A Conn represents a connection to a DNS server.
+type Conn struct {
+ net.Conn // a net.Conn holding the connection
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ tsigRequestMAC string
+}
+
+// A Client defines parameters for a DNS client.
+type Client struct {
+ Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
+ UDPSize uint16 // minimum receive buffer for UDP messages
+ TLSConfig *tls.Config // TLS connection configuration
+ Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
+ // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
+ // WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
+ // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
+ Timeout time.Duration
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
+ ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
+ TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
+ group singleflight
+}
+
+// Exchange performs a synchronous UDP query. It sends the message m to the address
+// contained in a and waits for a reply. Exchange does not retry a failed query, nor
+// will it fall back to TCP in case of truncation.
+// See Client.Exchange for more information on setting larger buffer sizes.
+func Exchange(m *Msg, a string) (r *Msg, err error) {
+ client := Client{Net: "udp"}
+ r, _, err = client.Exchange(m, a)
+ return r, err
+}
+
+func (c *Client) dialTimeout() time.Duration {
+ if c.Timeout != 0 {
+ return c.Timeout
+ }
+ if c.DialTimeout != 0 {
+ return c.DialTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) readTimeout() time.Duration {
+ if c.ReadTimeout != 0 {
+ return c.ReadTimeout
+ }
+ return dnsTimeout
+}
+
+func (c *Client) writeTimeout() time.Duration {
+ if c.WriteTimeout != 0 {
+ return c.WriteTimeout
+ }
+ return dnsTimeout
+}
+
+// Dial connects to the address on the named network.
+func (c *Client) Dial(address string) (conn *Conn, err error) {
+ // create a new dialer with the appropriate timeout
+ var d net.Dialer
+ if c.Dialer == nil {
+ d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())}
+ } else {
+ d = *c.Dialer
+ }
+
+ network := c.Net
+ if network == "" {
+ network = "udp"
+ }
+
+ useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls")
+
+ conn = new(Conn)
+ if useTLS {
+ network = strings.TrimSuffix(network, "-tls")
+
+ conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
+ } else {
+ conn.Conn, err = d.Dial(network, address)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// Exchange performs a synchronous query. It sends the message m to the address
+// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
+//
+// c := new(dns.Client)
+// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
+//
+// Exchange does not retry a failed query, nor will it fall back to TCP in
+// case of truncation.
+// It is up to the caller to create a message that allows for larger responses to be
+// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
+// buffer, see SetEdns0. Messages without an OPT RR will fall back to the historic limit
+// of 512 bytes.
+// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
+// attribute appropriately.
+func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
+ if !c.SingleInflight {
+ return c.exchange(m, address)
+ }
+
+ q := m.Question[0]
+ key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
+ r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
+ return c.exchange(m, address)
+ })
+ if r != nil && shared {
+ r = r.Copy()
+ }
+
+ return r, rtt, err
+}
+
+func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ var co *Conn
+
+ co, err = c.Dial(a)
+
+ if err != nil {
+ return nil, 0, err
+ }
+ defer co.Close()
+
+ opt := m.IsEdns0()
+ // If EDNS0 is used use that for size.
+ if opt != nil && opt.UDPSize() >= MinMsgSize {
+ co.UDPSize = opt.UDPSize()
+ }
+ // Otherwise use the client's configured UDP size.
+ if opt == nil && c.UDPSize >= MinMsgSize {
+ co.UDPSize = c.UDPSize
+ }
+
+ co.TsigSecret = c.TsigSecret
+ t := time.Now()
+ // write with the appropriate write timeout
+ co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout())))
+ if err = co.WriteMsg(m); err != nil {
+ return nil, 0, err
+ }
+
+ co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ rtt = time.Since(t)
+ return r, rtt, err
+}
+
+// ReadMsg reads a message from the connection co.
+// If the received message contains a TSIG record the transaction signature
+// is verified. This method always tries to return the message, however if an
+// error is returned there are no guarantees that the returned message is a
+// valid representation of the packet read.
+func (co *Conn) ReadMsg() (*Msg, error) {
+ p, err := co.ReadMsgHeader(nil)
+ if err != nil {
+ return nil, err
+ }
+
+ m := new(Msg)
+ if err := m.Unpack(p); err != nil {
+ // If an error was returned, we still want to allow the user to use
+ // the message, but naively they can just check err if they don't want
+ // to use an erroneous message
+ return m, err
+ }
+ if t := m.IsTsig(); t != nil {
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ }
+ return m, err
+}
+
+// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil).
+// Returns message as a byte slice to be parsed with Msg.Unpack later on.
+// Note that error handling on the message body is not possible as only the header is parsed.
+func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
+ var (
+ p []byte
+ n int
+ err error
+ )
+
+ if _, ok := co.Conn.(net.PacketConn); ok {
+ if co.UDPSize > MinMsgSize {
+ p = make([]byte, co.UDPSize)
+ } else {
+ p = make([]byte, MinMsgSize)
+ }
+ n, err = co.Read(p)
+ } else {
+ var length uint16
+ if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+
+ p = make([]byte, length)
+ n, err = io.ReadFull(co.Conn, p)
+ }
+
+ if err != nil {
+ return nil, err
+ } else if n < headerSize {
+ return nil, ErrShortRead
+ }
+
+ p = p[:n]
+ if hdr != nil {
+ dh, _, err := unpackMsgHdr(p, 0)
+ if err != nil {
+ return nil, err
+ }
+ *hdr = dh
+ }
+ return p, err
+}
+
+// Read implements the net.Conn read method.
+func (co *Conn) Read(p []byte) (n int, err error) {
+ if co.Conn == nil {
+ return 0, ErrConnEmpty
+ }
+
+ if _, ok := co.Conn.(net.PacketConn); ok {
+ // UDP connection
+ return co.Conn.Read(p)
+ }
+
+ var length uint16
+ if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
+ return 0, err
+ }
+ if int(length) > len(p) {
+ return 0, io.ErrShortBuffer
+ }
+
+ return io.ReadFull(co.Conn, p[:length])
+}
+
+// WriteMsg sends a message through the connection co.
+// If the message m contains a TSIG record the transaction
+// signature is calculated.
+func (co *Conn) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if t := m.IsTsig(); t != nil {
+ mac := ""
+ if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false)
+ // Set for the next read, although only used in zone transfers
+ co.tsigRequestMAC = mac
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ _, err = co.Write(out)
+ return err
+}
+
+// Write implements the net.Conn Write method.
+func (co *Conn) Write(p []byte) (int, error) {
+ if len(p) > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+
+ if _, ok := co.Conn.(net.PacketConn); ok {
+ return co.Conn.Write(p)
+ }
+
+ l := make([]byte, 2)
+ binary.BigEndian.PutUint16(l, uint16(len(p)))
+
+ n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
+ return int(n), err
+}
+
+// Return the appropriate timeout for a specific request
+func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
+ var requestTimeout time.Duration
+ if c.Timeout != 0 {
+ requestTimeout = c.Timeout
+ } else {
+ requestTimeout = timeout
+ }
+ // net.Dialer.Timeout has priority if smaller than the timeouts computed so
+ // far
+ if c.Dialer != nil && c.Dialer.Timeout != 0 {
+ if c.Dialer.Timeout < requestTimeout {
+ requestTimeout = c.Dialer.Timeout
+ }
+ }
+ return requestTimeout
+}
+
+// Dial connects to the address on the named network.
+func Dial(network, address string) (conn *Conn, err error) {
+ conn = new(Conn)
+ conn.Conn, err = net.Dial(network, address)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+// ExchangeContext performs a synchronous UDP query, like Exchange. It
+// additionally obeys deadlines from the passed Context.
+func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
+ client := Client{Net: "udp"}
+ r, _, err = client.ExchangeContext(ctx, m, a)
+ // ignoring rtt to leave the original ExchangeContext API unchanged, but
+ // this function will go away
+ return r, err
+}
+
+// ExchangeConn performs a synchronous query. It sends the message m via the connection
+// c and waits for a reply. The connection c is not closed by ExchangeConn.
+// Deprecated: This function is going away, but can easily be mimicked:
+//
+// co := &dns.Conn{Conn: c} // c is your net.Conn
+// co.WriteMsg(m)
+// in, _ := co.ReadMsg()
+// co.Close()
+//
+func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
+ println("dns: ExchangeConn: this function is deprecated")
+ co := new(Conn)
+ co.Conn = c
+ if err = co.WriteMsg(m); err != nil {
+ return nil, err
+ }
+ r, err = co.ReadMsg()
+ if err == nil && r.Id != m.Id {
+ err = ErrId
+ }
+ return r, err
+}
+
+// DialTimeout acts like Dial but takes a timeout.
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
+ client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
+ return client.Dial(address)
+}
+
+// DialWithTLS connects to the address on the named network with TLS.
+func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
+ if !strings.HasSuffix(network, "-tls") {
+ network += "-tls"
+ }
+ client := Client{Net: network, TLSConfig: tlsConfig}
+ return client.Dial(address)
+}
+
+// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
+func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
+ if !strings.HasSuffix(network, "-tls") {
+ network += "-tls"
+ }
+ client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
+ return client.Dial(address)
+}
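+
+// Sketch of a DNS-over-TLS exchange (the resolver address is illustrative;
+// m is a prepared *dns.Msg):
+//
+// conn, err := dns.DialWithTLS("tcp-tls", "9.9.9.9:853", nil)
+// if err == nil {
+// 	defer conn.Close()
+// 	_ = conn.WriteMsg(m)
+// 	in, _ := conn.ReadMsg()
+// 	_ = in
+// }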
+
+// ExchangeContext acts like Exchange, but honors the deadline on the provided
+// context, if present. If there is both a context deadline and a configured
+// timeout on the client, the earliest of the two takes effect.
+func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
+ var timeout time.Duration
+ if deadline, ok := ctx.Deadline(); !ok {
+ timeout = 0
+ } else {
+ timeout = time.Until(deadline)
+ }
+ // not passing the context to the underlying calls, as the API does not support
+ // context. For timeouts you should set up Client.Dialer and call Client.Exchange.
+ // TODO(tmthrgd,miekg): this is a race condition.
+ c.Dialer = &net.Dialer{Timeout: timeout}
+ return c.Exchange(m, a)
+}
diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go
new file mode 100644
index 0000000..e11b630
--- /dev/null
+++ b/vendor/github.com/miekg/dns/clientconfig.go
@@ -0,0 +1,135 @@
+package dns
+
+import (
+ "bufio"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// ClientConfig wraps the contents of the /etc/resolv.conf file.
+type ClientConfig struct {
+ Servers []string // servers to use
+ Search []string // suffixes to append to local name
+ Port string // what port to use
+ Ndots int // number of dots in name to trigger absolute lookup
+ Timeout int // seconds before giving up on packet
+ Attempts int // lost packets before giving up on server, not used in the package dns
+}
+
+// ClientConfigFromFile parses a resolv.conf(5) like file and returns
+// a *ClientConfig.
+func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
+ file, err := os.Open(resolvconf)
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+ return ClientConfigFromReader(file)
+}
+
+// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
+func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
+ c := new(ClientConfig)
+ scanner := bufio.NewScanner(resolvconf)
+ c.Servers = make([]string, 0)
+ c.Search = make([]string, 0)
+ c.Port = "53"
+ c.Ndots = 1
+ c.Timeout = 5
+ c.Attempts = 2
+
+ for scanner.Scan() {
+ if err := scanner.Err(); err != nil {
+ return nil, err
+ }
+ line := scanner.Text()
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ continue
+ }
+ switch f[0] {
+ case "nameserver": // add one name server
+ if len(f) > 1 {
+ // One more check: make sure server name is
+ // just an IP address. Otherwise we need DNS
+ // to look it up.
+ name := f[1]
+ c.Servers = append(c.Servers, name)
+ }
+
+ case "domain": // set search path to just this domain
+ if len(f) > 1 {
+ c.Search = make([]string, 1)
+ c.Search[0] = f[1]
+ } else {
+ c.Search = make([]string, 0)
+ }
+
+ case "search": // set search path to given servers
+ c.Search = append([]string(nil), f[1:]...)
+
+ case "options": // magic options
+ for _, s := range f[1:] {
+ switch {
+ case len(s) >= 6 && s[:6] == "ndots:":
+ n, _ := strconv.Atoi(s[6:])
+ if n < 0 {
+ n = 0
+ } else if n > 15 {
+ n = 15
+ }
+ c.Ndots = n
+ case len(s) >= 8 && s[:8] == "timeout:":
+ n, _ := strconv.Atoi(s[8:])
+ if n < 1 {
+ n = 1
+ }
+ c.Timeout = n
+ case len(s) >= 9 && s[:9] == "attempts:":
+ n, _ := strconv.Atoi(s[9:])
+ if n < 1 {
+ n = 1
+ }
+ c.Attempts = n
+ case s == "rotate":
+ /* not imp */
+ }
+ }
+ }
+ }
+ return c, nil
+}
+
+// NameList returns all of the names that should be queried based on the
+// config. It is based on Go's net/dns name building, but it does not
+// check the length of the resulting names.
+func (c *ClientConfig) NameList(name string) []string {
+ // if this domain is already fully qualified, no append needed.
+ if IsFqdn(name) {
+ return []string{name}
+ }
+
+ // Check to see if the name has more labels than Ndots. Do this before making
+ // the domain fully qualified.
+ hasNdots := CountLabel(name) > c.Ndots
+ // Make the domain fully qualified.
+ name = Fqdn(name)
+
+ // Make a list of names based off search.
+ names := []string{}
+
+ // If name has enough dots, try that first.
+ if hasNdots {
+ names = append(names, name)
+ }
+ for _, s := range c.Search {
+ names = append(names, Fqdn(name+s))
+ }
+ // If we didn't have enough dots, try after suffixes.
+ if !hasNdots {
+ names = append(names, name)
+ }
+ return names
+}
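+
+// Usage sketch, assuming a standard resolv.conf is present:
+//
+// cfg, err := dns.ClientConfigFromFile("/etc/resolv.conf")
+// if err != nil {
+// 	// handle the error
+// }
+// for _, fqdn := range cfg.NameList("www") {
+// 	// query fqdn against cfg.Servers[0] + ":" + cfg.Port
+// }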
diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go
new file mode 100644
index 0000000..8c4a14e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dane.go
@@ -0,0 +1,43 @@
+package dns
+
+import (
+ "crypto/sha256"
+ "crypto/sha512"
+ "crypto/x509"
+ "encoding/hex"
+ "errors"
+)
+
+// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records.
+func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) {
+ switch matchingType {
+ case 0:
+ switch selector {
+ case 0:
+ return hex.EncodeToString(cert.Raw), nil
+ case 1:
+ return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil
+ }
+ case 1:
+ h := sha256.New()
+ switch selector {
+ case 0:
+ h.Write(cert.Raw)
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ h.Write(cert.RawSubjectPublicKeyInfo)
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ case 2:
+ h := sha512.New()
+ switch selector {
+ case 0:
+ h.Write(cert.Raw)
+ return hex.EncodeToString(h.Sum(nil)), nil
+ case 1:
+ h.Write(cert.RawSubjectPublicKeyInfo)
+ return hex.EncodeToString(h.Sum(nil)), nil
+ }
+ }
+ return "", errors.New("dns: bad MatchingType or Selector")
+}
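+
+// Sketch: TLSA records commonly use Selector 1 (SubjectPublicKeyInfo) with
+// MatchingType 1 (SHA-256); cert is an *x509.Certificate:
+//
+// digest, err := dns.CertificateToDANE(1, 1, cert)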
diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go
new file mode 100644
index 0000000..b059f6f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/defaults.go
@@ -0,0 +1,378 @@
+package dns
+
+import (
+ "errors"
+ "net"
+ "strconv"
+ "strings"
+)
+
+const hexDigit = "0123456789abcdef"
+
+// Everything is assumed in ClassINET.
+
+// SetReply creates a reply message from a request message.
+func (dns *Msg) SetReply(request *Msg) *Msg {
+ dns.Id = request.Id
+ dns.Response = true
+ dns.Opcode = request.Opcode
+ if dns.Opcode == OpcodeQuery {
+ dns.RecursionDesired = request.RecursionDesired // Copy rd bit
+ dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
+ }
+ dns.Rcode = RcodeSuccess
+ if len(request.Question) > 0 {
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = request.Question[0]
+ }
+ return dns
+}
+
+// SetQuestion creates a question message: it sets the Question
+// section, generates an Id and sets the RecursionDesired (RD)
+// bit to true.
+func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
+ dns.Id = Id()
+ dns.RecursionDesired = true
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, t, ClassINET}
+ return dns
+}
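+
+// SetQuestion returns the message, so calls chain; a one-line sketch:
+//
+// m := new(dns.Msg).SetQuestion(dns.Fqdn("example.org"), dns.TypeMX)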
+
+// SetNotify creates a notify message: it sets the Question
+// section, generates an Id and sets the Authoritative (AA)
+// bit to true.
+func (dns *Msg) SetNotify(z string) *Msg {
+ dns.Opcode = OpcodeNotify
+ dns.Authoritative = true
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetRcode creates an error message suitable for the request.
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
+ dns.SetReply(request)
+ dns.Rcode = rcode
+ return dns
+}
+
+// SetRcodeFormatError creates a message with FormError set.
+func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
+ dns.Rcode = RcodeFormatError
+ dns.Opcode = OpcodeQuery
+ dns.Response = true
+ dns.Authoritative = false
+ dns.Id = request.Id
+ return dns
+}
+
+// SetUpdate makes the message a dynamic update message. It
+// sets the ZONE section to: z, TypeSOA, ClassINET.
+func (dns *Msg) SetUpdate(z string) *Msg {
+ dns.Id = Id()
+ dns.Response = false
+ dns.Opcode = OpcodeUpdate
+ dns.Compress = false // BIND9 cannot handle compression
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeSOA, ClassINET}
+ return dns
+}
+
+// SetIxfr creates message for requesting an IXFR.
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Ns = make([]RR, 1)
+ s := new(SOA)
+ s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
+ s.Serial = serial
+ s.Ns = ns
+ s.Mbox = mbox
+ dns.Question[0] = Question{z, TypeIXFR, ClassINET}
+ dns.Ns[0] = s
+ return dns
+}
+
+// SetAxfr creates message for requesting an AXFR.
+func (dns *Msg) SetAxfr(z string) *Msg {
+ dns.Id = Id()
+ dns.Question = make([]Question, 1)
+ dns.Question[0] = Question{z, TypeAXFR, ClassINET}
+ return dns
+}
+
+// SetTsig appends a TSIG RR to the message.
+// This is only a skeleton TSIG RR that is added as the last RR in the
+// additional section. The Tsig is calculated when the message is being sent.
+func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
+ t := new(TSIG)
+ t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
+ t.Algorithm = algo
+ t.Fudge = fudge
+ t.TimeSigned = uint64(timesigned)
+ t.OrigId = dns.Id
+ dns.Extra = append(dns.Extra, t)
+ return dns
+}
+
+// SetEdns0 appends an EDNS0 OPT RR to the message.
+// TSIG should always be the last RR in a message.
+func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
+ e := new(OPT)
+ e.Hdr.Name = "."
+ e.Hdr.Rrtype = TypeOPT
+ e.SetUDPSize(udpsize)
+ if do {
+ e.SetDo()
+ }
+ dns.Extra = append(dns.Extra, e)
+ return dns
+}
+
+// IsTsig checks if the message has a TSIG record as the last record
+// in the additional section. It returns the TSIG record found or nil.
+func (dns *Msg) IsTsig() *TSIG {
+ if len(dns.Extra) > 0 {
+ if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
+ return dns.Extra[len(dns.Extra)-1].(*TSIG)
+ }
+ }
+ return nil
+}
+
+// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
+// record in the additional section will do. It returns the OPT record
+// found or nil.
+func (dns *Msg) IsEdns0() *OPT {
+ // RFC 6891, Section 6.1.1 allows the OPT record to appear
+ // anywhere in the additional record section, but it's usually at
+ // the end so start there.
+ for i := len(dns.Extra) - 1; i >= 0; i-- {
+ if dns.Extra[i].Header().Rrtype == TypeOPT {
+ return dns.Extra[i].(*OPT)
+ }
+ }
+ return nil
+}
+
+// popEdns0 is like IsEdns0, but it removes the record from the message.
+func (dns *Msg) popEdns0() *OPT {
+ // RFC 6891, Section 6.1.1 allows the OPT record to appear
+ // anywhere in the additional record section, but it's usually at
+ // the end so start there.
+ for i := len(dns.Extra) - 1; i >= 0; i-- {
+ if dns.Extra[i].Header().Rrtype == TypeOPT {
+ opt := dns.Extra[i].(*OPT)
+ dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
+ return opt
+ }
+ }
+ return nil
+}
+
+// IsDomainName checks if s is a valid domain name. It returns the number of
+// labels and true when the domain name is valid. Note that a non-fully-qualified
+// domain name is considered valid; in this case the last label is counted in
+// the number of labels. When false is returned the number of labels is not
+// defined. Also note that this function is extremely liberal; almost any
+// string is a valid domain name, as the DNS is an 8-bit protocol. It checks that each
+// label fits in 63 characters and that the entire name will fit into the 255
+// octet wire format limit.
+func IsDomainName(s string) (labels int, ok bool) {
+ // XXX: The logic in this function was copied from packDomainName and
+ // should be kept in sync with that function.
+
+ const lenmsg = 256
+
+ if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
+ return 0, false
+ }
+
+ s = Fqdn(s)
+
+ // Each dot ends a segment of the name. Except for escaped dots (\.), which
+ // are normal dots.
+
+ var (
+ off int
+ begin int
+ wasDot bool
+ )
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '\\':
+ if off+1 > lenmsg {
+ return labels, false
+ }
+
+ // check for \DDD
+ if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
+ i += 3
+ begin += 3
+ } else {
+ i++
+ begin++
+ }
+
+ wasDot = false
+ case '.':
+ if wasDot {
+ // two dots back to back is not legal
+ return labels, false
+ }
+ wasDot = true
+
+ labelLen := i - begin
+ if labelLen >= 1<<6 { // top two bits of length must be clear
+ return labels, false
+ }
+
+ // off can already (we're in a loop) be bigger than lenmsg
+ // this happens when a name isn't fully qualified
+ off += 1 + labelLen
+ if off > lenmsg {
+ return labels, false
+ }
+
+ labels++
+ begin = i + 1
+ default:
+ wasDot = false
+ }
+ }
+
+ return labels, true
+}
+
+// IsSubDomain checks if child is indeed a child of the parent. If child and parent
+// are the same domain true is returned as well.
+func IsSubDomain(parent, child string) bool {
+ // Entire child is contained in parent
+ return CompareDomainName(parent, child) == CountLabel(parent)
+}
+
+// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
+// The checking is performed on the binary payload.
+func IsMsg(buf []byte) error {
+ // Header
+ if len(buf) < headerSize {
+ return errors.New("dns: bad message header")
+ }
+ // Header: Opcode
+ // TODO(miek): more checks here, e.g. check all header bits.
+ return nil
+}
+
+// IsFqdn checks if a domain name is fully qualified.
+func IsFqdn(s string) bool {
+ s2 := strings.TrimSuffix(s, ".")
+ if s == s2 {
+ return false
+ }
+
+ i := strings.LastIndexFunc(s2, func(r rune) bool {
+ return r != '\\'
+ })
+
+ // Test whether we have an even number of escape sequences before
+ // the dot or none.
+ return (len(s2)-i)%2 != 0
+}
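+
+// Examples (sketch):
+//
+// IsFqdn("miek.nl.")  // true
+// IsFqdn("miek.nl")   // false
+// IsFqdn(`miek.nl\.`) // false: the trailing dot is escaped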
+
+// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
+// This means the RRs need to have the same type, name, and class. Returns true
+// if the RR set is valid, otherwise false.
+func IsRRset(rrset []RR) bool {
+ if len(rrset) == 0 {
+ return false
+ }
+ if len(rrset) == 1 {
+ return true
+ }
+ rrHeader := rrset[0].Header()
+ rrType := rrHeader.Rrtype
+ rrClass := rrHeader.Class
+ rrName := rrHeader.Name
+
+ for _, rr := range rrset[1:] {
+ curRRHeader := rr.Header()
+ if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
+ // Mismatch between the records, so this is not a valid rrset for
+ // signing/verifying.
+ return false
+ }
+ }
+
+ return true
+}
+
+// Fqdn returns the fully qualified domain name from s.
+// If s is already fully qualified, it behaves as the identity function.
+func Fqdn(s string) string {
+ if IsFqdn(s) {
+ return s
+ }
+ return s + "."
+}
+
+// Copied from the official Go code.
+
+// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
+// address suitable for reverse DNS (PTR) record lookups or an error if it fails
+// to parse the IP address.
+func ReverseAddr(addr string) (arpa string, err error) {
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ return "", &Error{err: "unrecognized address: " + addr}
+ }
+ if v4 := ip.To4(); v4 != nil {
+ buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
+ // Add it, in reverse, to the buffer
+ for i := len(v4) - 1; i >= 0; i-- {
+ buf = strconv.AppendInt(buf, int64(v4[i]), 10)
+ buf = append(buf, '.')
+ }
+ // Append "in-addr.arpa." and return (buf already has the final .)
+ buf = append(buf, "in-addr.arpa."...)
+ return string(buf), nil
+ }
+ // Must be IPv6
+ buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
+ // Add it, in reverse, to the buffer
+ for i := len(ip) - 1; i >= 0; i-- {
+ v := ip[i]
+ buf = append(buf, hexDigit[v&0xF])
+ buf = append(buf, '.')
+ buf = append(buf, hexDigit[v>>4])
+ buf = append(buf, '.')
+ }
+ // Append "ip6.arpa." and return (buf already has the final .)
+ buf = append(buf, "ip6.arpa."...)
+ return string(buf), nil
+}
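+
+// Example (sketch):
+//
+// arpa, _ := ReverseAddr("192.0.2.1") // "1.2.0.192.in-addr.arpa."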
+
+// String returns the string representation for the type t.
+func (t Type) String() string {
+ if t1, ok := TypeToString[uint16(t)]; ok {
+ return t1
+ }
+ return "TYPE" + strconv.Itoa(int(t))
+}
+
+// String returns the string representation for the class c.
+func (c Class) String() string {
+ if s, ok := ClassToString[uint16(c)]; ok {
+ // Only emit mnemonics when they are unambiguous; specifically, ANY is in both.
+ if _, ok := StringToType[s]; !ok {
+ return s
+ }
+ }
+ return "CLASS" + strconv.Itoa(int(c))
+}
+
+// String returns the string representation for the name n.
+func (n Name) String() string {
+ return sprintName(string(n))
+}
diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
new file mode 100644
index 0000000..ad83a27
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns.go
@@ -0,0 +1,134 @@
+package dns
+
+import "strconv"
+
+const (
+ year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
+ defaultTtl = 3600 // Default internal TTL.
+
+ // DefaultMsgSize is the standard default for messages larger than 512 bytes.
+ DefaultMsgSize = 4096
+ // MinMsgSize is the minimal size of a DNS packet.
+ MinMsgSize = 512
+ // MaxMsgSize is the largest possible DNS packet.
+ MaxMsgSize = 65535
+)
+
+// Error represents a DNS error.
+type Error struct{ err string }
+
+func (e *Error) Error() string {
+ if e == nil {
+ return "dns: "
+ }
+ return "dns: " + e.err
+}
+
+// An RR represents a resource record.
+type RR interface {
+ // Header returns the header of a resource record. The header contains
+ // everything up to the rdata.
+ Header() *RR_Header
+ // String returns the text representation of the resource record.
+ String() string
+
+ // copy returns a copy of the RR
+ copy() RR
+
+ // len returns the length (in octets) of the compressed or uncompressed RR in wire format.
+ //
+ // If compression is nil, the uncompressed size will be returned, otherwise the compressed
+ // size will be returned and domain names will be added to the map for future compression.
+ len(off int, compression map[string]struct{}) int
+
+ // pack packs the records RDATA into wire format. The header will
+ // already have been packed into msg.
+ pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
+
+ // unpack unpacks an RR from wire format.
+ //
+ // This will only be called on a new and empty RR type with only the header populated. It
+ // will only be called if the record's RDATA is non-empty.
+ unpack(msg []byte, off int) (off1 int, err error)
+
+ // parse parses an RR from zone file format.
+ //
+ // This will only be called on a new and empty RR type with only the header populated.
+ parse(c *zlexer, origin string) *ParseError
+
+ // isDuplicate returns whether the two RRs are duplicates.
+ isDuplicate(r2 RR) bool
+}
+
+// RR_Header is the header all DNS resource records share.
+type RR_Header struct {
+ Name string `dns:"cdomain-name"`
+ Rrtype uint16
+ Class uint16
+ Ttl uint32
+ Rdlength uint16 // Length of data after header.
+}
+
+// Header returns itself. This is here to make RR_Header implement the RR interface.
+func (h *RR_Header) Header() *RR_Header { return h }
+
+// Just to implement the RR interface.
+func (h *RR_Header) copy() RR { return nil }
+
+func (h *RR_Header) String() string {
+ var s string
+
+ if h.Rrtype == TypeOPT {
+ s = ";"
+ // and maybe other things
+ }
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += Class(h.Class).String() + "\t"
+ s += Type(h.Rrtype).String() + "\t"
+ return s
+}
+
+func (h *RR_Header) len(off int, compression map[string]struct{}) int {
+ l := domainNameLen(h.Name, off, compression, true)
+ l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
+ return l
+}
+
+func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ // RR_Header has no RDATA to pack.
+ return off, nil
+}
+
+func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
+ panic("dns: internal error: unpack should never be called on RR_Header")
+}
+
+func (h *RR_Header) parse(c *zlexer, origin string) *ParseError {
+ panic("dns: internal error: parse should never be called on RR_Header")
+}
+
+// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
+func (rr *RFC3597) ToRFC3597(r RR) error {
+ buf := make([]byte, Len(r)*2)
+ headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
+ if err != nil {
+ return err
+ }
+ buf = buf[:off]
+
+ *rr = RFC3597{Hdr: *r.Header()}
+ rr.Hdr.Rdlength = uint16(off - headerEnd)
+
+ if noRdata(rr.Hdr) {
+ return nil
+ }
+
+ _, err = rr.unpack(buf, headerEnd)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
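+
+// Usage sketch (NewRR is this package's zone-format parser):
+//
+// rr, _ := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
+// var unknown dns.RFC3597
+// _ = unknown.ToRFC3597(rr) // unknown now holds the RFC 3597 \# form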
diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go
new file mode 100644
index 0000000..12a693f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec.go
@@ -0,0 +1,794 @@
+package dns
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ _ "crypto/md5"
+ "crypto/rand"
+ "crypto/rsa"
+ _ "crypto/sha1"
+ _ "crypto/sha256"
+ _ "crypto/sha512"
+ "encoding/asn1"
+ "encoding/binary"
+ "encoding/hex"
+ "math/big"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/crypto/ed25519"
+)
+
+// DNSSEC encryption algorithm codes.
+const (
+ _ uint8 = iota
+ RSAMD5
+ DH
+ DSA
+ _ // Skip 4, RFC 6725, section 2.1
+ RSASHA1
+ DSANSEC3SHA1
+ RSASHA1NSEC3SHA1
+ RSASHA256
+ _ // Skip 9, RFC 6725, section 2.1
+ RSASHA512
+ _ // Skip 11, RFC 6725, section 2.1
+ ECCGOST
+ ECDSAP256SHA256
+ ECDSAP384SHA384
+ ED25519
+ ED448
+ INDIRECT uint8 = 252
+ PRIVATEDNS uint8 = 253 // Private (experimental keys)
+ PRIVATEOID uint8 = 254
+)
+
+// AlgorithmToString is a map of algorithm IDs to algorithm names.
+var AlgorithmToString = map[uint8]string{
+ RSAMD5: "RSAMD5",
+ DH: "DH",
+ DSA: "DSA",
+ RSASHA1: "RSASHA1",
+ DSANSEC3SHA1: "DSA-NSEC3-SHA1",
+ RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1",
+ RSASHA256: "RSASHA256",
+ RSASHA512: "RSASHA512",
+ ECCGOST: "ECC-GOST",
+ ECDSAP256SHA256: "ECDSAP256SHA256",
+ ECDSAP384SHA384: "ECDSAP384SHA384",
+ ED25519: "ED25519",
+ ED448: "ED448",
+ INDIRECT: "INDIRECT",
+ PRIVATEDNS: "PRIVATEDNS",
+ PRIVATEOID: "PRIVATEOID",
+}
+
+// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
+var AlgorithmToHash = map[uint8]crypto.Hash{
+ RSAMD5: crypto.MD5, // Deprecated in RFC 6725
+ DSA: crypto.SHA1,
+ RSASHA1: crypto.SHA1,
+ RSASHA1NSEC3SHA1: crypto.SHA1,
+ RSASHA256: crypto.SHA256,
+ ECDSAP256SHA256: crypto.SHA256,
+ ECDSAP384SHA384: crypto.SHA384,
+ RSASHA512: crypto.SHA512,
+ ED25519: crypto.Hash(0),
+}
+
+// DNSSEC hashing algorithm codes.
+const (
+ _ uint8 = iota
+ SHA1 // RFC 4034
+ SHA256 // RFC 4509
+ GOST94 // RFC 5933
+ SHA384 // Experimental
+ SHA512 // Experimental
+)
+
+// HashToString is a map of hash IDs to names.
+var HashToString = map[uint8]string{
+ SHA1: "SHA1",
+ SHA256: "SHA256",
+ GOST94: "GOST94",
+ SHA384: "SHA384",
+ SHA512: "SHA512",
+}
+
+// DNSKEY flag values.
+const (
+ SEP = 1
+ REVOKE = 1 << 7
+ ZONE = 1 << 8
+)
+
+// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing.
+type rrsigWireFmt struct {
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ /* No Signature */
+}
+
+// Used for converting DNSKEY's rdata to wirefmt.
+type dnskeyWireFmt struct {
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+ /* Nothing is left out */
+}
+
+func divRoundUp(a, b int) int {
+ return (a + b - 1) / b
+}
+
+// KeyTag calculates the keytag (or key-id) of the DNSKEY.
+func (k *DNSKEY) KeyTag() uint16 {
+ if k == nil {
+ return 0
+ }
+ var keytag int
+ switch k.Algorithm {
+ case RSAMD5:
+ // Look at the bottom two bytes of the modulus, which is the last
+ // item in the pubkey.
+ // This algorithm has been deprecated, but keep this key-tag calculation.
+ modulus, _ := fromBase64([]byte(k.PublicKey))
+ if len(modulus) > 1 {
+ x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
+ keytag = int(x)
+ }
+ default:
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := packKeyWire(keywire, wire)
+ if err != nil {
+ return 0
+ }
+ wire = wire[:n]
+ for i, v := range wire {
+ if i&1 != 0 {
+ keytag += int(v) // must be larger than uint32
+ } else {
+ keytag += int(v) << 8
+ }
+ }
+ keytag += keytag >> 16 & 0xFFFF
+ keytag &= 0xFFFF
+ }
+ return uint16(keytag)
+}
+
+// ToDS converts a DNSKEY record to a DS record.
+func (k *DNSKEY) ToDS(h uint8) *DS {
+ if k == nil {
+ return nil
+ }
+ ds := new(DS)
+ ds.Hdr.Name = k.Hdr.Name
+ ds.Hdr.Class = k.Hdr.Class
+ ds.Hdr.Rrtype = TypeDS
+ ds.Hdr.Ttl = k.Hdr.Ttl
+ ds.Algorithm = k.Algorithm
+ ds.DigestType = h
+ ds.KeyTag = k.KeyTag()
+
+ keywire := new(dnskeyWireFmt)
+ keywire.Flags = k.Flags
+ keywire.Protocol = k.Protocol
+ keywire.Algorithm = k.Algorithm
+ keywire.PublicKey = k.PublicKey
+ wire := make([]byte, DefaultMsgSize)
+ n, err := packKeyWire(keywire, wire)
+ if err != nil {
+ return nil
+ }
+ wire = wire[:n]
+
+ owner := make([]byte, 255)
+ off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
+ if err1 != nil {
+ return nil
+ }
+ owner = owner[:off]
+ // RFC4034:
+ // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
+ // "|" denotes concatenation
+ // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
+
+ var hash crypto.Hash
+ switch h {
+ case SHA1:
+ hash = crypto.SHA1
+ case SHA256:
+ hash = crypto.SHA256
+ case SHA384:
+ hash = crypto.SHA384
+ case SHA512:
+ hash = crypto.SHA512
+ default:
+ return nil
+ }
+
+ s := hash.New()
+ s.Write(owner)
+ s.Write(wire)
+ ds.Digest = hex.EncodeToString(s.Sum(nil))
+ return ds
+}
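+
+// Sketch: deriving a SHA-256 DS record from a *dns.DNSKEY:
+//
+// ds := key.ToDS(dns.SHA256)
+// if ds == nil {
+// 	// unsupported digest type or packing failure
+// }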
+
+// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
+func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
+ c := &CDNSKEY{DNSKEY: *k}
+ c.Hdr = k.Hdr
+ c.Hdr.Rrtype = TypeCDNSKEY
+ return c
+}
+
+// ToCDS converts a DS record to a CDS record.
+func (d *DS) ToCDS() *CDS {
+ c := &CDS{DS: *d}
+ c.Hdr = d.Hdr
+ c.Hdr.Rrtype = TypeCDS
+ return c
+}
+
+// Sign signs an RRSet. The signature needs to be filled in with the values:
+// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
+// from the RRset. Sign returns a non-nil error when the signing fails.
+// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is
+// non-zero, it is used as-is, otherwise the TTL of the RRset is used as the
+// OrigTTL.
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
+ if k == nil {
+ return ErrPrivKey
+ }
+ // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ h0 := rrset[0].Header()
+ rr.Hdr.Rrtype = TypeRRSIG
+ rr.Hdr.Name = h0.Name
+ rr.Hdr.Class = h0.Class
+ if rr.OrigTtl == 0 { // If set don't override
+ rr.OrigTtl = h0.Ttl
+ }
+ rr.TypeCovered = h0.Rrtype
+ rr.Labels = uint8(CountLabel(h0.Name))
+
+ if strings.HasPrefix(h0.Name, "*") {
+ rr.Labels-- // wildcard, remove from label count
+ }
+
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ // For signing, lowercase this name
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+
+ // Create the desired binary blob
+ signdata := make([]byte, DefaultMsgSize)
+ n, err := packSigWire(sigwire, signdata)
+ if err != nil {
+ return err
+ }
+ signdata = signdata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ switch rr.Algorithm {
+ case ED25519:
+ // ed25519 signs the raw message and performs hashing internally.
+ // All other supported signature schemes operate over the pre-hashed
+ // message, and thus ed25519 must be handled separately here.
+ //
+ // The raw message is passed directly into sign and crypto.Hash(0) is
+ // used to signal to the crypto.Signer that the data has not been hashed.
+ signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
+ if err != nil {
+ return err
+ }
+
+ rr.Signature = toBase64(signature)
+ case RSAMD5, DSA, DSANSEC3SHA1:
+ // See RFC 6944.
+ return ErrAlg
+ default:
+ h := hash.New()
+ h.Write(signdata)
+ h.Write(wire)
+
+ signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return err
+ }
+
+ rr.Signature = toBase64(signature)
+ }
+
+ return nil
+}
+
+func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) {
+ signature, err := k.Sign(rand.Reader, hashed, hash)
+ if err != nil {
+ return nil, err
+ }
+
+ switch alg {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512:
+ return signature, nil
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ ecdsaSignature := &struct {
+ R, S *big.Int
+ }{}
+ if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil {
+ return nil, err
+ }
+
+ var intlen int
+ switch alg {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+
+ signature := intToBytes(ecdsaSignature.R, intlen)
+ signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...)
+ return signature, nil
+
+ // There is no defined interface for what a DSA backed crypto.Signer returns
+ case DSA, DSANSEC3SHA1:
+ // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8)
+ // signature := []byte{byte(t)}
+ // signature = append(signature, intToBytes(r1, 20)...)
+ // signature = append(signature, intToBytes(s1, 20)...)
+ // rr.Signature = signature
+
+ case ED25519:
+ return signature, nil
+ }
+
+ return nil, ErrAlg
+}
+
+// Verify validates an RRSet with the signature and key. This is only the
+// cryptographic test, the signature validity period must be checked separately.
+// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
+func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
+ // First the easy checks
+ if !IsRRset(rrset) {
+ return ErrRRset
+ }
+ if rr.KeyTag != k.KeyTag() {
+ return ErrKey
+ }
+ if rr.Hdr.Class != k.Hdr.Class {
+ return ErrKey
+ }
+ if rr.Algorithm != k.Algorithm {
+ return ErrKey
+ }
+ if !strings.EqualFold(rr.SignerName, k.Hdr.Name) {
+ return ErrKey
+ }
+ if k.Protocol != 3 {
+ return ErrKey
+ }
+
+ // IsRRset checked that we have at least one RR and that the RRs in
+ // the set have consistent type, class, and name. Also check that type and
+ // class match the RRSIG record.
+ if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
+ return ErrRRset
+ }
+
+ // RFC 4035 5.3.2. Reconstructing the Signed Data
+ // Copy the sig, except the rrsig data
+ sigwire := new(rrsigWireFmt)
+ sigwire.TypeCovered = rr.TypeCovered
+ sigwire.Algorithm = rr.Algorithm
+ sigwire.Labels = rr.Labels
+ sigwire.OrigTtl = rr.OrigTtl
+ sigwire.Expiration = rr.Expiration
+ sigwire.Inception = rr.Inception
+ sigwire.KeyTag = rr.KeyTag
+ sigwire.SignerName = strings.ToLower(rr.SignerName)
+ // Create the desired binary blob
+ signeddata := make([]byte, DefaultMsgSize)
+ n, err := packSigWire(sigwire, signeddata)
+ if err != nil {
+ return err
+ }
+ signeddata = signeddata[:n]
+ wire, err := rawSignatureData(rrset, rr)
+ if err != nil {
+ return err
+ }
+
+ sigbuf := rr.sigBuf() // Get the binary signature data
+ if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+ // TODO(miek)
+ // remove the domain name and assume its ours?
+ }
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return ErrAlg
+ }
+
+ switch rr.Algorithm {
+ case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
+ // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
+ pubkey := k.publicKeyRSA() // Get the key
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ h := hash.New()
+ h.Write(signeddata)
+ h.Write(wire)
+ return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
+
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pubkey := k.publicKeyECDSA()
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ // Split sigbuf into the r and s coordinates
+ r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
+ s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
+
+ h := hash.New()
+ h.Write(signeddata)
+ h.Write(wire)
+ if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
+ return nil
+ }
+ return ErrSig
+
+ case ED25519:
+ pubkey := k.publicKeyED25519()
+ if pubkey == nil {
+ return ErrKey
+ }
+
+ if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
+ return nil
+ }
+ return ErrSig
+
+ default:
+ return ErrAlg
+ }
+}
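+
+// Verification sketch; the validity window must be checked separately, see
+// ValidityPeriod below:
+//
+// if err := sig.Verify(key, rrset); err == nil && sig.ValidityPeriod(time.Time{}) {
+// 	// cryptographically valid and currently within its validity window
+// }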
+
+// ValidityPeriod uses RFC1982 serial arithmetic to calculate
+// if a signature period is valid. If t is the zero time, the
+// current time is used; otherwise t is. Returns true if the signature
+// is valid at the given time, otherwise returns false.
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
+ var utc int64
+ if t.IsZero() {
+ utc = time.Now().UTC().Unix()
+ } else {
+ utc = t.UTC().Unix()
+ }
+ modi := (int64(rr.Inception) - utc) / year68
+ mode := (int64(rr.Expiration) - utc) / year68
+ ti := int64(rr.Inception) + modi*year68
+ te := int64(rr.Expiration) + mode*year68
+ return ti <= utc && utc <= te
+}
+
+// Return the signature's base64-encoded sigdata as a byte slice.
+func (rr *RRSIG) sigBuf() []byte {
+ sigbuf, err := fromBase64([]byte(rr.Signature))
+ if err != nil {
+ return nil
+ }
+ return sigbuf
+}
+
+// publicKeyRSA returns the RSA public key from a DNSKEY record.
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+
+ if len(keybuf) < 1+1+64 {
+ // Exponent must be at least 1 byte and modulus at least 64
+ return nil
+ }
+
+ // RFC 2537/3110, section 2. RSA Public KEY Resource Records
+ // Length is in the 0th byte, unless it's zero; then it
+ // is in bytes 1 and 2 and it's a 16-bit number
+ explen := uint16(keybuf[0])
+ keyoff := 1
+ if explen == 0 {
+ explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
+ keyoff = 3
+ }
+
+ if explen > 4 || explen == 0 || keybuf[keyoff] == 0 {
+ // Exponent larger than supported by the crypto package,
+ // empty, or contains prohibited leading zero.
+ return nil
+ }
+
+ modoff := keyoff + int(explen)
+ modlen := len(keybuf) - modoff
+ if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 {
+ // Modulus is too small, large, or contains prohibited leading zero.
+ return nil
+ }
+
+ pubkey := new(rsa.PublicKey)
+
+ var expo uint64
+ // The exponent of length explen is between keyoff and modoff.
+ for _, v := range keybuf[keyoff:modoff] {
+ expo <<= 8
+ expo |= uint64(v)
+ }
+ if expo > 1<<31-1 {
+ // Larger exponent than supported by the crypto package.
+ return nil
+ }
+
+ pubkey.E = int(expo)
+ pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
+ return pubkey
+}
+
+// publicKeyECDSA returns the Curve public key from the DNSKEY record.
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ pubkey := new(ecdsa.PublicKey)
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ pubkey.Curve = elliptic.P256()
+ if len(keybuf) != 64 {
+ // wrongly encoded key
+ return nil
+ }
+ case ECDSAP384SHA384:
+ pubkey.Curve = elliptic.P384()
+ if len(keybuf) != 96 {
+ // Wrongly encoded key
+ return nil
+ }
+ }
+ pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
+ pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
+ return pubkey
+}
+
+func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ if len(keybuf) < 22 {
+ return nil
+ }
+ t, keybuf := int(keybuf[0]), keybuf[1:]
+ size := 64 + t*8
+ q, keybuf := keybuf[:20], keybuf[20:]
+ if len(keybuf) != 3*size {
+ return nil
+ }
+ p, keybuf := keybuf[:size], keybuf[size:]
+ g, y := keybuf[:size], keybuf[size:]
+ pubkey := new(dsa.PublicKey)
+ pubkey.Parameters.Q = new(big.Int).SetBytes(q)
+ pubkey.Parameters.P = new(big.Int).SetBytes(p)
+ pubkey.Parameters.G = new(big.Int).SetBytes(g)
+ pubkey.Y = new(big.Int).SetBytes(y)
+ return pubkey
+}
+
+func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
+ keybuf, err := fromBase64([]byte(k.PublicKey))
+ if err != nil {
+ return nil
+ }
+ if len(keybuf) != ed25519.PublicKeySize {
+ return nil
+ }
+ return keybuf
+}
+
+type wireSlice [][]byte
+
+func (p wireSlice) Len() int { return len(p) }
+func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p wireSlice) Less(i, j int) bool {
+ _, ioff, _ := UnpackDomainName(p[i], 0)
+ _, joff, _ := UnpackDomainName(p[j], 0)
+ return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
+}
+
+// Return the raw signature data.
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
+ wires := make(wireSlice, len(rrset))
+ for i, r := range rrset {
+ r1 := r.copy()
+ h := r1.Header()
+ h.Ttl = s.OrigTtl
+ labels := SplitDomainName(h.Name)
+ // 6.2. Canonical RR Form. (4) - wildcards
+ if len(labels) > int(s.Labels) {
+ // Wildcard
+ h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
+ }
+ // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
+ h.Name = strings.ToLower(h.Name)
+ // 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
+ // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
+ // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
+ // SRV, DNAME, A6
+ //
+ // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC):
+ // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record
+ // that needs conversion to lowercase, and twice at that. Since HINFO
+ // records contain no domain names, they are not subject to case
+ // conversion.
+ switch x := r1.(type) {
+ case *NS:
+ x.Ns = strings.ToLower(x.Ns)
+ case *MD:
+ x.Md = strings.ToLower(x.Md)
+ case *MF:
+ x.Mf = strings.ToLower(x.Mf)
+ case *CNAME:
+ x.Target = strings.ToLower(x.Target)
+ case *SOA:
+ x.Ns = strings.ToLower(x.Ns)
+ x.Mbox = strings.ToLower(x.Mbox)
+ case *MB:
+ x.Mb = strings.ToLower(x.Mb)
+ case *MG:
+ x.Mg = strings.ToLower(x.Mg)
+ case *MR:
+ x.Mr = strings.ToLower(x.Mr)
+ case *PTR:
+ x.Ptr = strings.ToLower(x.Ptr)
+ case *MINFO:
+ x.Rmail = strings.ToLower(x.Rmail)
+ x.Email = strings.ToLower(x.Email)
+ case *MX:
+ x.Mx = strings.ToLower(x.Mx)
+ case *RP:
+ x.Mbox = strings.ToLower(x.Mbox)
+ x.Txt = strings.ToLower(x.Txt)
+ case *AFSDB:
+ x.Hostname = strings.ToLower(x.Hostname)
+ case *RT:
+ x.Host = strings.ToLower(x.Host)
+ case *SIG:
+ x.SignerName = strings.ToLower(x.SignerName)
+ case *PX:
+ x.Map822 = strings.ToLower(x.Map822)
+ x.Mapx400 = strings.ToLower(x.Mapx400)
+ case *NAPTR:
+ x.Replacement = strings.ToLower(x.Replacement)
+ case *KX:
+ x.Exchanger = strings.ToLower(x.Exchanger)
+ case *SRV:
+ x.Target = strings.ToLower(x.Target)
+ case *DNAME:
+ x.Target = strings.ToLower(x.Target)
+ }
+ // 6.2. Canonical RR Form. (5) - origTTL
+ wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
+ off, err1 := PackRR(r1, wire, 0, nil, false)
+ if err1 != nil {
+ return nil, err1
+ }
+ wire = wire[:off]
+ wires[i] = wire
+ }
+ sort.Sort(wires)
+ for i, wire := range wires {
+ if i > 0 && bytes.Equal(wire, wires[i-1]) {
+ continue
+ }
+ buf = append(buf, wire...)
+ }
+ return buf, nil
+}
+
+func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go RRSIG packing
+ off, err := packUint16(sw.TypeCovered, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(sw.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(sw.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(sw.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(sw.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = PackDomainName(sw.SignerName, msg, off, nil, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go DNSKEY packing
+ off, err := packUint16(dw.Flags, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(dw.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(dw.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(dw.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go
new file mode 100644
index 0000000..60737e5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keygen.go
@@ -0,0 +1,140 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/rsa"
+ "math/big"
+
+ "golang.org/x/crypto/ed25519"
+)
+
+// Generate generates a DNSKEY of the given bit size.
+// The public part is put inside the DNSKEY record.
+// The Algorithm in the key must be set as this will define
+// what kind of DNSKEY will be generated.
+// The ECDSA algorithms imply a fixed keysize, in that case
+// bits should be set to the size of the algorithm.
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
+ switch k.Algorithm {
+ case RSAMD5, DSA, DSANSEC3SHA1:
+ return nil, ErrAlg
+ case RSASHA1, RSASHA256, RSASHA1NSEC3SHA1:
+ if bits < 512 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case RSASHA512:
+ if bits < 1024 || bits > 4096 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP256SHA256:
+ if bits != 256 {
+ return nil, ErrKeySize
+ }
+ case ECDSAP384SHA384:
+ if bits != 384 {
+ return nil, ErrKeySize
+ }
+ case ED25519:
+ if bits != 256 {
+ return nil, ErrKeySize
+ }
+ }
+
+ switch k.Algorithm {
+ case RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1:
+ priv, err := rsa.GenerateKey(rand.Reader, bits)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N)
+ return priv, nil
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ var c elliptic.Curve
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ c = elliptic.P256()
+ case ECDSAP384SHA384:
+ c = elliptic.P384()
+ }
+ priv, err := ecdsa.GenerateKey(c, rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
+ return priv, nil
+ case ED25519:
+ pub, priv, err := ed25519.GenerateKey(rand.Reader)
+ if err != nil {
+ return nil, err
+ }
+ k.setPublicKeyED25519(pub)
+ return priv, nil
+ default:
+ return nil, ErrAlg
+ }
+}
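+
+// A minimal usage sketch (the owner name "example.org." and the key flags are
+// illustrative): generate a P-256 key and keep the returned private part.
+//
+//	k := &DNSKEY{
+//		Hdr:       RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600},
+//		Flags:     256, // zone signing key
+//		Protocol:  3,
+//		Algorithm: ECDSAP256SHA256,
+//	}
+//	priv, err := k.Generate(256) // bits must match the algorithm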
+
+// Set the public key (the value E and N)
+func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool {
+ if _E == 0 || _N == nil {
+ return false
+ }
+ buf := exponentToBuf(_E)
+ buf = append(buf, _N.Bytes()...)
+ k.PublicKey = toBase64(buf)
+ return true
+}
+
+// Set the public key for Elliptic Curves
+func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool {
+ if _X == nil || _Y == nil {
+ return false
+ }
+ var intlen int
+ switch k.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen))
+ return true
+}
+
+// Set the public key for Ed25519
+func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
+ if _K == nil {
+ return false
+ }
+ k.PublicKey = toBase64(_K)
+ return true
+}
+
+// exponentToBuf returns the RSA public exponent prefixed with its length, as
+// defined in RFC 3110, Section 2: RSA Public KEY Resource Records.
+func exponentToBuf(_E int) []byte {
+ var buf []byte
+ i := big.NewInt(int64(_E)).Bytes()
+ if len(i) < 256 {
+ buf = make([]byte, 1, 1+len(i))
+ buf[0] = uint8(len(i))
+ } else {
+ buf = make([]byte, 3, 3+len(i))
+ buf[0] = 0
+ buf[1] = uint8(len(i) >> 8)
+ buf[2] = uint8(len(i))
+ }
+ buf = append(buf, i...)
+ return buf
+}
+
+// curveToBuf returns the X and Y coordinates of a curve point. The two
+// values are just concatenated.
+func curveToBuf(_X, _Y *big.Int, intlen int) []byte {
+ buf := intToBytes(_X, intlen)
+ buf = append(buf, intToBytes(_Y, intlen)...)
+ return buf
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go
new file mode 100644
index 0000000..0e6f320
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go
@@ -0,0 +1,322 @@
+package dns
+
+import (
+ "bufio"
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+
+ "golang.org/x/crypto/ed25519"
+)
+
+// NewPrivateKey returns a PrivateKey by parsing the string s.
+// s should be in the same form as the BIND private key files.
+func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) {
+ if s == "" || s[len(s)-1] != '\n' { // We need a closing newline
+ return k.ReadPrivateKey(strings.NewReader(s+"\n"), "")
+ }
+ return k.ReadPrivateKey(strings.NewReader(s), "")
+}
+
+// ReadPrivateKey reads a private key from the io.Reader q. The string file is
+// only used in error reporting.
+// The public key must be known, because some cryptographic algorithms embed
+// the public key inside the private key.
+func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) {
+ m, err := parseKey(q, file)
+ if m == nil {
+ return nil, err
+ }
+ if _, ok := m["private-key-format"]; !ok {
+ return nil, ErrPrivKey
+ }
+ if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" {
+ return nil, ErrPrivKey
+ }
+ // TODO(mg): check if the pubkey matches the private key
+ algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
+ if err != nil {
+ return nil, ErrPrivKey
+ }
+ switch uint8(algo) {
+ case RSAMD5, DSA, DSANSEC3SHA1:
+ return nil, ErrAlg
+ case RSASHA1:
+ fallthrough
+ case RSASHA1NSEC3SHA1:
+ fallthrough
+ case RSASHA256:
+ fallthrough
+ case RSASHA512:
+ priv, err := readPrivateKeyRSA(m)
+ if err != nil {
+ return nil, err
+ }
+ pub := k.publicKeyRSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, nil
+ case ECCGOST:
+ return nil, ErrPrivKey
+ case ECDSAP256SHA256:
+ fallthrough
+ case ECDSAP384SHA384:
+ priv, err := readPrivateKeyECDSA(m)
+ if err != nil {
+ return nil, err
+ }
+ pub := k.publicKeyECDSA()
+ if pub == nil {
+ return nil, ErrKey
+ }
+ priv.PublicKey = *pub
+ return priv, nil
+ case ED25519:
+ return readPrivateKeyED25519(m)
+ default:
+ return nil, ErrPrivKey
+ }
+}
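+
+// A minimal usage sketch (the file name is illustrative; k must already hold
+// the matching public DNSKEY, since some algorithms embed it in the result):
+//
+//	f, _ := os.Open("Kexample.org.+013+12345.private")
+//	priv, err := k.ReadPrivateKey(f, "Kexample.org.+013+12345.private")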
+
+// readPrivateKeyRSA parses the key map into an RSA private key, filling in
+// the embedded public key fields as well. Return the private key.
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
+ p := new(rsa.PrivateKey)
+ p.Primes = []*big.Int{nil, nil}
+ for k, v := range m {
+ switch k {
+ case "modulus", "publicexponent", "privateexponent", "prime1", "prime2":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ switch k {
+ case "modulus":
+ p.PublicKey.N = new(big.Int).SetBytes(v1)
+ case "publicexponent":
+ i := new(big.Int).SetBytes(v1)
+ p.PublicKey.E = int(i.Int64()) // int64 should be large enough
+ case "privateexponent":
+ p.D = new(big.Int).SetBytes(v1)
+ case "prime1":
+ p.Primes[0] = new(big.Int).SetBytes(v1)
+ case "prime2":
+ p.Primes[1] = new(big.Int).SetBytes(v1)
+ }
+ case "exponent1", "exponent2", "coefficient":
+ // not used in Go (yet)
+ case "created", "publish", "activate":
+ // not used in Go (yet)
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
+ p := new(ecdsa.PrivateKey)
+ p.D = new(big.Int)
+ // TODO: validate that the required flags are present
+ for k, v := range m {
+ switch k {
+ case "privatekey":
+ v1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ p.D.SetBytes(v1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
+ var p ed25519.PrivateKey
+ // TODO: validate that the required flags are present
+ for k, v := range m {
+ switch k {
+ case "privatekey":
+ p1, err := fromBase64([]byte(v))
+ if err != nil {
+ return nil, err
+ }
+ if len(p1) != ed25519.SeedSize {
+ return nil, ErrPrivKey
+ }
+ p = ed25519.NewKeyFromSeed(p1)
+ case "created", "publish", "activate":
+ /* not used in Go (yet) */
+ }
+ }
+ return p, nil
+}
+
+// parseKey reads a private key from r. It returns a map[string]string,
+// with the key-value pairs, or an error when the file is not correct.
+func parseKey(r io.Reader, file string) (map[string]string, error) {
+ m := make(map[string]string)
+ var k string
+
+ c := newKLexer(r)
+
+ for l, ok := c.Next(); ok; l, ok = c.Next() {
+ // It should alternate
+ switch l.value {
+ case zKey:
+ k = l.token
+ case zValue:
+ if k == "" {
+ return nil, &ParseError{file, "no private key seen", l}
+ }
+
+ m[strings.ToLower(k)] = l.token
+ k = ""
+ }
+ }
+
+ // Surface any read errors from r.
+ if err := c.Err(); err != nil {
+ return nil, &ParseError{file: file, err: err.Error()}
+ }
+
+ return m, nil
+}
+
+type klexer struct {
+ br io.ByteReader
+
+ readErr error
+
+ line int
+ column int
+
+ key bool
+
+ eol bool // end-of-line
+}
+
+func newKLexer(r io.Reader) *klexer {
+ br, ok := r.(io.ByteReader)
+ if !ok {
+ br = bufio.NewReaderSize(r, 1024)
+ }
+
+ return &klexer{
+ br: br,
+
+ line: 1,
+
+ key: true,
+ }
+}
+
+func (kl *klexer) Err() error {
+ if kl.readErr == io.EOF {
+ return nil
+ }
+
+ return kl.readErr
+}
+
+// readByte returns the next byte from the input
+func (kl *klexer) readByte() (byte, bool) {
+ if kl.readErr != nil {
+ return 0, false
+ }
+
+ c, err := kl.br.ReadByte()
+ if err != nil {
+ kl.readErr = err
+ return 0, false
+ }
+
+ // Delay the newline handling until the next token is delivered;
+ // this fixes off-by-one errors when reporting a parse error.
+ if kl.eol {
+ kl.line++
+ kl.column = 0
+ kl.eol = false
+ }
+
+ if c == '\n' {
+ kl.eol = true
+ } else {
+ kl.column++
+ }
+
+ return c, true
+}
+
+func (kl *klexer) Next() (lex, bool) {
+ var (
+ l lex
+
+ str strings.Builder
+
+ commt bool
+ )
+
+ for x, ok := kl.readByte(); ok; x, ok = kl.readByte() {
+ l.line, l.column = kl.line, kl.column
+
+ switch x {
+ case ':':
+ if commt || !kl.key {
+ break
+ }
+
+ kl.key = false
+
+ // Next token is a space, eat it
+ kl.readByte()
+
+ l.value = zKey
+ l.token = str.String()
+ return l, true
+ case ';':
+ commt = true
+ case '\n':
+ if commt {
+ // Reset a comment
+ commt = false
+ }
+
+ if kl.key && str.Len() == 0 {
+ // ignore empty lines
+ break
+ }
+
+ kl.key = true
+
+ l.value = zValue
+ l.token = str.String()
+ return l, true
+ default:
+ if commt {
+ break
+ }
+
+ str.WriteByte(x)
+ }
+ }
+
+ if kl.readErr != nil && kl.readErr != io.EOF {
+ // Don't return any tokens after a read error occurs.
+ return lex{value: zEOF}, false
+ }
+
+ if str.Len() > 0 {
+ // Send remainder
+ l.value = zValue
+ l.token = str.String()
+ return l, true
+ }
+
+ return lex{value: zEOF}, false
+}
diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go
new file mode 100644
index 0000000..4493c9d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dnssec_privkey.go
@@ -0,0 +1,94 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "math/big"
+ "strconv"
+
+ "golang.org/x/crypto/ed25519"
+)
+
+const format = "Private-key-format: v1.3\n"
+
+var bigIntOne = big.NewInt(1)
+
+// PrivateKeyString converts a PrivateKey to a string. This string has the same
+// format as the private-key-file of BIND9 (Private-key-format: v1.3).
+// It needs some info from the key (the algorithm), so it's a method of the DNSKEY.
+// It supports *rsa.PrivateKey, *ecdsa.PrivateKey, *dsa.PrivateKey and ed25519.PrivateKey.
+func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
+ algorithm := strconv.Itoa(int(r.Algorithm))
+ algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
+
+ switch p := p.(type) {
+ case *rsa.PrivateKey:
+ modulus := toBase64(p.PublicKey.N.Bytes())
+ e := big.NewInt(int64(p.PublicKey.E))
+ publicExponent := toBase64(e.Bytes())
+ privateExponent := toBase64(p.D.Bytes())
+ prime1 := toBase64(p.Primes[0].Bytes())
+ prime2 := toBase64(p.Primes[1].Bytes())
+ // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
+ // and from: http://code.google.com/p/go/issues/detail?id=987
+ p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
+ q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
+ exp1 := new(big.Int).Mod(p.D, p1)
+ exp2 := new(big.Int).Mod(p.D, q1)
+ coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
+
+ exponent1 := toBase64(exp1.Bytes())
+ exponent2 := toBase64(exp2.Bytes())
+ coefficient := toBase64(coeff.Bytes())
+
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Modulus: " + modulus + "\n" +
+ "PublicExponent: " + publicExponent + "\n" +
+ "PrivateExponent: " + privateExponent + "\n" +
+ "Prime1: " + prime1 + "\n" +
+ "Prime2: " + prime2 + "\n" +
+ "Exponent1: " + exponent1 + "\n" +
+ "Exponent2: " + exponent2 + "\n" +
+ "Coefficient: " + coefficient + "\n"
+
+ case *ecdsa.PrivateKey:
+ var intlen int
+ switch r.Algorithm {
+ case ECDSAP256SHA256:
+ intlen = 32
+ case ECDSAP384SHA384:
+ intlen = 48
+ }
+ private := toBase64(intToBytes(p.D, intlen))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "PrivateKey: " + private + "\n"
+
+ case *dsa.PrivateKey:
+ T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
+ prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
+ subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
+ base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
+ priv := toBase64(intToBytes(p.X, 20))
+ pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "Prime(p): " + prime + "\n" +
+ "Subprime(q): " + subprime + "\n" +
+ "Base(g): " + base + "\n" +
+ "Private_value(x): " + priv + "\n" +
+ "Public_value(y): " + pub + "\n"
+
+ case ed25519.PrivateKey:
+ private := toBase64(p.Seed())
+ return format +
+ "Algorithm: " + algorithm + "\n" +
+ "PrivateKey: " + private + "\n"
+
+ default:
+ return ""
+ }
+}
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
new file mode 100644
index 0000000..3318b77
--- /dev/null
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -0,0 +1,268 @@
+/*
+Package dns implements a full featured interface to the Domain Name System.
+Both server- and client-side programming is supported. The package allows
+complete control over what is sent out to the DNS. The API follows the
+less-is-more principle, by presenting a small, clean interface.
+
+It supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
+TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
+
+Note that domain names MUST be fully qualified before sending them, unqualified
+names in a message will result in a packing failure.
+
+Resource records are native types. They are not stored in wire format. Basic
+usage pattern for creating a new resource record:
+
+ r := new(dns.MX)
+ r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}
+ r.Preference = 10
+ r.Mx = "mx.miek.nl."
+
+Or directly from a string:
+
+ mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+
+Or when the default origin (.) and TTL (3600) and class (IN) suit you:
+
+ mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
+
+Or even:
+
+ mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
+
+In the DNS, messages are exchanged; these messages contain resource records
+(sets). Basic use pattern for creating a message:
+
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+
+Or when not certain if the domain name is fully qualified:
+
+ m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
+
+The message m is now a message with the question section set to ask the MX
+records for the miek.nl. zone.
+
+The following is slightly more verbose, but more flexible:
+
+ m1 := new(dns.Msg)
+ m1.Id = dns.Id()
+ m1.RecursionDesired = true
+ m1.Question = make([]dns.Question, 1)
+ m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
+
+After creating a message it can be sent. Basic use pattern for synchronous
+querying the DNS at a server configured on 127.0.0.1 and port 53:
+
+ c := new(dns.Client)
+ in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
+
+Suppressing multiple outstanding queries (with the same question, type and
+class) is as easy as setting:
+
+ c.SingleInflight = true
+
+More advanced options are available using a net.Dialer and the corresponding API.
+For example it is possible to set a timeout, or to specify a source IP address
+and port to use for the connection:
+
+ c := new(dns.Client)
+ laddr := net.UDPAddr{
+ IP: net.ParseIP("::1"),
+ Port: 12345,
+ Zone: "",
+ }
+ c.Dialer = &net.Dialer{
+ Timeout: 200 * time.Millisecond,
+ LocalAddr: &laddr,
+ }
+ in, rtt, err := c.Exchange(m1, "8.8.8.8:53")
+
+If these "advanced" features are not needed, a simple UDP query can be sent,
+with:
+
+ in, err := dns.Exchange(m1, "127.0.0.1:53")
+
+When this function returns you will get a DNS message. A DNS message consists
+of four sections.
+The question section: in.Question, the answer section: in.Answer,
+the authority section: in.Ns and the additional section: in.Extra.
+
+Each of these sections (except the Question section) contains a []RR. Basic
+use pattern for accessing the rdata of a TXT RR as the first RR in
+the Answer section:
+
+ if t, ok := in.Answer[0].(*dns.TXT); ok {
+ // do something with t.Txt
+ }
+
+Domain Name and TXT Character String Representations
+
+Both domain names and TXT character strings are converted to presentation form
+both when unpacked and when converted to strings.
+
+For TXT character strings, tabs, carriage returns and line feeds will be
+converted to \t, \r and \n respectively. Back slashes and quotations marks will
+be escaped. Bytes below 32 and above 127 will be converted to \DDD form.
+
+For domain names, in addition to the above rules brackets, periods, spaces,
+semicolons and the at symbol are escaped.
+
+DNSSEC
+
+DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses
+public key cryptography to sign resource records. The public keys are stored in
+DNSKEY records and the signatures in RRSIG records.
+
+Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK)
+bit to a request.
+
+ m := new(dns.Msg)
+ m.SetEdns0(4096, true)
+
+Signature generation, signature verification and key generation are all supported.
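+
+A minimal verification sketch (sig is a *dns.RRSIG, key the matching
+*dns.DNSKEY and rrset a []dns.RR, all assumed to have been obtained from a
+response; error handling elided):
+
+ if err := sig.Verify(key, rrset); err == nil && sig.ValidityPeriod(time.Time{}) {
+  // rrset is cryptographically valid and inside its validity window
+ }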
+
+DYNAMIC UPDATES
+
+Dynamic updates reuse the DNS message format, but rename three of the
+sections. Question is Zone, Answer is Prerequisite, Authority is Update; only
+the Additional section is not renamed. See RFC 2136 for the gory details.
+
+You can set a rather complex set of rules for the existence or absence of
+certain resource records or names in a zone to specify if resource records
+should be added or removed. The table from RFC 2136 supplemented with the Go
+DNS function shows which functions exist to specify the prerequisites.
+
+ 3.2.4 - Table Of Metavalues Used In Prerequisite Section
+
+ CLASS TYPE RDATA Meaning Function
+ --------------------------------------------------------------
+ ANY ANY empty Name is in use dns.NameUsed
+ ANY rrset empty RRset exists (value indep) dns.RRsetUsed
+ NONE ANY empty Name is not in use dns.NameNotUsed
+ NONE rrset empty RRset does not exist dns.RRsetNotUsed
+ zone rrset rr RRset exists (value dep) dns.Used
+
+The prerequisite section can also be left empty. If you have decided on the
+prerequisites you can tell what RRs should be added or deleted. The next table
+shows the options you have and what functions to call.
+
+ 3.4.2.6 - Table Of Metavalues Used In Update Section
+
+ CLASS TYPE RDATA Meaning Function
+ ---------------------------------------------------------------
+ ANY ANY empty Delete all RRsets from name dns.RemoveName
+ ANY rrset empty Delete an RRset dns.RemoveRRset
+ NONE rrset rr Delete an RR from RRset dns.Remove
+ zone rrset rr Add to an RRset dns.Insert
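+
+A minimal update sketch (the zone and record are illustrative): add an A
+record to example.org. with no prerequisites.
+
+ rr, _ := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
+ m := new(dns.Msg)
+ m.SetUpdate("example.org.")
+ m.Insert([]dns.RR{rr})
+ // send m with a client, as shown above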
+
+TRANSACTION SIGNATURE
+
+A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
+The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
+
+Basic use pattern when querying with a TSIG name "axfr." (note that these key names
+must be fully qualified - as they are domain names) and the base64 secret
+"so6ZGir4GPAqINNh9U5c3A==":
+
+If an incoming message contains a TSIG record it MUST be the last record in
+the additional section (RFC2845 3.2). This means that you should make the
+call to SetTsig last, right before executing the query. If you make any
+changes to the RRset after calling SetTsig() the signature will be incorrect.
+
+ c := new(dns.Client)
+ c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m := new(dns.Msg)
+ m.SetQuestion("miek.nl.", dns.TypeMX)
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ ...
+ // When sending the TSIG RR is calculated and filled in before sending
+
+When requesting a zone transfer with TSIG (almost all TSIG usage is for zone
+transfers), this is the basic use pattern. In this example we
+request an AXFR for miek.nl. with TSIG key named "axfr." and secret
+"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54:
+
+ t := new(dns.Transfer)
+ m := new(dns.Msg)
+ t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ m.SetAxfr("miek.nl.")
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ c, err := t.In(m, "176.58.119.54:53")
+ for r := range c { ... }
+
+You can now read the records from the transfer as they come in. Each envelope
+is checked with TSIG. If something is not correct an error is returned.
+
+Basic use pattern validating and replying to a message that has TSIG set.
+
+ server := &dns.Server{Addr: ":53", Net: "udp"}
+ server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+ go server.ListenAndServe()
+ dns.HandleFunc(".", handleRequest)
+
+ func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ if r.IsTsig() != nil {
+ if w.TsigStatus() == nil {
+ // *Msg r has a TSIG record and it was validated
+ m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+ } else {
+ // *Msg r has a TSIG record and it was not validated
+ }
+ }
+ w.WriteMsg(m)
+ }
+
+PRIVATE RRS
+
+RFC 6895 sets aside a range of type codes for private use. This range is 65,280
+- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these
+can be used, before requesting an official type code from IANA.
+
+See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more
+information.
+
+EDNS0
+
+EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by
+RFC 6891. It defines a new RR type, the OPT RR, which is then completely
+abused.
+
+Basic use pattern for creating an (empty) OPT RR:
+
+ o := new(dns.OPT)
+ o.Hdr.Name = "." // MUST be the root zone, per definition.
+ o.Hdr.Rrtype = dns.TypeOPT
+
+The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891) interfaces.
+Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and
+EDNS0_SUBNET (RFC 7871). Note that these options may be combined in an OPT RR.
+Basic use pattern for a server to check if (and which) options are set:
+
+ // o is a dns.OPT
+ for _, s := range o.Option {
+ switch e := s.(type) {
+ case *dns.EDNS0_NSID:
+ // do stuff with e.Nsid
+ case *dns.EDNS0_SUBNET:
+ // access e.Family, e.Address, etc.
+ }
+ }
+
+SIG(0)
+
+From RFC 2931:
+
+ SIG(0) provides protection for DNS transactions and requests ....
+ ... protection for glue records, DNS requests, protection for message headers
+ on requests and responses, and protection of the overall integrity of a response.
+
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of
+the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256,
+ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512.
+
+Signing subsequent messages in multi-message sessions is not implemented.
+*/
+package dns
diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go
new file mode 100644
index 0000000..49e6940
--- /dev/null
+++ b/vendor/github.com/miekg/dns/duplicate.go
@@ -0,0 +1,38 @@
+package dns
+
+//go:generate go run duplicate_generate.go
+
+// IsDuplicate checks if r1 and r2 are duplicates of each other, excluding the TTL.
+// This means the header data is equal *and* the RDATA is the same. Returns true
+// if so, otherwise false.
+// It's a protocol violation to have identical RRs in a message.
+func IsDuplicate(r1, r2 RR) bool {
+ // Check whether the record header is identical.
+ if !r1.Header().isDuplicate(r2.Header()) {
+ return false
+ }
+
+ // Check whether the RDATA is identical.
+ return r1.isDuplicate(r2)
+}
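+
+// A minimal usage sketch (both records are assumed to parse): the TTLs
+// differ, but the records are still duplicates.
+//
+//	r1, _ := NewRR("miek.nl. 3600 IN A 127.0.0.1")
+//	r2, _ := NewRR("miek.nl. 1800 IN A 127.0.0.1")
+//	IsDuplicate(r1, r2) // true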
+
+func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RR_Header)
+ if !ok {
+ return false
+ }
+ if r1.Class != r2.Class {
+ return false
+ }
+ if r1.Rrtype != r2.Rrtype {
+ return false
+ }
+ if !isDuplicateName(r1.Name, r2.Name) {
+ return false
+ }
+ // ignore TTL
+ return true
+}
+
+// isDuplicateName checks if the domain names s1 and s2 are equal.
+func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
new file mode 100644
index 0000000..04808d5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -0,0 +1,675 @@
+package dns
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// EDNS0 Option codes.
+const (
+ EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+ EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
+ EDNS0NSID = 0x3 // nsid (See RFC 5001)
+ EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
+ EDNS0DHU = 0x6 // DS Hash Understood
+ EDNS0N3U = 0x7 // NSEC3 Hash Understood
+ EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
+ EDNS0EXPIRE = 0x9 // EDNS0 expire
+ EDNS0COOKIE = 0xa // EDNS0 Cookie
+ EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
+ EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
+ EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
+ EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
+ _DO = 1 << 15 // DNSSEC OK
+)
+
+// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
+// See RFC 6891.
+type OPT struct {
+ Hdr RR_Header
+ Option []EDNS0 `dns:"opt"`
+}
+
+func (rr *OPT) String() string {
+ s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; "
+ if rr.Do() {
+ s += "flags: do; "
+ } else {
+ s += "flags: ; "
+ }
+ s += "udp: " + strconv.Itoa(int(rr.UDPSize()))
+
+ for _, o := range rr.Option {
+ switch o.(type) {
+ case *EDNS0_NSID:
+ s += "\n; NSID: " + o.String()
+ h, e := o.pack()
+ var r string
+ if e == nil {
+ for _, c := range h {
+ r += "(" + string(c) + ")"
+ }
+ s += " " + r
+ }
+ case *EDNS0_SUBNET:
+ s += "\n; SUBNET: " + o.String()
+ case *EDNS0_COOKIE:
+ s += "\n; COOKIE: " + o.String()
+ case *EDNS0_UL:
+ s += "\n; UPDATE LEASE: " + o.String()
+ case *EDNS0_LLQ:
+ s += "\n; LONG LIVED QUERIES: " + o.String()
+ case *EDNS0_DAU:
+ s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String()
+ case *EDNS0_DHU:
+ s += "\n; DS HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_N3U:
+ s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
+ case *EDNS0_LOCAL:
+ s += "\n; LOCAL OPT: " + o.String()
+ case *EDNS0_PADDING:
+ s += "\n; PADDING: " + o.String()
+ }
+ }
+ return s
+}
+
+func (rr *OPT) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, o := range rr.Option {
+ l += 4 // Account for 2-byte option code and 2-byte option length.
+ lo, _ := o.pack()
+ l += len(lo)
+ }
+ return l
+}
+
+func (rr *OPT) parse(c *zlexer, origin string) *ParseError {
+ panic("dns: internal error: parse should never be called on OPT")
+}
+
+func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
+
+// return the old value -> delete SetVersion?
+
+// Version returns the EDNS version used. Only zero is defined.
+func (rr *OPT) Version() uint8 {
+ return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16)
+}
+
+// SetVersion sets the version of EDNS. This is usually zero.
+func (rr *OPT) SetVersion(v uint8) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16
+}
+
+// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
+func (rr *OPT) ExtendedRcode() int {
+ return int(rr.Hdr.Ttl&0xFF000000>>24) << 4
+}
+
+// SetExtendedRcode sets the EDNS extended RCODE field.
+//
+// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0.
+func (rr *OPT) SetExtendedRcode(v uint16) {
+ rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24
+}
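+
+// A worked sketch (not part of the original API docs): the 12-bit BADVERS
+// rcode is 16; its low 4 bits live in the message header, so
+// SetExtendedRcode(16) stores 16>>4 == 1 in the upper TTL byte and
+// ExtendedRcode() shifts it back.
+//
+//	o := new(OPT)
+//	o.SetExtendedRcode(RcodeBadVers)
+//	_ = o.ExtendedRcode() // 16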
+
+// UDPSize returns the UDP buffer size.
+func (rr *OPT) UDPSize() uint16 {
+ return rr.Hdr.Class
+}
+
+// SetUDPSize sets the UDP buffer size.
+func (rr *OPT) SetUDPSize(size uint16) {
+ rr.Hdr.Class = size
+}
+
+// Do returns the value of the DO (DNSSEC OK) bit.
+func (rr *OPT) Do() bool {
+ return rr.Hdr.Ttl&_DO == _DO
+}
+
+// SetDo sets the DO (DNSSEC OK) bit.
+// If we pass an argument, set the DO bit to that value.
+// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored.
+func (rr *OPT) SetDo(do ...bool) {
+ if len(do) == 1 {
+ if do[0] {
+ rr.Hdr.Ttl |= _DO
+ } else {
+ rr.Hdr.Ttl &^= _DO
+ }
+ } else {
+ rr.Hdr.Ttl |= _DO
+ }
+}
+
+// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it.
+type EDNS0 interface {
+ // Option returns the option code for the option.
+ Option() uint16
+ // pack returns the bytes of the option data.
+ pack() ([]byte, error)
+ // unpack sets the data as found in the buffer. It also sets
+ // the length of the slice as the length of the option data.
+ unpack([]byte) error
+ // String returns the string representation of the option.
+ String() string
+ // copy returns a deep-copy of the option.
+ copy() EDNS0
+}
+
+// The EDNS0_NSID option is used to retrieve a nameserver
+// identifier. When sending a request, Nsid must be set to the empty string.
+// The identifier is an opaque string encoded as hex.
+// Basic use pattern for creating an NSID option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_NSID)
+// e.Code = dns.EDNS0NSID
+// e.Nsid = "AA"
+// o.Option = append(o.Option, e)
+type EDNS0_NSID struct {
+ Code uint16 // Always EDNS0NSID
+ Nsid string // This string needs to be hex encoded
+}
+
+func (e *EDNS0_NSID) pack() ([]byte, error) {
+ h, err := hex.DecodeString(e.Nsid)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
+func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
+func (e *EDNS0_NSID) String() string { return e.Nsid }
+func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} }
+
+// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
+// an idea of where the client lives. See RFC 7871. It can then give back a different
+// answer depending on the location or network topology.
+// Basic use pattern for creating a subnet option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_SUBNET)
+// e.Code = dns.EDNS0SUBNET
+// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
+// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
+// e.SourceScope = 0
+// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
+// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
+// o.Option = append(o.Option, e)
+//
+// This code will parse all the available bits when unpacking (up to optlen).
+// When packing it will apply SourceNetmask. If you need more advanced logic,
+// patches welcome and good luck.
+type EDNS0_SUBNET struct {
+ Code uint16 // Always EDNS0SUBNET
+ Family uint16 // 1 for IP, 2 for IP6
+ SourceNetmask uint8
+ SourceScope uint8
+ Address net.IP
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
+
+func (e *EDNS0_SUBNET) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint16(b[0:], e.Family)
+ b[2] = e.SourceNetmask
+ b[3] = e.SourceScope
+ switch e.Family {
+ case 0:
+ // "dig" sets AddressFamily to 0 if SourceNetmask is also 0
+ // We probably don't need to complain either
+ if e.SourceNetmask != 0 {
+ return nil, errors.New("dns: bad address family")
+ }
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address.To4()) != net.IPv4len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 {
+ return nil, errors.New("dns: bad netmask")
+ }
+ if len(e.Address) != net.IPv6len {
+ return nil, errors.New("dns: bad address")
+ }
+ ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8))
+ needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up
+ b = append(b, ip[:needLength]...)
+ default:
+ return nil, errors.New("dns: bad address family")
+ }
+ return b, nil
+}
+
+func (e *EDNS0_SUBNET) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Family = binary.BigEndian.Uint16(b)
+ e.SourceNetmask = b[2]
+ e.SourceScope = b[3]
+ switch e.Family {
+ case 0:
+ // "dig" sets AddressFamily to 0 if SourceNetmask is also 0
+ // It's okay to accept such a packet
+ if e.SourceNetmask != 0 {
+ return errors.New("dns: bad address family")
+ }
+ e.Address = net.IPv4(0, 0, 0, 0)
+ case 1:
+ if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make(net.IP, net.IPv4len)
+ copy(addr, b[4:])
+ e.Address = addr.To16()
+ case 2:
+ if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 {
+ return errors.New("dns: bad netmask")
+ }
+ addr := make(net.IP, net.IPv6len)
+ copy(addr, b[4:])
+ e.Address = addr
+ default:
+ return errors.New("dns: bad address family")
+ }
+ return nil
+}
+
+func (e *EDNS0_SUBNET) String() (s string) {
+ if e.Address == nil {
+ s = ""
+ } else if e.Address.To4() != nil {
+ s = e.Address.String()
+ } else {
+ s = "[" + e.Address.String() + "]"
+ }
+ s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope))
+ return
+}
+
+func (e *EDNS0_SUBNET) copy() EDNS0 {
+ return &EDNS0_SUBNET{
+ e.Code,
+ e.Family,
+ e.SourceNetmask,
+ e.SourceScope,
+ e.Address,
+ }
+}
+
+// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_COOKIE)
+// e.Code = dns.EDNS0COOKIE
+// e.Cookie = "24a5ac.."
+// o.Option = append(o.Option, e)
+//
+// The Cookie field consists of a client cookie (RFC 7873 Section 4), which is
+// always 8 bytes. It may then optionally be followed by the server cookie. The server
+// cookie is of variable length, 8 to a maximum of 32 bytes. In other words:
+//
+// cCookie := o.Cookie[:16]
+// sCookie := o.Cookie[16:]
+//
+// There is no guarantee that the Cookie string has a specific length.
+type EDNS0_COOKIE struct {
+ Code uint16 // Always EDNS0COOKIE
+ Cookie string // Hex-encoded cookie data
+}
+
+func (e *EDNS0_COOKIE) pack() ([]byte, error) {
+ h, err := hex.DecodeString(e.Cookie)
+ if err != nil {
+ return nil, err
+ }
+ return h, nil
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
+func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
+func (e *EDNS0_COOKIE) String() string { return e.Cookie }
+func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} }
+
+// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
+// an expiration on an update RR. This is helpful for clients that cannot clean
+// up after themselves. This is a draft RFC and more information can be found at
+// https://tools.ietf.org/html/draft-sekar-dns-ul-02
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_UL)
+// e.Code = dns.EDNS0UL
+// e.Lease = 120 // in seconds
+// o.Option = append(o.Option, e)
+type EDNS0_UL struct {
+ Code uint16 // Always EDNS0UL
+ Lease uint32
+ KeyLease uint32
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
+func (e *EDNS0_UL) String() string { return fmt.Sprintf("%d %d", e.Lease, e.KeyLease) }
+func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease, e.KeyLease} }
+
+// Copied: http://golang.org/src/pkg/net/dnsmsg.go
+func (e *EDNS0_UL) pack() ([]byte, error) {
+ var b []byte
+ if e.KeyLease == 0 {
+ b = make([]byte, 4)
+ } else {
+ b = make([]byte, 8)
+ binary.BigEndian.PutUint32(b[4:], e.KeyLease)
+ }
+ binary.BigEndian.PutUint32(b, e.Lease)
+ return b, nil
+}
+
+func (e *EDNS0_UL) unpack(b []byte) error {
+ switch len(b) {
+ case 4:
+ e.KeyLease = 0
+ case 8:
+ e.KeyLease = binary.BigEndian.Uint32(b[4:])
+ default:
+ return ErrBuf
+ }
+ e.Lease = binary.BigEndian.Uint32(b)
+ return nil
+}
+
+// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
+// Implemented for completeness, as the EDNS0 type code is assigned.
+type EDNS0_LLQ struct {
+ Code uint16 // Always EDNS0LLQ
+ Version uint16
+ Opcode uint16
+ Error uint16
+ Id uint64
+ LeaseLife uint32
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
+
+func (e *EDNS0_LLQ) pack() ([]byte, error) {
+ b := make([]byte, 18)
+ binary.BigEndian.PutUint16(b[0:], e.Version)
+ binary.BigEndian.PutUint16(b[2:], e.Opcode)
+ binary.BigEndian.PutUint16(b[4:], e.Error)
+ binary.BigEndian.PutUint64(b[6:], e.Id)
+ binary.BigEndian.PutUint32(b[14:], e.LeaseLife)
+ return b, nil
+}
+
+func (e *EDNS0_LLQ) unpack(b []byte) error {
+ if len(b) < 18 {
+ return ErrBuf
+ }
+ e.Version = binary.BigEndian.Uint16(b[0:])
+ e.Opcode = binary.BigEndian.Uint16(b[2:])
+ e.Error = binary.BigEndian.Uint16(b[4:])
+ e.Id = binary.BigEndian.Uint64(b[6:])
+ e.LeaseLife = binary.BigEndian.Uint32(b[14:])
+ return nil
+}
+
+func (e *EDNS0_LLQ) String() string {
+ s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
+ " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
+ " " + strconv.FormatUint(uint64(e.LeaseLife), 10)
+ return s
+}
+func (e *EDNS0_LLQ) copy() EDNS0 {
+ return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
+}
+
+// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
+type EDNS0_DAU struct {
+ Code uint16 // Always EDNS0DAU
+ AlgCode []uint8
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
+func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DAU) String() string {
+ s := ""
+ for _, alg := range e.AlgCode {
+ if a, ok := AlgorithmToString[alg]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(alg))
+ }
+ }
+ return s
+}
+func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
+
+// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
+type EDNS0_DHU struct {
+ Code uint16 // Always EDNS0DHU
+ AlgCode []uint8
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
+func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_DHU) String() string {
+ s := ""
+ for _, alg := range e.AlgCode {
+ if a, ok := HashToString[alg]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(alg))
+ }
+ }
+ return s
+}
+func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
+
+// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
+type EDNS0_N3U struct {
+ Code uint16 // Always EDNS0N3U
+ AlgCode []uint8
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
+func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
+func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
+
+func (e *EDNS0_N3U) String() string {
+ // Re-use the hash map
+ s := ""
+ for _, alg := range e.AlgCode {
+ if a, ok := HashToString[alg]; ok {
+ s += " " + a
+ } else {
+ s += " " + strconv.Itoa(int(alg))
+ }
+ }
+ return s
+}
+func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
+
+// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
+type EDNS0_EXPIRE struct {
+ Code uint16 // Always EDNS0EXPIRE
+ Expire uint32
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
+func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
+func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} }
+
+func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
+ b := make([]byte, 4)
+ binary.BigEndian.PutUint32(b, e.Expire)
+ return b, nil
+}
+
+func (e *EDNS0_EXPIRE) unpack(b []byte) error {
+ if len(b) == 0 {
+ // zero-length EXPIRE query, see RFC 7314 Section 2
+ return nil
+ }
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Expire = binary.BigEndian.Uint32(b)
+ return nil
+}
+
+// The EDNS0_LOCAL option is used for local/experimental purposes. The option
+// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND]
+// (RFC6891), although any unassigned code can actually be used. The content of
+// the option is made available in Data, unaltered.
+// Basic use pattern for creating a local option:
+//
+// o := new(dns.OPT)
+// o.Hdr.Name = "."
+// o.Hdr.Rrtype = dns.TypeOPT
+// e := new(dns.EDNS0_LOCAL)
+// e.Code = dns.EDNS0LOCALSTART
+// e.Data = []byte{72, 82, 74}
+// o.Option = append(o.Option, e)
+type EDNS0_LOCAL struct {
+ Code uint16
+ Data []byte
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
+func (e *EDNS0_LOCAL) String() string {
+ return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
+}
+func (e *EDNS0_LOCAL) copy() EDNS0 {
+ b := make([]byte, len(e.Data))
+ copy(b, e.Data)
+ return &EDNS0_LOCAL{e.Code, b}
+}
+
+func (e *EDNS0_LOCAL) pack() ([]byte, error) {
+ b := make([]byte, len(e.Data))
+ copied := copy(b, e.Data)
+ if copied != len(e.Data) {
+ return nil, ErrBuf
+ }
+ return b, nil
+}
+
+func (e *EDNS0_LOCAL) unpack(b []byte) error {
+ e.Data = make([]byte, len(b))
+ copied := copy(e.Data, b)
+ if copied != len(b) {
+ return ErrBuf
+ }
+ return nil
+}
+
+// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
+// the TCP connection alive. See RFC 7828.
+type EDNS0_TCP_KEEPALIVE struct {
+ Code uint16 // Always EDNSTCPKEEPALIVE
+ Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present
+ Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
+
+func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
+ if e.Timeout != 0 && e.Length != 2 {
+ return nil, errors.New("dns: timeout specified but length is not 2")
+ }
+ if e.Timeout == 0 && e.Length != 0 {
+ return nil, errors.New("dns: timeout not specified but length is not 0")
+ }
+ b := make([]byte, 4+e.Length)
+ binary.BigEndian.PutUint16(b[0:], e.Code)
+ binary.BigEndian.PutUint16(b[2:], e.Length)
+ if e.Length == 2 {
+ binary.BigEndian.PutUint16(b[4:], e.Timeout)
+ }
+ return b, nil
+}
+
+func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error {
+ if len(b) < 4 {
+ return ErrBuf
+ }
+ e.Length = binary.BigEndian.Uint16(b[2:4])
+ if e.Length != 0 && e.Length != 2 {
+ return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10))
+ }
+ if e.Length == 2 {
+ if len(b) < 6 {
+ return ErrBuf
+ }
+ e.Timeout = binary.BigEndian.Uint16(b[4:6])
+ }
+ return nil
+}
+
+func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
+ s = "use tcp keep-alive"
+ if e.Length == 0 {
+ s += ", timeout omitted"
+ } else {
+ s += fmt.Sprintf(", timeout %dms", e.Timeout*100)
+ }
+ return
+}
+func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
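+
+// A minimal construction sketch (the timeout value is illustrative): advertise
+// a 10 s idle timeout, i.e. 100 units of 100 ms; Length must be 2 whenever a
+// timeout is present.
+//
+//	e := &EDNS0_TCP_KEEPALIVE{Code: EDNS0TCPKEEPALIVE, Length: 2, Timeout: 100}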
+
+// EDNS0_PADDING option is used to add padding to a request/response. The default
+// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
+// compression is applied before encryption, which may break signatures.
+type EDNS0_PADDING struct {
+ Padding []byte
+}
+
+// Option implements the EDNS0 interface.
+func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
+func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
+func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
+func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
+func (e *EDNS0_PADDING) copy() EDNS0 {
+ b := make([]byte, len(e.Padding))
+ copy(b, e.Padding)
+ return &EDNS0_PADDING{b}
+}
diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go
new file mode 100644
index 0000000..0ec79f2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/format.go
@@ -0,0 +1,93 @@
+package dns
+
+import (
+ "net"
+ "reflect"
+ "strconv"
+)
+
+// NumField returns the number of rdata fields r has.
+func NumField(r RR) int {
+ return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header
+}
+
+// Field returns the rdata field i as a string. Fields are indexed starting from 1.
+// RR types that hold slice data, for instance the NSEC type bitmap, will return a single
+// string where the types are concatenated using a space.
+// Accessing non-existent fields will cause a panic.
+func Field(r RR, i int) string {
+ if i == 0 {
+ return ""
+ }
+ d := reflect.ValueOf(r).Elem().Field(i)
+ switch d.Kind() {
+ case reflect.String:
+ return d.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(d.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(d.Uint(), 10)
+ case reflect.Slice:
+ switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
+ case `dns:"a"`:
+ // TODO(miek): Hmm store this as 16 bytes
+ if d.Len() < net.IPv4len {
+ return ""
+ }
+ if d.Len() < net.IPv6len {
+ return net.IPv4(byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint())).String()
+ }
+ return net.IPv4(byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint())).String()
+ case `dns:"aaaa"`:
+ if d.Len() < net.IPv6len {
+ return ""
+ }
+ return net.IP{
+ byte(d.Index(0).Uint()),
+ byte(d.Index(1).Uint()),
+ byte(d.Index(2).Uint()),
+ byte(d.Index(3).Uint()),
+ byte(d.Index(4).Uint()),
+ byte(d.Index(5).Uint()),
+ byte(d.Index(6).Uint()),
+ byte(d.Index(7).Uint()),
+ byte(d.Index(8).Uint()),
+ byte(d.Index(9).Uint()),
+ byte(d.Index(10).Uint()),
+ byte(d.Index(11).Uint()),
+ byte(d.Index(12).Uint()),
+ byte(d.Index(13).Uint()),
+ byte(d.Index(14).Uint()),
+ byte(d.Index(15).Uint()),
+ }.String()
+ case `dns:"nsec"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := Type(d.Index(0).Uint()).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + Type(d.Index(i).Uint()).String()
+ }
+ return s
+ default:
+ // if it does not have a tag it's a string slice
+ fallthrough
+ case `dns:"txt"`:
+ if d.Len() == 0 {
+ return ""
+ }
+ s := d.Index(0).String()
+ for i := 1; i < d.Len(); i++ {
+ s += " " + d.Index(i).String()
+ }
+ return s
+ }
+ }
+ return ""
+}
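+
+// A minimal usage sketch (the MX record is illustrative): fields are indexed
+// from 1, skipping the RR header.
+//
+//	mx, _ := NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+//	pref := Field(mx, 1) // "10"
+//	host := Field(mx, 2) // "mx.miek.nl."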
diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go
new file mode 100644
index 0000000..57410ac
--- /dev/null
+++ b/vendor/github.com/miekg/dns/fuzz.go
@@ -0,0 +1,32 @@
+// +build fuzz
+
+package dns
+
+import "strings"
+
+func Fuzz(data []byte) int {
+ msg := new(Msg)
+
+ if err := msg.Unpack(data); err != nil {
+ return 0
+ }
+ if _, err := msg.Pack(); err != nil {
+ return 0
+ }
+
+ return 1
+}
+
+func FuzzNewRR(data []byte) int {
+ str := string(data)
+ // Do not fuzz lines that include the $INCLUDE keyword and hint the fuzzer
+ // at avoiding them.
+ // See GH#1025 for context.
+ if strings.Contains(strings.ToUpper(str), "$INCLUDE") {
+ return -1
+ }
+ if _, err := NewRR(str); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
new file mode 100644
index 0000000..f7e91a2
--- /dev/null
+++ b/vendor/github.com/miekg/dns/generate.go
@@ -0,0 +1,247 @@
+package dns
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Parse the $GENERATE statement as used in BIND9 zones.
+// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
+// We are called after '$GENERATE ', after which we expect:
+// * the range (12-24/2)
+// * lhs (ownername)
+// * [[ttl][class]]
+// * type
+// * rhs (rdata)
+// But we are lazy here: only the range is parsed; *all* occurrences
+// of $ after that are interpreted.
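+// For example (an illustrative zone snippet), the directive
+// "$GENERATE 1-4 host-$ A 10.0.0.$" expands to host-1 ... host-4 with
+// addresses 10.0.0.1 through 10.0.0.4.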
+func (zp *ZoneParser) generate(l lex) (RR, bool) {
+ token := l.token
+ step := 1
+ if i := strings.IndexByte(token, '/'); i >= 0 {
+ if i+1 == len(token) {
+ return zp.setParseError("bad step in $GENERATE range", l)
+ }
+
+ s, err := strconv.Atoi(token[i+1:])
+ if err != nil || s <= 0 {
+ return zp.setParseError("bad step in $GENERATE range", l)
+ }
+
+ step = s
+ token = token[:i]
+ }
+
+ sx := strings.SplitN(token, "-", 2)
+ if len(sx) != 2 {
+ return zp.setParseError("bad start-stop in $GENERATE range", l)
+ }
+
+ start, err := strconv.Atoi(sx[0])
+ if err != nil {
+ return zp.setParseError("bad start in $GENERATE range", l)
+ }
+
+ end, err := strconv.Atoi(sx[1])
+ if err != nil {
+ return zp.setParseError("bad stop in $GENERATE range", l)
+ }
+ if end < 0 || start < 0 || end < start || (end-start)/step > 65535 {
+ return zp.setParseError("bad range in $GENERATE range", l)
+ }
+
+ // _BLANK
+ l, ok := zp.c.Next()
+ if !ok || l.value != zBlank {
+ return zp.setParseError("garbage after $GENERATE range", l)
+ }
+
+ // Create a completely new string, which we then parse again.
+ var s string
+ for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
+ if l.err {
+ return zp.setParseError("bad data in $GENERATE directive", l)
+ }
+ if l.value == zNewline {
+ break
+ }
+
+ s += l.token
+ }
+
+ r := &generateReader{
+ s: s,
+
+ cur: start,
+ start: start,
+ end: end,
+ step: step,
+
+ file: zp.file,
+ lex: &l,
+ }
+ zp.sub = NewZoneParser(r, zp.origin, zp.file)
+ zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed
+ zp.sub.generateDisallowed = true
+ zp.sub.SetDefaultTTL(defaultTtl)
+ return zp.subNext()
+}
+
+type generateReader struct {
+ s string
+ si int
+
+ cur int
+ start int
+ end int
+ step int
+
+ mod bytes.Buffer
+
+ escape bool
+
+ eof bool
+
+ file string
+ lex *lex
+}
+
+func (r *generateReader) parseError(msg string, end int) *ParseError {
+ r.eof = true // Make errors sticky.
+
+ l := *r.lex
+ l.token = r.s[r.si-1 : end]
+ l.column += r.si // l.column starts one zBLANK before r.s
+
+ return &ParseError{r.file, msg, l}
+}
+
+func (r *generateReader) Read(p []byte) (int, error) {
+ // NewZLexer, through NewZoneParser, should use ReadByte and
+ // not end up here.
+
+ panic("not implemented")
+}
+
+func (r *generateReader) ReadByte() (byte, error) {
+ if r.eof {
+ return 0, io.EOF
+ }
+ if r.mod.Len() > 0 {
+ return r.mod.ReadByte()
+ }
+
+ if r.si >= len(r.s) {
+ r.si = 0
+ r.cur += r.step
+
+ r.eof = r.cur > r.end || r.cur < 0
+ return '\n', nil
+ }
+
+ si := r.si
+ r.si++
+
+ switch r.s[si] {
+ case '\\':
+ if r.escape {
+ r.escape = false
+ return '\\', nil
+ }
+
+ r.escape = true
+ return r.ReadByte()
+ case '$':
+ if r.escape {
+ r.escape = false
+ return '$', nil
+ }
+
+ mod := "%d"
+
+ if si >= len(r.s)-1 {
+ // End of the string
+ fmt.Fprintf(&r.mod, mod, r.cur)
+ return r.mod.ReadByte()
+ }
+
+ if r.s[si+1] == '$' {
+ r.si++
+ return '$', nil
+ }
+
+ var offset int
+
+ // Search for { and }
+ if r.s[si+1] == '{' {
+ // Modifier block
+ sep := strings.Index(r.s[si+2:], "}")
+ if sep < 0 {
+ return 0, r.parseError("bad modifier in $GENERATE", len(r.s))
+ }
+
+ var errMsg string
+ mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep])
+ if errMsg != "" {
+ return 0, r.parseError(errMsg, si+3+sep)
+ }
+ if r.start+offset < 0 || r.end+offset > 1<<31-1 {
+ return 0, r.parseError("bad offset in $GENERATE", si+3+sep)
+ }
+
+ r.si += 2 + sep // Jump to it
+ }
+
+ fmt.Fprintf(&r.mod, mod, r.cur+offset)
+ return r.mod.ReadByte()
+ default:
+ if r.escape { // Pretty useless here
+ r.escape = false
+ return r.ReadByte()
+ }
+
+ return r.s[si], nil
+ }
+}
+
+// Convert a $GENERATE modifier 0,0,d to something Printf can deal with.
+func modToPrintf(s string) (string, int, string) {
+ // Modifier is { offset [ ,width [ ,base ] ] } - provide default
+ // values for the optional width and base, if necessary.
+ var offStr, widthStr, base string
+ switch xs := strings.Split(s, ","); len(xs) {
+ case 1:
+ offStr, widthStr, base = xs[0], "0", "d"
+ case 2:
+ offStr, widthStr, base = xs[0], xs[1], "d"
+ case 3:
+ offStr, widthStr, base = xs[0], xs[1], xs[2]
+ default:
+ return "", 0, "bad modifier in $GENERATE"
+ }
+
+ switch base {
+ case "o", "d", "x", "X":
+ default:
+ return "", 0, "bad base in $GENERATE"
+ }
+
+ offset, err := strconv.Atoi(offStr)
+ if err != nil {
+ return "", 0, "bad offset in $GENERATE"
+ }
+
+ width, err := strconv.Atoi(widthStr)
+ if err != nil || width < 0 || width > 255 {
+ return "", 0, "bad width in $GENERATE"
+ }
+
+ if width == 0 {
+ return "%" + base, offset, ""
+ }
+
+ return "%0" + widthStr + base, offset, ""
+}
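For context, the $GENERATE handling above is driven through the exported zone parser; a minimal sketch (the zone text and names are made up for illustration):

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	zone := "$TTL 3600\n$GENERATE 1-3 host-$ A 10.0.0.$\n"
	zp := dns.NewZoneParser(strings.NewReader(zone), "example.org.", "inline")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		fmt.Println(rr) // host-1.example.org. 3600 IN A 10.0.0.1, and so on
	}
	if err := zp.Err(); err != nil {
		log.Fatal(err)
	}
}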
diff --git a/vendor/github.com/miekg/dns/go.mod b/vendor/github.com/miekg/dns/go.mod
new file mode 100644
index 0000000..6003d05
--- /dev/null
+++ b/vendor/github.com/miekg/dns/go.mod
@@ -0,0 +1,11 @@
+module github.com/miekg/dns
+
+go 1.12
+
+require (
+ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
+ golang.org/x/net v0.0.0-20190923162816-aa69164e4478
+ golang.org/x/sync v0.0.0-20190423024810-112230192c58
+ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe
+ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 // indirect
+)
diff --git a/vendor/github.com/miekg/dns/go.sum b/vendor/github.com/miekg/dns/go.sum
new file mode 100644
index 0000000..96bda3a
--- /dev/null
+++ b/vendor/github.com/miekg/dns/go.sum
@@ -0,0 +1,39 @@
+golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4 h1:Vk3wNqEZwyGyei9yq5ekj7frek2u7HUfffJ1/opblzc=
+golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472 h1:Gv7RPwsi3eZ2Fgewe3CBsuOebPwO27PoXzRpJPsvSSM=
+golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3 h1:dgd4x4kJt7G4k4m93AYLzM8Ni6h2qLTfh9n9vXJT3/0=
+golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611 h1:O33LKL7WyJgjN9CvxfTIomjIClbd/Kq86/iipowHQU0=
+golang.org/x/sys v0.0.0-20180928133829-e4b3c5e90611/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425 h1:VvQyQJN0tSuecqgcIxMWnnfG5kSmgy9KZR9sW3W5QeA=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go
new file mode 100644
index 0000000..10d8247
--- /dev/null
+++ b/vendor/github.com/miekg/dns/labels.go
@@ -0,0 +1,212 @@
+package dns
+
+// Holds a bunch of helper functions for dealing with labels.
+
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}
+// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
+// The root label (.) returns nil. Note that using
+// strings.Split(s, ".") will work in most cases, but does not handle
+// escaped dots (\.) for instance.
+// s must be a syntactically valid domain name, see IsDomainName.
+func SplitDomainName(s string) (labels []string) {
+ if len(s) == 0 {
+ return nil
+ }
+ fqdnEnd := 0 // offset of the final '.' or the length of the name
+ idx := Split(s)
+ begin := 0
+ if IsFqdn(s) {
+ fqdnEnd = len(s) - 1
+ } else {
+ fqdnEnd = len(s)
+ }
+
+ switch len(idx) {
+ case 0:
+ return nil
+ case 1:
+ // no-op
+ default:
+ for _, end := range idx[1:] {
+ labels = append(labels, s[begin:end-1])
+ begin = end
+ }
+ }
+
+ return append(labels, s[begin:fqdnEnd])
+}
+
+// CompareDomainName compares the names s1 and s2 and
+// returns how many labels they have in common starting from the *right*.
+// The comparison stops at the first inequality. The names are downcased
+// before the comparison.
+//
+// www.miek.nl. and miek.nl. have two labels in common: miek and nl
+// www.miek.nl. and www.bla.nl. have one label in common: nl
+//
+// s1 and s2 must be syntactically valid domain names.
+func CompareDomainName(s1, s2 string) (n int) {
+ // the first check: root label
+ if s1 == "." || s2 == "." {
+ return 0
+ }
+
+ l1 := Split(s1)
+ l2 := Split(s2)
+
+ j1 := len(l1) - 1 // end
+ i1 := len(l1) - 2 // start
+ j2 := len(l2) - 1
+ i2 := len(l2) - 2
+ // the second check can be done here: last/only label
+ // before we fall through into the for-loop below
+ if equal(s1[l1[j1]:], s2[l2[j2]:]) {
+ n++
+ } else {
+ return
+ }
+ for {
+ if i1 < 0 || i2 < 0 {
+ break
+ }
+ if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
+ n++
+ } else {
+ break
+ }
+ j1--
+ i1--
+ j2--
+ i2--
+ }
+ return
+}
+
+// CountLabel counts the number of labels in the string s.
+// s must be a syntactically valid domain name.
+func CountLabel(s string) (labels int) {
+ if s == "." {
+ return
+ }
+ off := 0
+ end := false
+ for {
+ off, end = NextLabel(s, off)
+ labels++
+ if end {
+ return
+ }
+ }
+}
+
+// Split splits a name s into its label indexes.
+// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
+// The root name (.) returns nil. Also see SplitDomainName.
+// s must be a syntactically valid domain name.
+func Split(s string) []int {
+ if s == "." {
+ return nil
+ }
+ idx := make([]int, 1, 3)
+ off := 0
+ end := false
+
+ for {
+ off, end = NextLabel(s, off)
+ if end {
+ return idx
+ }
+ idx = append(idx, off)
+ }
+}
+
+// NextLabel returns the index of the start of the next label in the
+// string s starting at offset.
+// The bool end is true when the end of the string has been reached.
+// Also see PrevLabel.
+func NextLabel(s string, offset int) (i int, end bool) {
+ if s == "" {
+ return 0, true
+ }
+ for i = offset; i < len(s)-1; i++ {
+ if s[i] != '.' {
+ continue
+ }
+ j := i - 1
+ for j >= 0 && s[j] == '\\' {
+ j--
+ }
+
+ if (j-i)%2 == 0 {
+ continue
+ }
+
+ return i + 1, false
+ }
+ return i + 1, true
+}
+
+// PrevLabel returns the index of the label when starting from the right and
+// jumping n labels to the left.
+// The bool start is true when the start of the string has been overshot.
+// Also see NextLabel.
+func PrevLabel(s string, n int) (i int, start bool) {
+ if s == "" {
+ return 0, true
+ }
+ if n == 0 {
+ return len(s), false
+ }
+
+ l := len(s) - 1
+ if s[l] == '.' {
+ l--
+ }
+
+ for ; l >= 0 && n > 0; l-- {
+ if s[l] != '.' {
+ continue
+ }
+ j := l - 1
+ for j >= 0 && s[j] == '\\' {
+ j--
+ }
+
+ if (j-l)%2 == 0 {
+ continue
+ }
+
+ n--
+ if n == 0 {
+ return l + 1, false
+ }
+ }
+
+ return 0, n > 1
+}
+
+// equal compares a and b while ignoring case. It returns true when equal, otherwise false.
+func equal(a, b string) bool {
+ // might be lifted into API function.
+ la := len(a)
+ lb := len(b)
+ if la != lb {
+ return false
+ }
+
+ for i := la - 1; i >= 0; i-- {
+ ai := a[i]
+ bi := b[i]
+ if ai >= 'A' && ai <= 'Z' {
+ ai |= 'a' - 'A'
+ }
+ if bi >= 'A' && bi <= 'Z' {
+ bi |= 'a' - 'A'
+ }
+ if ai != bi {
+ return false
+ }
+ }
+ return true
+}
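The exported label helpers behave as their doc comments describe; a quick sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.miek.nl."))                  // [www miek nl]
	fmt.Println(dns.CountLabel("www.miek.nl."))                       // 3
	fmt.Println(dns.Split("www.miek.nl."))                            // [0 4 9]
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "www.bla.nl.")) // 1 (only nl)
}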
diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go
new file mode 100644
index 0000000..fad195c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/listen_go111.go
@@ -0,0 +1,44 @@
+// +build go1.11
+// +build aix darwin dragonfly freebsd linux netbsd openbsd
+
+package dns
+
+import (
+ "context"
+ "net"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+const supportsReusePort = true
+
+func reuseportControl(network, address string, c syscall.RawConn) error {
+ var opErr error
+ err := c.Control(func(fd uintptr) {
+ opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+ })
+ if err != nil {
+ return err
+ }
+
+ return opErr
+}
+
+func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
+ var lc net.ListenConfig
+ if reuseport {
+ lc.Control = reuseportControl
+ }
+
+ return lc.Listen(context.Background(), network, addr)
+}
+
+func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
+ var lc net.ListenConfig
+ if reuseport {
+ lc.Control = reuseportControl
+ }
+
+ return lc.ListenPacket(context.Background(), network, addr)
+}
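These listeners back the server's ReusePort option: on a platform covered by the build tags above, several servers can bind the same address and the kernel balances incoming queries between them. A hedged sketch, assuming the Server type's ReusePort field (the address and echo handler are illustrative):

package main

import (
	"log"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r)
		w.WriteMsg(m)
	})

	// Two UDP servers on the same port; SO_REUSEPORT (set by
	// reuseportControl above) makes the second bind succeed.
	for i := 0; i < 2; i++ {
		srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp", ReusePort: true}
		go func() {
			if err := srv.ListenAndServe(); err != nil {
				log.Println(err)
			}
		}()
	}
	select {} // serve forever
}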
diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go
new file mode 100644
index 0000000..b920141
--- /dev/null
+++ b/vendor/github.com/miekg/dns/listen_go_not111.go
@@ -0,0 +1,23 @@
+// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package dns
+
+import "net"
+
+const supportsReusePort = false
+
+func listenTCP(network, addr string, reuseport bool) (net.Listener, error) {
+ if reuseport {
+ // TODO(tmthrgd): return an error?
+ }
+
+ return net.Listen(network, addr)
+}
+
+func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) {
+ if reuseport {
+ // TODO(tmthrgd): return an error?
+ }
+
+ return net.ListenPacket(network, addr)
+}
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
new file mode 100644
index 0000000..2938130
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -0,0 +1,1196 @@
+// DNS packet assembly, see RFC 1035. Converting from - Unpack() -
+// and to - Pack() - wire format.
+// All the packers and unpackers take a (msg []byte, off int)
+// and return (off1 int, ok bool). If they return ok==false, they
+// also return off1==len(msg), so that the next unpacker will
+// also fail. This lets us avoid checks of ok until the end of a
+// packing sequence.
+
+package dns
+
+//go:generate go run msg_generate.go
+
+import (
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+)
+
+const (
+ maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
+ maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
+
+ // This is the maximum number of compression pointers that should occur in a
+ // semantically valid message. Each label in a domain name must be at least one
+ // octet and is separated by a period. The root label won't be represented by a
+ // compression pointer to a compression pointer, hence the -2 to exclude the
+ // smallest valid root label.
+ //
+ // It is possible to construct a valid message that has more compression pointers
+ // than this, and still doesn't loop, by pointing to a previous pointer. This is
+ // not something a well written implementation should ever do, so we leave them
+ // to trip the maximum compression pointer check.
+ maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2
+
+ // This is the maximum length of a domain name in presentation format. The
+ // maximum wire length of a domain name is 255 octets (see above), with the
+ // maximum label length being 63. The wire format requires one extra byte over
+ // the presentation format, reducing the number of octets by 1. Each label in
+ // the name will be separated by a single period, with each octet in the label
+ // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum
+ // length, then the final label can only be 61 octets long to not exceed the
+ // maximum allowed wire length.
+ maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1
+)
+
+// Errors defined in this package.
+var (
+ ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
+ ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
+ ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
+ ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
+ ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode indicates a bad extended rcode.
+ ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
+ ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
+ ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
+ ErrKey error = &Error{err: "bad key"}
+ ErrKeySize error = &Error{err: "bad key size"}
+ ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)}
+ ErrNoSig error = &Error{err: "no signature found"}
+ ErrPrivKey error = &Error{err: "bad private key"}
+ ErrRcode error = &Error{err: "bad rcode"}
+ ErrRdata error = &Error{err: "bad rdata"}
+ ErrRRset error = &Error{err: "bad rrset"}
+ ErrSecret error = &Error{err: "no secrets defined"}
+ ErrShortRead error = &Error{err: "short read"}
+ ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated.
+ ErrSoa error = &Error{err: "no SOA"} // ErrSoa indicates that no SOA RR was seen when doing zone transfers.
+ ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication.
+)
+
+// Id by default returns a 16-bit random number to be used as a message id. The
+// number is drawn from a cryptographically secure random number generator.
+// This being a variable, the function can be reassigned to a custom function.
+// For instance, to make it return a static value for testing:
+//
+// dns.Id = func() uint16 { return 3 }
+var Id = id
+
+// id returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough.
+func id() uint16 {
+ var output uint16
+ err := binary.Read(rand.Reader, binary.BigEndian, &output)
+ if err != nil {
+ panic("dns: reading random id failed: " + err.Error())
+ }
+ return output
+}
+
+// MsgHdr is a manually-unpacked version of (id, bits).
+type MsgHdr struct {
+ Id uint16
+ Response bool
+ Opcode int
+ Authoritative bool
+ Truncated bool
+ RecursionDesired bool
+ RecursionAvailable bool
+ Zero bool
+ AuthenticatedData bool
+ CheckingDisabled bool
+ Rcode int
+}
+
+// Msg contains the layout of a DNS message.
+type Msg struct {
+ MsgHdr
+ Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format.
+ Question []Question // Holds the RR(s) of the question section.
+ Answer []RR // Holds the RR(s) of the answer section.
+ Ns []RR // Holds the RR(s) of the authority section.
+ Extra []RR // Holds the RR(s) of the additional section.
+}
+
+// ClassToString maps Classes to strings for each CLASS wire type.
+var ClassToString = map[uint16]string{
+ ClassINET: "IN",
+ ClassCSNET: "CS",
+ ClassCHAOS: "CH",
+ ClassHESIOD: "HS",
+ ClassNONE: "NONE",
+ ClassANY: "ANY",
+}
+
+// OpcodeToString maps Opcodes to strings.
+var OpcodeToString = map[int]string{
+ OpcodeQuery: "QUERY",
+ OpcodeIQuery: "IQUERY",
+ OpcodeStatus: "STATUS",
+ OpcodeNotify: "NOTIFY",
+ OpcodeUpdate: "UPDATE",
+}
+
+// RcodeToString maps Rcodes to strings.
+var RcodeToString = map[int]string{
+ RcodeSuccess: "NOERROR",
+ RcodeFormatError: "FORMERR",
+ RcodeServerFailure: "SERVFAIL",
+ RcodeNameError: "NXDOMAIN",
+ RcodeNotImplemented: "NOTIMP",
+ RcodeRefused: "REFUSED",
+ RcodeYXDomain: "YXDOMAIN", // See RFC 2136
+ RcodeYXRrset: "YXRRSET",
+ RcodeNXRrset: "NXRRSET",
+ RcodeNotAuth: "NOTAUTH",
+ RcodeNotZone: "NOTZONE",
+ RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891
+ // RcodeBadVers: "BADVERS",
+ RcodeBadKey: "BADKEY",
+ RcodeBadTime: "BADTIME",
+ RcodeBadMode: "BADMODE",
+ RcodeBadName: "BADNAME",
+ RcodeBadAlg: "BADALG",
+ RcodeBadTrunc: "BADTRUNC",
+ RcodeBadCookie: "BADCOOKIE",
+}
+
+// compressionMap is used to allow a more efficient compression map
+// to be used for internal packDomainName calls without changing the
+// signature or functionality of the public API.
+//
+// In particular, map[string]uint16 uses 25% less per-entry memory
+// than does map[string]int.
+type compressionMap struct {
+ ext map[string]int // external callers
+ int map[string]uint16 // internal callers
+}
+
+func (m compressionMap) valid() bool {
+ return m.int != nil || m.ext != nil
+}
+
+func (m compressionMap) insert(s string, pos int) {
+ if m.ext != nil {
+ m.ext[s] = pos
+ } else {
+ m.int[s] = uint16(pos)
+ }
+}
+
+func (m compressionMap) find(s string) (int, bool) {
+ if m.ext != nil {
+ pos, ok := m.ext[s]
+ return pos, ok
+ }
+
+ pos, ok := m.int[s]
+ return int(pos), ok
+}
+
+// Domain names are a sequence of counted strings
+// split at the dots. They end with a zero-length string.
+
+// PackDomainName packs a domain name s into msg[off:].
+// If compression is wanted compress must be true and the compression
+// map needs to hold a mapping between domain names and offsets
+// pointing into msg.
+func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ return packDomainName(s, msg, off, compressionMap{ext: compression}, compress)
+}
+
+func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ // XXX: A logical copy of this function exists in IsDomainName and
+ // should be kept in sync with this function.
+
+ ls := len(s)
+ if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
+ return off, nil
+ }
+
+ // If not fully qualified, error out.
+ if !IsFqdn(s) {
+ return len(msg), ErrFqdn
+ }
+
+ // Each dot ends a segment of the name.
+ // We trade each dot byte for a length byte.
+ // Except for escaped dots (\.), which are normal dots.
+ // There is also a trailing zero.
+
+ // Compression
+ pointer := -1
+
+ // Emit sequence of counted strings, chopping at dots.
+ var (
+ begin int
+ compBegin int
+ compOff int
+ bs []byte
+ wasDot bool
+ )
+loop:
+ for i := 0; i < ls; i++ {
+ var c byte
+ if bs == nil {
+ c = s[i]
+ } else {
+ c = bs[i]
+ }
+
+ switch c {
+ case '\\':
+ if off+1 > len(msg) {
+ return len(msg), ErrBuf
+ }
+
+ if bs == nil {
+ bs = []byte(s)
+ }
+
+ // check for \DDD
+ if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) {
+ bs[i] = dddToByte(bs[i+1:])
+ copy(bs[i+1:ls-3], bs[i+4:])
+ ls -= 3
+ compOff += 3
+ } else {
+ copy(bs[i:ls-1], bs[i+1:])
+ ls--
+ compOff++
+ }
+
+ wasDot = false
+ case '.':
+ if wasDot {
+ // two dots back to back is not legal
+ return len(msg), ErrRdata
+ }
+ wasDot = true
+
+ labelLen := i - begin
+ if labelLen >= 1<<6 { // top two bits of length must be clear
+ return len(msg), ErrRdata
+ }
+
+ // off can already (we're in a loop) be bigger than len(msg)
+ // this happens when a name isn't fully qualified
+ if off+1+labelLen > len(msg) {
+ return len(msg), ErrBuf
+ }
+
+ // Don't try to compress '.'
+ // We should only compress when compress is true, but we should also still pick
+ // up names that can be used for *future* compression(s).
+ if compression.valid() && !isRootLabel(s, bs, begin, ls) {
+ if p, ok := compression.find(s[compBegin:]); ok {
+ // The first hit is the longest matching dname
+ // keep the pointer offset we get back and store
+ // the offset of the current name, because that's
+ // where we need to insert the pointer later
+
+ // If compress is true, we're allowed to compress this dname
+ if compress {
+ pointer = p // Where to point to
+ break loop
+ }
+ } else if off < maxCompressionOffset {
+ // Only offsets smaller than maxCompressionOffset can be used.
+ compression.insert(s[compBegin:], off)
+ }
+ }
+
+ // The following is covered by the length check above.
+ msg[off] = byte(labelLen)
+
+ if bs == nil {
+ copy(msg[off+1:], s[begin:i])
+ } else {
+ copy(msg[off+1:], bs[begin:i])
+ }
+ off += 1 + labelLen
+
+ begin = i + 1
+ compBegin = begin + compOff
+ default:
+ wasDot = false
+ }
+ }
+
+ // Root label is special
+ if isRootLabel(s, bs, 0, ls) {
+ return off, nil
+ }
+
+ // If we did compression and we find something add the pointer here
+ if pointer != -1 {
+ // We have two bytes (14 bits) to put the pointer in
+ binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
+ return off + 2, nil
+ }
+
+ if off < len(msg) {
+ msg[off] = 0
+ }
+
+ return off + 1, nil
+}
+
+// isRootLabel returns whether s or bs, from off to end, is the root
+// label ".".
+//
+// If bs is nil, s will be checked, otherwise bs will be checked.
+func isRootLabel(s string, bs []byte, off, end int) bool {
+ if bs == nil {
+ return s[off:end] == "."
+ }
+
+ return end-off == 1 && bs[off] == '.'
+}
+
+// Unpack a domain name.
+// In addition to the simple sequences of counted strings above,
+// domain names are allowed to refer to strings elsewhere in the
+// packet, to avoid repeating common suffixes when returning
+// many entries in a single domain. The pointers are marked
+// by a length byte with the top two bits set. Ignoring those
+// two bits, that byte and the next give a 14 bit offset from msg[0]
+// where we should pick up the trail.
+// Note that if we jump elsewhere in the packet,
+// we return off1 == the offset after the first pointer we found,
+// which is where the next record will start.
+// In theory, the pointers are only allowed to jump backward.
+// We let them jump anywhere and stop jumping after a while.
+
+// UnpackDomainName unpacks a domain name into a string. It returns
+// the name, the new offset into msg and any error that occurred.
+//
+// When an error is encountered, the unpacked name will be discarded
+// and len(msg) will be returned as the offset.
+func UnpackDomainName(msg []byte, off int) (string, int, error) {
+ s := make([]byte, 0, maxDomainNamePresentationLength)
+ off1 := 0
+ lenmsg := len(msg)
+ budget := maxDomainNameWireOctets
+ ptr := 0 // number of pointers followed
+Loop:
+ for {
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c := int(msg[off])
+ off++
+ switch c & 0xC0 {
+ case 0x00:
+ if c == 0x00 {
+ // end of name
+ break Loop
+ }
+ // literal string
+ if off+c > lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ budget -= c + 1 // +1 for the label separator
+ if budget <= 0 {
+ return "", lenmsg, ErrLongDomain
+ }
+ for _, b := range msg[off : off+c] {
+ switch b {
+ case '.', '(', ')', ';', ' ', '@':
+ fallthrough
+ case '"', '\\':
+ s = append(s, '\\', b)
+ default:
+ if b < ' ' || b > '~' { // unprintable, use \DDD
+ s = append(s, escapeByte(b)...)
+ } else {
+ s = append(s, b)
+ }
+ }
+ }
+ s = append(s, '.')
+ off += c
+ case 0xC0:
+ // pointer to somewhere else in msg.
+ // remember location after first ptr,
+ // since that's how many bytes we consumed.
+ // also, don't follow too many pointers --
+ // maybe there's a loop.
+ if off >= lenmsg {
+ return "", lenmsg, ErrBuf
+ }
+ c1 := msg[off]
+ off++
+ if ptr == 0 {
+ off1 = off
+ }
+ if ptr++; ptr > maxCompressionPointers {
+ return "", lenmsg, &Error{err: "too many compression pointers"}
+ }
+ // In theory a pointer is only allowed to jump backward, but we let
+ // it jump anywhere; the pointer-count check on the previous lines
+ // guarantees that following pointers is at least loop-free.
+ off = (c^0xC0)<<8 | int(c1)
+ default:
+ // 0x80 and 0x40 are reserved
+ return "", lenmsg, ErrRdata
+ }
+ }
+ if ptr == 0 {
+ off1 = off
+ }
+ if len(s) == 0 {
+ return ".", off1, nil
+ }
+ return string(s), off1, nil
+}
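A small round trip through PackDomainName and UnpackDomainName illustrates the counted-string encoding described above; a minimal sketch (nil compression map, so compression is off):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	buf := make([]byte, 64)
	off, err := dns.PackDomainName("www.example.org.", buf, 0, nil, false)
	if err != nil {
		panic(err)
	}
	// Counted strings: 3 "www", 7 "example", 3 "org", then a zero octet.
	fmt.Println(buf[:off])

	name, next, err := dns.UnpackDomainName(buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(name, next) // www.example.org. 17
}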
+
+func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
+ if len(txt) == 0 {
+ if offset >= len(msg) {
+ return offset, ErrBuf
+ }
+ msg[offset] = 0
+ return offset, nil
+ }
+ var err error
+ for _, s := range txt {
+ if len(s) > len(tmp) {
+ return offset, ErrBuf
+ }
+ offset, err = packTxtString(s, msg, offset, tmp)
+ if err != nil {
+ return offset, err
+ }
+ }
+ return offset, nil
+}
+
+func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ lenByteOffset := offset
+ if offset >= len(msg) || len(s) > len(tmp) {
+ return offset, ErrBuf
+ }
+ offset++
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ l := offset - lenByteOffset - 1
+ if l > 255 {
+ return offset, &Error{err: "string exceeded 255 bytes in txt"}
+ }
+ msg[lenByteOffset] = byte(l)
+ return offset, nil
+}
+
+func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) {
+ if offset >= len(msg) || len(s) > len(tmp) {
+ return offset, ErrBuf
+ }
+ bs := tmp[:len(s)]
+ copy(bs, s)
+ for i := 0; i < len(bs); i++ {
+ if len(msg) <= offset {
+ return offset, ErrBuf
+ }
+ if bs[i] == '\\' {
+ i++
+ if i == len(bs) {
+ break
+ }
+ // check for \DDD
+ if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) {
+ msg[offset] = dddToByte(bs[i:])
+ i += 2
+ } else {
+ msg[offset] = bs[i]
+ }
+ } else {
+ msg[offset] = bs[i]
+ }
+ offset++
+ }
+ return offset, nil
+}
+
+func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
+ off = off0
+ var s string
+ for off < len(msg) && err == nil {
+ s, off, err = unpackString(msg, off)
+ if err == nil {
+ ss = append(ss, s)
+ }
+ }
+ return
+}
+
+// Helpers for dealing with escaped bytes
+func isDigit(b byte) bool { return b >= '0' && b <= '9' }
+
+func dddToByte(s []byte) byte {
+ _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
+ return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
+func dddStringToByte(s string) byte {
+ _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808
+ return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
+// Helper function for packing and unpacking
+func intToBytes(i *big.Int, length int) []byte {
+ buf := i.Bytes()
+ if len(buf) < length {
+ b := make([]byte, length)
+ copy(b[length-len(buf):], buf)
+ return b
+ }
+ return buf
+}
+
+// PackRR packs a resource record rr into msg[off:].
+// See PackDomainName for documentation about the compression.
+func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+ headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress)
+ if err == nil {
+ // packRR no longer sets the Rdlength field on the rr, but
+ // callers might be expecting it so we set it here.
+ rr.Header().Rdlength = uint16(off1 - headerEnd)
+ }
+ return off1, err
+}
+
+func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) {
+ if rr == nil {
+ return len(msg), len(msg), &Error{err: "nil rr"}
+ }
+
+ headerEnd, err = rr.Header().packHeader(msg, off, compression, compress)
+ if err != nil {
+ return headerEnd, len(msg), err
+ }
+
+ off1, err = rr.pack(msg, headerEnd, compression, compress)
+ if err != nil {
+ return headerEnd, len(msg), err
+ }
+
+ rdlength := off1 - headerEnd
+ if int(uint16(rdlength)) != rdlength { // overflow
+ return headerEnd, len(msg), ErrRdata
+ }
+
+ // The RDLENGTH field is the last field in the header and we set it here.
+ binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength))
+ return headerEnd, off1, nil
+}
+
+// UnpackRR unpacks msg[off:] into an RR.
+func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
+ h, off, msg, err := unpackHeader(msg, off)
+ if err != nil {
+ return nil, len(msg), err
+ }
+
+ return UnpackRRWithHeader(h, msg, off)
+}
+
+// UnpackRRWithHeader unpacks the record type specific payload given an existing
+// RR_Header.
+func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
+ if newFn, ok := TypeToRR[h.Rrtype]; ok {
+ rr = newFn()
+ *rr.Header() = h
+ } else {
+ rr = &RFC3597{Hdr: h}
+ }
+
+ if noRdata(h) {
+ return rr, off, nil
+ }
+
+ end := off + int(h.Rdlength)
+
+ off, err = rr.unpack(msg, off)
+ if err != nil {
+ return nil, end, err
+ }
+ if off != end {
+ return &h, end, &Error{err: "bad rdlength"}
+ }
+
+ return rr, off, nil
+}
+
+// unpackRRslice unpacks msg[off:] into an []RR.
+// If we cannot unpack the whole array, then it will return nil
+func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
+ var r RR
+ // Don't pre-allocate, l may be under attacker control
+ var dst []RR
+ for i := 0; i < l; i++ {
+ off1 := off
+ r, off, err = UnpackRR(msg, off)
+ if err != nil {
+ off = len(msg)
+ break
+ }
+ // If offset does not increase anymore, l is a lie
+ if off1 == off {
+ l = i
+ break
+ }
+ dst = append(dst, r)
+ }
+ if err != nil && off == len(msg) {
+ dst = nil
+ }
+ return dst, off, err
+}
+
+// Convert a MsgHdr to a string, with dig-like headers:
+//
+//;; opcode: QUERY, status: NOERROR, id: 48404
+//
+//;; flags: qr aa rd ra;
+func (h *MsgHdr) String() string {
+ if h == nil {
+ return " MsgHdr"
+ }
+
+ s := ";; opcode: " + OpcodeToString[h.Opcode]
+ s += ", status: " + RcodeToString[h.Rcode]
+ s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
+
+ s += ";; flags:"
+ if h.Response {
+ s += " qr"
+ }
+ if h.Authoritative {
+ s += " aa"
+ }
+ if h.Truncated {
+ s += " tc"
+ }
+ if h.RecursionDesired {
+ s += " rd"
+ }
+ if h.RecursionAvailable {
+ s += " ra"
+ }
+ if h.Zero { // Hmm
+ s += " z"
+ }
+ if h.AuthenticatedData {
+ s += " ad"
+ }
+ if h.CheckingDisabled {
+ s += " cd"
+ }
+
+ s += ";"
+ return s
+}
+
+// Pack packs a Msg: it is converted to wire format.
+// If dns.Compress is true the message will be in compressed wire format.
+func (dns *Msg) Pack() (msg []byte, err error) {
+ return dns.PackBuffer(nil)
+}
+
+// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated.
+func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
+ // If this message can't be compressed, avoid filling the
+ // compression map and creating garbage.
+ if dns.Compress && dns.isCompressible() {
+ compression := make(map[string]uint16) // Compression pointer mappings.
+ return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true)
+ }
+
+ return dns.packBufferWithCompressionMap(buf, compressionMap{}, false)
+}
+
+// packBufferWithCompressionMap packs a Msg, using the given buffer buf.
+func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) {
+ if dns.Rcode < 0 || dns.Rcode > 0xFFF {
+ return nil, ErrRcode
+ }
+
+ // Set extended rcode unconditionally if we have an opt; this allows
+ // resetting the extended rcode bits when needed.
+ if opt := dns.IsEdns0(); opt != nil {
+ opt.SetExtendedRcode(uint16(dns.Rcode))
+ } else if dns.Rcode > 0xF {
+ // If Rcode is an extended one and opt is nil, error out.
+ return nil, ErrExtendedRcode
+ }
+
+ // Convert convenient Msg into wire-like Header.
+ var dh Header
+ dh.Id = dns.Id
+ dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF)
+ if dns.Response {
+ dh.Bits |= _QR
+ }
+ if dns.Authoritative {
+ dh.Bits |= _AA
+ }
+ if dns.Truncated {
+ dh.Bits |= _TC
+ }
+ if dns.RecursionDesired {
+ dh.Bits |= _RD
+ }
+ if dns.RecursionAvailable {
+ dh.Bits |= _RA
+ }
+ if dns.Zero {
+ dh.Bits |= _Z
+ }
+ if dns.AuthenticatedData {
+ dh.Bits |= _AD
+ }
+ if dns.CheckingDisabled {
+ dh.Bits |= _CD
+ }
+
+ dh.Qdcount = uint16(len(dns.Question))
+ dh.Ancount = uint16(len(dns.Answer))
+ dh.Nscount = uint16(len(dns.Ns))
+ dh.Arcount = uint16(len(dns.Extra))
+
+ // We need the uncompressed length here, because we first pack it and then compress it.
+ msg = buf
+ uncompressedLen := msgLenWithCompressionMap(dns, nil)
+ if packLen := uncompressedLen + 1; len(msg) < packLen {
+ msg = make([]byte, packLen)
+ }
+
+ // Pack it in: header and then the pieces.
+ off := 0
+ off, err = dh.pack(msg, off, compression, compress)
+ if err != nil {
+ return nil, err
+ }
+ for _, r := range dns.Question {
+ off, err = r.pack(msg, off, compression, compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, r := range dns.Answer {
+ _, off, err = packRR(r, msg, off, compression, compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, r := range dns.Ns {
+ _, off, err = packRR(r, msg, off, compression, compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ for _, r := range dns.Extra {
+ _, off, err = packRR(r, msg, off, compression, compress)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msg[:off], nil
+}
+
+func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) {
+ // If we are at the end of the message we should return *just* the
+ // header. This can still be useful to the caller. 9.9.9.9 sends these
+ // when responding with REFUSED for instance.
+ if off == len(msg) {
+ // reset sections before returning
+ dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
+ return nil
+ }
+
+ // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
+ // attacker controlled. This means we can't use them to pre-allocate
+ // slices.
+ dns.Question = nil
+ for i := 0; i < int(dh.Qdcount); i++ {
+ off1 := off
+ var q Question
+ q, off, err = unpackQuestion(msg, off)
+ if err != nil {
+ return err
+ }
+ if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
+ dh.Qdcount = uint16(i)
+ break
+ }
+ dns.Question = append(dns.Question, q)
+ }
+
+ dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
+ // The header counts might have been wrong so we need to update it
+ dh.Ancount = uint16(len(dns.Answer))
+ if err == nil {
+ dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
+ }
+ // The header counts might have been wrong so we need to update it
+ dh.Nscount = uint16(len(dns.Ns))
+ if err == nil {
+ dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
+ }
+ // The header counts might have been wrong so we need to update it
+ dh.Arcount = uint16(len(dns.Extra))
+
+ // Set extended Rcode
+ if opt := dns.IsEdns0(); opt != nil {
+ dns.Rcode |= opt.ExtendedRcode()
+ }
+
+ if off != len(msg) {
+ // TODO(miek) make this an error?
+ // use PackOpt to let people tell how detailed the error reporting should be?
+ // println("dns: extra bytes in dns packet", off, "<", len(msg))
+ }
+ return err
+
+}
+
+// Unpack unpacks a binary message to a Msg structure.
+func (dns *Msg) Unpack(msg []byte) (err error) {
+ dh, off, err := unpackMsgHdr(msg, 0)
+ if err != nil {
+ return err
+ }
+
+ dns.setHdr(dh)
+ return dns.unpack(dh, msg, off)
+}
+
+// Convert a complete message to a string with dig-like output.
+func (dns *Msg) String() string {
+ if dns == nil {
+ return " MsgHdr"
+ }
+ s := dns.MsgHdr.String() + " "
+ s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
+ s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
+ s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
+ s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
+ if len(dns.Question) > 0 {
+ s += "\n;; QUESTION SECTION:\n"
+ for _, r := range dns.Question {
+ s += r.String() + "\n"
+ }
+ }
+ if len(dns.Answer) > 0 {
+ s += "\n;; ANSWER SECTION:\n"
+ for _, r := range dns.Answer {
+ if r != nil {
+ s += r.String() + "\n"
+ }
+ }
+ }
+ if len(dns.Ns) > 0 {
+ s += "\n;; AUTHORITY SECTION:\n"
+ for _, r := range dns.Ns {
+ if r != nil {
+ s += r.String() + "\n"
+ }
+ }
+ }
+ if len(dns.Extra) > 0 {
+ s += "\n;; ADDITIONAL SECTION:\n"
+ for _, r := range dns.Extra {
+ if r != nil {
+ s += r.String() + "\n"
+ }
+ }
+ }
+ return s
+}
+
+// isCompressible returns whether the msg may be compressible.
+func (dns *Msg) isCompressible() bool {
+ // If we only have one question, there is nothing we can ever compress.
+ return len(dns.Question) > 1 || len(dns.Answer) > 0 ||
+ len(dns.Ns) > 0 || len(dns.Extra) > 0
+}
+
+// Len returns the message length when in (un)compressed wire format.
+// If dns.Compress is true, compression is taken into account. Len()
+// is provided as a faster way to get the size of the resulting packet
+// than packing it, measuring the size and discarding the buffer.
+func (dns *Msg) Len() int {
+ // If this message can't be compressed, avoid filling the
+ // compression map and creating garbage.
+ if dns.Compress && dns.isCompressible() {
+ compression := make(map[string]struct{})
+ return msgLenWithCompressionMap(dns, compression)
+ }
+
+ return msgLenWithCompressionMap(dns, nil)
+}
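For a simple message the estimate from Len matches the packed size; a quick check (the MX query is illustrative):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeMX)
	m.Compress = true

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// Len computes the size without allocating or packing a buffer;
	// the two lengths agree for this simple query.
	fmt.Println(m.Len(), len(wire))
}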
+
+func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int {
+ l := headerSize
+
+ for _, r := range dns.Question {
+ l += r.len(l, compression)
+ }
+ for _, r := range dns.Answer {
+ if r != nil {
+ l += r.len(l, compression)
+ }
+ }
+ for _, r := range dns.Ns {
+ if r != nil {
+ l += r.len(l, compression)
+ }
+ }
+ for _, r := range dns.Extra {
+ if r != nil {
+ l += r.len(l, compression)
+ }
+ }
+
+ return l
+}
+
+func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int {
+ if s == "" || s == "." {
+ return 1
+ }
+
+ escaped := strings.Contains(s, "\\")
+
+ if compression != nil && (compress || off < maxCompressionOffset) {
+ // compressionLenSearch will insert the entry into the compression
+ // map if it doesn't contain it.
+ if l, ok := compressionLenSearch(compression, s, off); ok && compress {
+ if escaped {
+ return escapedNameLen(s[:l]) + 2
+ }
+
+ return l + 2
+ }
+ }
+
+ if escaped {
+ return escapedNameLen(s) + 1
+ }
+
+ return len(s) + 1
+}
+
+func escapedNameLen(s string) int {
+ nameLen := len(s)
+ for i := 0; i < len(s); i++ {
+ if s[i] != '\\' {
+ continue
+ }
+
+ if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
+ nameLen -= 3
+ i += 3
+ } else {
+ nameLen--
+ i++
+ }
+ }
+
+ return nameLen
+}
+
+func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) {
+ for off, end := 0, false; !end; off, end = NextLabel(s, off) {
+ if _, ok := c[s[off:]]; ok {
+ return off, true
+ }
+
+ if msgOff+off < maxCompressionOffset {
+ c[s[off:]] = struct{}{}
+ }
+ }
+
+ return 0, false
+}
+
+// Copy returns a new RR which is a deep-copy of r.
+func Copy(r RR) RR { return r.copy() }
+
+// Len returns the length (in octets) of the uncompressed RR in wire format.
+func Len(r RR) int { return r.len(0, nil) }
+
+// Copy returns a new *Msg which is a deep-copy of dns.
+func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
+
+// CopyTo copies the contents to the provided message using a deep-copy and returns the copy.
+func (dns *Msg) CopyTo(r1 *Msg) *Msg {
+ r1.MsgHdr = dns.MsgHdr
+ r1.Compress = dns.Compress
+
+ if len(dns.Question) > 0 {
+ r1.Question = make([]Question, len(dns.Question))
+ copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy
+ }
+
+ rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
+ r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):]
+ r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):]
+ r1.Extra = rrArr[:0:len(dns.Extra)]
+
+ for _, r := range dns.Answer {
+ r1.Answer = append(r1.Answer, r.copy())
+ }
+
+ for _, r := range dns.Ns {
+ r1.Ns = append(r1.Ns, r.copy())
+ }
+
+ for _, r := range dns.Extra {
+ r1.Extra = append(r1.Extra, r.copy())
+ }
+
+ return r1
+}
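Since Copy and CopyTo are deep copies, mutating the clone leaves the original intact; a short sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	orig := new(dns.Msg)
	orig.SetQuestion("example.org.", dns.TypeA)

	clone := orig.Copy()
	clone.Question[0].Name = "example.net."

	// The original question section is untouched.
	fmt.Println(orig.Question[0].Name) // example.org.
}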
+
+func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
+ off, err := packDomainName(q.Name, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(q.Qtype, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(q.Qclass, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func unpackQuestion(msg []byte, off int) (Question, int, error) {
+ var (
+ q Question
+ err error
+ )
+ q.Name, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return q, off, err
+ }
+ if off == len(msg) {
+ return q, off, nil
+ }
+ q.Qtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return q, off, err
+ }
+ if off == len(msg) {
+ return q, off, nil
+ }
+ q.Qclass, off, err = unpackUint16(msg, off)
+ if off == len(msg) {
+ return q, off, nil
+ }
+ return q, off, err
+}
+
+func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
+ off, err := packUint16(dh.Id, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Bits, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Qdcount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Ancount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Nscount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(dh.Arcount, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
+ var (
+ dh Header
+ err error
+ )
+ dh.Id, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Bits, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Qdcount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Ancount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Nscount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ dh.Arcount, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return dh, off, err
+ }
+ return dh, off, nil
+}
+
+// setHdr sets the header in dns using the binary data in dh.
+func (dns *Msg) setHdr(dh Header) {
+ dns.Id = dh.Id
+ dns.Response = dh.Bits&_QR != 0
+ dns.Opcode = int(dh.Bits>>11) & 0xF
+ dns.Authoritative = dh.Bits&_AA != 0
+ dns.Truncated = dh.Bits&_TC != 0
+ dns.RecursionDesired = dh.Bits&_RD != 0
+ dns.RecursionAvailable = dh.Bits&_RA != 0
+ dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite.
+ dns.AuthenticatedData = dh.Bits&_AD != 0
+ dns.CheckingDisabled = dh.Bits&_CD != 0
+ dns.Rcode = int(dh.Bits & 0xF)
+}
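Tying the packing and header plumbing together: a query packed by Pack and read back by Unpack passes through setHdr, which turns the bit field back into the boolean flags. A minimal sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	q := new(dns.Msg)
	q.SetQuestion("example.org.", dns.TypeA) // also sets a random Id and RD

	wire, err := q.Pack()
	if err != nil {
		panic(err)
	}

	m := new(dns.Msg)
	if err := m.Unpack(wire); err != nil {
		panic(err)
	}
	// setHdr decoded the header bits back into flags.
	fmt.Println(m.Id == q.Id, m.RecursionDesired, m.Response) // true true false
}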
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
new file mode 100644
index 0000000..98fadc3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -0,0 +1,810 @@
+package dns
+
+import (
+ "encoding/base32"
+ "encoding/base64"
+ "encoding/binary"
+ "encoding/hex"
+ "net"
+ "strings"
+)
+
+// helper functions called from the generated zmsg.go
+
+// These functions are named after the tag they help pack/unpack; if there is no tag it is the name
+// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or
+// packDataDomainName.
+
+func unpackDataA(msg []byte, off int) (net.IP, int, error) {
+ if off+net.IPv4len > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking a"}
+ }
+ a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...)
+ off += net.IPv4len
+ return a, off, nil
+}
+
+func packDataA(a net.IP, msg []byte, off int) (int, error) {
+ switch len(a) {
+ case net.IPv4len, net.IPv6len:
+ // It must be a 4-byte slice; a 16-byte slice is also accepted, but only its IPv4 form (To4) is encoded
+ if off+net.IPv4len > len(msg) {
+ return len(msg), &Error{err: "overflow packing a"}
+ }
+
+ copy(msg[off:], a.To4())
+ off += net.IPv4len
+ case 0:
+ // Allowed, for dynamic updates.
+ default:
+ return len(msg), &Error{err: "overflow packing a"}
+ }
+ return off, nil
+}
+
+func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
+ if off+net.IPv6len > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking aaaa"}
+ }
+ aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...)
+ off += net.IPv6len
+ return aaaa, off, nil
+}
+
+func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
+ switch len(aaaa) {
+ case net.IPv6len:
+ if off+net.IPv6len > len(msg) {
+ return len(msg), &Error{err: "overflow packing aaaa"}
+ }
+
+ copy(msg[off:], aaaa)
+ off += net.IPv6len
+ case 0:
+ // Allowed, dynamic updates.
+ default:
+ return len(msg), &Error{err: "overflow packing aaaa"}
+ }
+ return off, nil
+}
+
+// unpackHeader unpacks an RR header, returning the offset to the end of the header and a
+// re-sliced msg according to the expected length of the RR.
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) {
+ hdr := RR_Header{}
+ if off == len(msg) {
+ return hdr, off, msg, nil
+ }
+
+ hdr.Name, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Rrtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Class, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Ttl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ hdr.Rdlength, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return hdr, len(msg), msg, err
+ }
+ msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
+ return hdr, off, msg, err
+}
+
+// packHeader packs an RR header, returning the offset to the end of the header.
+// See PackDomainName for documentation about the compression.
+func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
+ if off == len(msg) {
+ return off, nil
+ }
+
+ off, err := packDomainName(hdr.Name, msg, off, compression, compress)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(hdr.Rrtype, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(hdr.Class, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint32(hdr.Ttl, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+// helpers for the helper functions above.
+
+// truncateMsgFromRdlength truncates msg to match the expected length of the RR.
+// Returns an error if msg is smaller than the expected size.
+func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) {
+ lenrd := off + int(rdlength)
+ if lenrd > len(msg) {
+ return msg, &Error{err: "overflowing header size"}
+ }
+ return msg[:lenrd], nil
+}
+
+var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
+
+func fromBase32(s []byte) (buf []byte, err error) {
+ for i, b := range s {
+ if b >= 'a' && b <= 'z' {
+ s[i] = b - 32
+ }
+ }
+ buflen := base32HexNoPadEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base32HexNoPadEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+func toBase32(b []byte) string {
+ return base32HexNoPadEncoding.EncodeToString(b)
+}
+
+func fromBase64(s []byte) (buf []byte, err error) {
+ buflen := base64.StdEncoding.DecodedLen(len(s))
+ buf = make([]byte, buflen)
+ n, err := base64.StdEncoding.Decode(buf, s)
+ buf = buf[:n]
+ return
+}
+
+func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) }
+
+// noRdata returns true if the Rdlength is zero, as is the case for dynamic updates.
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 }
+
+func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
+ if off+1 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint8"}
+ }
+ return msg[off], off + 1, nil
+}
+
+func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
+ if off+1 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint8"}
+ }
+ msg[off] = i
+ return off + 1, nil
+}
+
+func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) {
+ if off+2 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint16"}
+ }
+ return binary.BigEndian.Uint16(msg[off:]), off + 2, nil
+}
+
+func packUint16(i uint16, msg []byte, off int) (off1 int, err error) {
+ if off+2 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint16"}
+ }
+ binary.BigEndian.PutUint16(msg[off:], i)
+ return off + 2, nil
+}
+
+func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) {
+ if off+4 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint32"}
+ }
+ return binary.BigEndian.Uint32(msg[off:]), off + 4, nil
+}
+
+func packUint32(i uint32, msg []byte, off int) (off1 int, err error) {
+ if off+4 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint32"}
+ }
+ binary.BigEndian.PutUint32(msg[off:], i)
+ return off + 4, nil
+}
+
+func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) {
+ if off+6 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"}
+ }
+ // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes)
+ i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 |
+ uint64(msg[off+4])<<8 | uint64(msg[off+5])
+ off += 6
+ return i, off, nil
+}
+
+func packUint48(i uint64, msg []byte, off int) (off1 int, err error) {
+ if off+6 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint64 as uint48"}
+ }
+ msg[off] = byte(i >> 40)
+ msg[off+1] = byte(i >> 32)
+ msg[off+2] = byte(i >> 24)
+ msg[off+3] = byte(i >> 16)
+ msg[off+4] = byte(i >> 8)
+ msg[off+5] = byte(i)
+ off += 6
+ return off, nil
+}
+
+func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) {
+ if off+8 > len(msg) {
+ return 0, len(msg), &Error{err: "overflow unpacking uint64"}
+ }
+ return binary.BigEndian.Uint64(msg[off:]), off + 8, nil
+}
+
+func packUint64(i uint64, msg []byte, off int) (off1 int, err error) {
+ if off+8 > len(msg) {
+ return len(msg), &Error{err: "overflow packing uint64"}
+ }
+ binary.BigEndian.PutUint64(msg[off:], i)
+ off += 8
+ return off, nil
+}
+
+func unpackString(msg []byte, off int) (string, int, error) {
+ if off+1 > len(msg) {
+ return "", off, &Error{err: "overflow unpacking txt"}
+ }
+ l := int(msg[off])
+ off++
+ if off+l > len(msg) {
+ return "", off, &Error{err: "overflow unpacking txt"}
+ }
+ var s strings.Builder
+ consumed := 0
+ for i, b := range msg[off : off+l] {
+ switch {
+ case b == '"' || b == '\\':
+ if consumed == 0 {
+ s.Grow(l * 2)
+ }
+ s.Write(msg[off+consumed : off+i])
+ s.WriteByte('\\')
+ s.WriteByte(b)
+ consumed = i + 1
+ case b < ' ' || b > '~': // unprintable
+ if consumed == 0 {
+ s.Grow(l * 2)
+ }
+ s.Write(msg[off+consumed : off+i])
+ s.WriteString(escapeByte(b))
+ consumed = i + 1
+ }
+ }
+ if consumed == 0 { // no escaping needed
+ return string(msg[off : off+l]), off + l, nil
+ }
+ s.Write(msg[off+consumed : off+l])
+ return s.String(), off + l, nil
+}
+
+func packString(s string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1)
+ off, err := packTxtString(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackStringBase32(msg []byte, off, end int) (string, int, error) {
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking base32"}
+ }
+ s := toBase32(msg[off:end])
+ return s, end, nil
+}
+
+func packStringBase32(s string, msg []byte, off int) (int, error) {
+ b32, err := fromBase32([]byte(s))
+ if err != nil {
+ return len(msg), err
+ }
+ if off+len(b32) > len(msg) {
+ return len(msg), &Error{err: "overflow packing base32"}
+ }
+ copy(msg[off:off+len(b32)], b32)
+ off += len(b32)
+ return off, nil
+}
+
+func unpackStringBase64(msg []byte, off, end int) (string, int, error) {
+ // Rest of the RR is base64 encoded value, so we don't need an explicit length
+// to be set. Thus far all RRs that have base64 encoded fields have those as their
+ // last one. What we do need is the end of the RR!
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking base64"}
+ }
+ s := toBase64(msg[off:end])
+ return s, end, nil
+}
+
+func packStringBase64(s string, msg []byte, off int) (int, error) {
+ b64, err := fromBase64([]byte(s))
+ if err != nil {
+ return len(msg), err
+ }
+ if off+len(b64) > len(msg) {
+ return len(msg), &Error{err: "overflow packing base64"}
+ }
+ copy(msg[off:off+len(b64)], b64)
+ off += len(b64)
+ return off, nil
+}
+
+func unpackStringHex(msg []byte, off, end int) (string, int, error) {
+ // Rest of the RR is hex encoded value, so we don't need an explicit length
+ // to be set. NSEC and TSIG have hex fields with a length field.
+ // What we do need is the end of the RR!
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking hex"}
+ }
+
+ s := hex.EncodeToString(msg[off:end])
+ return s, end, nil
+}
+
+func packStringHex(s string, msg []byte, off int) (int, error) {
+ h, err := hex.DecodeString(s)
+ if err != nil {
+ return len(msg), err
+ }
+ if off+len(h) > len(msg) {
+ return len(msg), &Error{err: "overflow packing hex"}
+ }
+ copy(msg[off:off+len(h)], h)
+ off += len(h)
+ return off, nil
+}
+
+func unpackStringAny(msg []byte, off, end int) (string, int, error) {
+ if end > len(msg) {
+ return "", len(msg), &Error{err: "overflow unpacking anything"}
+ }
+ return string(msg[off:end]), end, nil
+}
+
+func packStringAny(s string, msg []byte, off int) (int, error) {
+ if off+len(s) > len(msg) {
+ return len(msg), &Error{err: "overflow packing anything"}
+ }
+ copy(msg[off:off+len(s)], s)
+ off += len(s)
+ return off, nil
+}
+
+func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
+ txt, off, err := unpackTxt(msg, off)
+ if err != nil {
+ return nil, len(msg), err
+ }
+ return txt, off, nil
+}
+
+func packStringTxt(s []string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many.
+ off, err := packTxt(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
+ var edns []EDNS0
+Option:
+ var code uint16
+ if off+4 > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking opt"}
+ }
+ code = binary.BigEndian.Uint16(msg[off:])
+ off += 2
+ optlen := binary.BigEndian.Uint16(msg[off:])
+ off += 2
+ if off+int(optlen) > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking opt"}
+ }
+ switch code {
+ case EDNS0NSID:
+ e := new(EDNS0_NSID)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0SUBNET:
+ e := new(EDNS0_SUBNET)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0COOKIE:
+ e := new(EDNS0_COOKIE)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0EXPIRE:
+ e := new(EDNS0_EXPIRE)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0UL:
+ e := new(EDNS0_UL)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0LLQ:
+ e := new(EDNS0_LLQ)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0DAU:
+ e := new(EDNS0_DAU)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0DHU:
+ e := new(EDNS0_DHU)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0N3U:
+ e := new(EDNS0_N3U)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ case EDNS0PADDING:
+ e := new(EDNS0_PADDING)
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ default:
+ e := new(EDNS0_LOCAL)
+ e.Code = code
+ if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
+ return nil, len(msg), err
+ }
+ edns = append(edns, e)
+ off += int(optlen)
+ }
+
+ if off < len(msg) {
+ goto Option
+ }
+
+ return edns, off, nil
+}
+
+func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) {
+ for _, el := range options {
+ b, err := el.pack()
+ if err != nil || off+4 > len(msg) {
+ return len(msg), &Error{err: "overflow packing opt"}
+ }
+ binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code
+ binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length
+ off += 4
+ if off+len(b) > len(msg) {
+ copy(msg[off:], b)
+ off = len(msg)
+ continue
+ }
+ // Actual data
+ copy(msg[off:off+len(b)], b)
+ off += len(b)
+ }
+ return off, nil
+}
+
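+// Editor's illustrative sketch, not part of the upstream file: these two
+// functions are the wire codec for the OPT RR's rdata. Packing a single
+// option (the EDNS0_NSID type handled above, with an empty payload) writes
+// just the 4-byte code/length header.
+func exampleBuildOptRdata() (int, error) {
+ nsid := new(EDNS0_NSID)
+ nsid.Code = EDNS0NSID
+ msg := make([]byte, 64)
+ return packDataOpt([]EDNS0{nsid}, msg, 0) // off 4, nil
+}
+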
+func unpackStringOctet(msg []byte, off int) (string, int, error) {
+ s := string(msg[off:])
+ return s, len(msg), nil
+}
+
+func packStringOctet(s string, msg []byte, off int) (int, error) {
+ txtTmp := make([]byte, 256*4+1)
+ off, err := packOctetString(s, msg, off, txtTmp)
+ if err != nil {
+ return len(msg), err
+ }
+ return off, nil
+}
+
+func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
+ var nsec []uint16
+ length, window, lastwindow := 0, 0, -1
+ for off < len(msg) {
+ if off+2 > len(msg) {
+ return nsec, len(msg), &Error{err: "overflow unpacking nsecx"}
+ }
+ window = int(msg[off])
+ length = int(msg[off+1])
+ off += 2
+ if window <= lastwindow {
+ // RFC 4034: Blocks are present in the NSEC RR RDATA in
+ // increasing numerical order.
+ return nsec, len(msg), &Error{err: "out of order NSEC block"}
+ }
+ if length == 0 {
+ // RFC 4034: Blocks with no types present MUST NOT be included.
+ return nsec, len(msg), &Error{err: "empty NSEC block"}
+ }
+ if length > 32 {
+ return nsec, len(msg), &Error{err: "NSEC block too long"}
+ }
+ if off+length > len(msg) {
+ return nsec, len(msg), &Error{err: "overflowing NSEC block"}
+ }
+
+ // Walk the bytes in the window and extract the type bits
+ for j, b := range msg[off : off+length] {
+ // Check the bits one by one, and set the type
+ if b&0x80 == 0x80 {
+ nsec = append(nsec, uint16(window*256+j*8+0))
+ }
+ if b&0x40 == 0x40 {
+ nsec = append(nsec, uint16(window*256+j*8+1))
+ }
+ if b&0x20 == 0x20 {
+ nsec = append(nsec, uint16(window*256+j*8+2))
+ }
+ if b&0x10 == 0x10 {
+ nsec = append(nsec, uint16(window*256+j*8+3))
+ }
+ if b&0x8 == 0x8 {
+ nsec = append(nsec, uint16(window*256+j*8+4))
+ }
+ if b&0x4 == 0x4 {
+ nsec = append(nsec, uint16(window*256+j*8+5))
+ }
+ if b&0x2 == 0x2 {
+ nsec = append(nsec, uint16(window*256+j*8+6))
+ }
+ if b&0x1 == 0x1 {
+ nsec = append(nsec, uint16(window*256+j*8+7))
+ }
+ }
+ off += length
+ lastwindow = window
+ }
+ return nsec, off, nil
+}
+
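+// Editor's illustrative sketch, not part of the upstream file: in the bitmap
+// format above, type T lives in window T/256, byte (T%256)/8 of that window,
+// bit 7-(T%8). Type 46 (RRSIG) is therefore window 0, byte 5, mask 0x02.
+func exampleUnpackNsecBit() ([]uint16, error) {
+ wire := []byte{0, 6, 0, 0, 0, 0, 0, 0x02} // window 0, 6 bitmap octets
+ types, _, err := unpackDataNsec(wire, 0)
+ return types, err // returns []uint16{46}, nil
+}
+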
+// typeBitMapLen is a helper function which computes the "maximum" length of
+// the NSEC Type BitMap field.
+func typeBitMapLen(bitmap []uint16) int {
+ var l int
+ var lastwindow, lastlength uint16
+ for _, t := range bitmap {
+ window := t / 256
+ length := (t-window*256)/8 + 1
+ if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
+ l += int(lastlength) + 2
+ lastlength = 0
+ }
+ if window < lastwindow || length < lastlength {
+ // packDataNsec would return Error{err: "nsec bits out of order"} here, but
+ // when computing the length, we want to be liberal.
+ continue
+ }
+ lastwindow, lastlength = window, length
+ }
+ l += int(lastlength) + 2
+ return l
+}
+
+func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
+ if len(bitmap) == 0 {
+ return off, nil
+ }
+ var lastwindow, lastlength uint16
+ for _, t := range bitmap {
+ window := t / 256
+ length := (t-window*256)/8 + 1
+ if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
+ off += int(lastlength) + 2
+ lastlength = 0
+ }
+ if window < lastwindow || length < lastlength {
+ return len(msg), &Error{err: "nsec bits out of order"}
+ }
+ if off+2+int(length) > len(msg) {
+ return len(msg), &Error{err: "overflow packing nsec"}
+ }
+ // Setting the window #
+ msg[off] = byte(window)
+ // Setting the octets length
+ msg[off+1] = byte(length)
+ // Setting the bit value for the type in the right octet
+ msg[off+1+int(length)] |= byte(1 << (7 - t%8))
+ lastwindow, lastlength = window, length
+ }
+ off += int(lastlength) + 2
+ return off, nil
+}
+
+func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
+ var (
+ servers []string
+ s string
+ err error
+ )
+ if end > len(msg) {
+ return nil, len(msg), &Error{err: "overflow unpacking domain names"}
+ }
+ for off < end {
+ s, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return servers, len(msg), err
+ }
+ servers = append(servers, s)
+ }
+ return servers, off, nil
+}
+
+func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
+ var err error
+ for _, name := range names {
+ off, err = packDomainName(name, msg, off, compression, compress)
+ if err != nil {
+ return len(msg), err
+ }
+ }
+ return off, nil
+}
+
+func packDataApl(data []APLPrefix, msg []byte, off int) (int, error) {
+ var err error
+ for i := range data {
+ off, err = packDataAplPrefix(&data[i], msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+ }
+ return off, nil
+}
+
+func packDataAplPrefix(p *APLPrefix, msg []byte, off int) (int, error) {
+ if len(p.Network.IP) != len(p.Network.Mask) {
+ return len(msg), &Error{err: "address and mask lengths don't match"}
+ }
+
+ var err error
+ prefix, _ := p.Network.Mask.Size()
+ addr := p.Network.IP.Mask(p.Network.Mask)[:(prefix+7)/8]
+
+ switch len(p.Network.IP) {
+ case net.IPv4len:
+ off, err = packUint16(1, msg, off)
+ case net.IPv6len:
+ off, err = packUint16(2, msg, off)
+ default:
+ err = &Error{err: "unrecognized address family"}
+ }
+ if err != nil {
+ return len(msg), err
+ }
+
+ off, err = packUint8(uint8(prefix), msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+
+ var n uint8
+ if p.Negation {
+ n = 0x80
+ }
+ adflen := uint8(len(addr)) & 0x7f
+ off, err = packUint8(n|adflen, msg, off)
+ if err != nil {
+ return len(msg), err
+ }
+
+ if off+len(addr) > len(msg) {
+ return len(msg), &Error{err: "overflow packing APL prefix"}
+ }
+ off += copy(msg[off:], addr)
+
+ return off, nil
+}
+
+func unpackDataApl(msg []byte, off int) ([]APLPrefix, int, error) {
+ var result []APLPrefix
+ for off < len(msg) {
+ prefix, end, err := unpackDataAplPrefix(msg, off)
+ if err != nil {
+ return nil, len(msg), err
+ }
+ off = end
+ result = append(result, prefix)
+ }
+ return result, off, nil
+}
+
+func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) {
+ family, off, err := unpackUint16(msg, off)
+ if err != nil {
+ return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
+ }
+ prefix, off, err := unpackUint8(msg, off)
+ if err != nil {
+ return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
+ }
+ nlen, off, err := unpackUint8(msg, off)
+ if err != nil {
+ return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL prefix"}
+ }
+
+ var ip []byte
+ switch family {
+ case 1:
+ ip = make([]byte, net.IPv4len)
+ case 2:
+ ip = make([]byte, net.IPv6len)
+ default:
+ return APLPrefix{}, len(msg), &Error{err: "unrecognized APL address family"}
+ }
+ if int(prefix) > 8*len(ip) {
+ return APLPrefix{}, len(msg), &Error{err: "APL prefix too long"}
+ }
+
+ afdlen := int(nlen & 0x7f)
+ if (int(prefix)+7)/8 != afdlen {
+ return APLPrefix{}, len(msg), &Error{err: "invalid APL address length"}
+ }
+ if off+afdlen > len(msg) {
+ return APLPrefix{}, len(msg), &Error{err: "overflow unpacking APL address"}
+ }
+ off += copy(ip, msg[off:off+afdlen])
+ if prefix%8 > 0 {
+ last := ip[afdlen-1]
+ zero := uint8(0xff) >> (prefix % 8)
+ if last&zero > 0 {
+ return APLPrefix{}, len(msg), &Error{err: "extra APL address bits"}
+ }
+ }
+
+ return APLPrefix{
+ Negation: (nlen & 0x80) != 0,
+ Network: net.IPNet{
+ IP: ip,
+ Mask: net.CIDRMask(int(prefix), 8*len(ip)),
+ },
+ }, off, nil
+}
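+
+// Editor's illustrative sketch, not part of the upstream file: an APL item
+// for 192.0.2.0/24 packs as family 1 (IPv4), prefix 24, a negation/afdlen
+// byte, then only the three significant address octets; unpacking restores
+// the full four-byte address.
+func exampleAplRoundTrip() (APLPrefix, error) {
+ _, network, err := net.ParseCIDR("192.0.2.0/24")
+ if err != nil {
+ return APLPrefix{}, err
+ }
+ msg := make([]byte, 16)
+ off, err := packDataAplPrefix(&APLPrefix{Network: *network}, msg, 0)
+ if err != nil {
+ return APLPrefix{}, err
+ }
+ out, _, err := unpackDataAplPrefix(msg[:off], 0)
+ return out, err
+}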
diff --git a/vendor/github.com/miekg/dns/msg_truncate.go b/vendor/github.com/miekg/dns/msg_truncate.go
new file mode 100644
index 0000000..89d4075
--- /dev/null
+++ b/vendor/github.com/miekg/dns/msg_truncate.go
@@ -0,0 +1,111 @@
+package dns
+
+// Truncate ensures the reply message will fit into the requested buffer
+// size by removing records that exceed the requested size.
+//
+// It will first check if the reply fits without compression and then with
+// compression. If it won't fit with compression, Truncate then walks the
+// records, adding as many as possible without exceeding the
+// requested buffer size.
+//
+// The TC bit will be set if any records were excluded from the message.
+// This indicates to the client that it should retry over TCP.
+//
+// According to RFC 2181, the TC bit should only be set if not all of the
+// "required" RRs can be included in the response. Unfortunately, we have
+// no way of knowing which RRs are required so we set the TC bit if any RR
+// had to be omitted from the response.
+//
+// The appropriate buffer size can be retrieved from the request's OPT
+// record, if present, and is transport specific otherwise. dns.MinMsgSize
+// should be used for UDP requests without an OPT record, and
+// dns.MaxMsgSize for TCP requests without an OPT record.
+func (dns *Msg) Truncate(size int) {
+ if dns.IsTsig() != nil {
+ // To simplify this implementation, we don't perform
+ // truncation on responses with a TSIG record.
+ return
+ }
+
+ // RFC 6891 mandates that the payload size in an OPT record
+ // less than 512 bytes must be treated as equal to 512 bytes.
+ //
+ // For ease of use, we impose that restriction here.
+ if size < 512 {
+ size = 512
+ }
+
+ l := msgLenWithCompressionMap(dns, nil) // uncompressed length
+ if l <= size {
+ // Don't waste effort compressing this message.
+ dns.Compress = false
+ return
+ }
+
+ dns.Compress = true
+
+ edns0 := dns.popEdns0()
+ if edns0 != nil {
+ // Account for the OPT record that gets added at the end,
+ // by subtracting that length from our budget.
+ //
+ // The EDNS(0) OPT record must have the root domain and
+ // its length is thus unaffected by compression.
+ size -= Len(edns0)
+ }
+
+ compression := make(map[string]struct{})
+
+ l = headerSize
+ for _, r := range dns.Question {
+ l += r.len(l, compression)
+ }
+
+ var numAnswer int
+ if l < size {
+ l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
+ }
+
+ var numNS int
+ if l < size {
+ l, numNS = truncateLoop(dns.Ns, size, l, compression)
+ }
+
+ var numExtra int
+ if l < size {
+ l, numExtra = truncateLoop(dns.Extra, size, l, compression)
+ }
+
+ // See the function documentation for when we set this.
+ dns.Truncated = len(dns.Answer) > numAnswer ||
+ len(dns.Ns) > numNS || len(dns.Extra) > numExtra
+
+ dns.Answer = dns.Answer[:numAnswer]
+ dns.Ns = dns.Ns[:numNS]
+ dns.Extra = dns.Extra[:numExtra]
+
+ if edns0 != nil {
+ // Add the OPT record back onto the additional section.
+ dns.Extra = append(dns.Extra, edns0)
+ }
+}
+
+func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
+ for i, r := range rrs {
+ if r == nil {
+ continue
+ }
+
+ l += r.len(l, compression)
+ if l > size {
+ // Return size, rather than l prior to this record,
+ // to prevent any further records being added.
+ return size, i
+ }
+ if l == size {
+ return l, i + 1
+ }
+ }
+
+ return l, len(rrs)
+}
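+
+// Editor's illustrative sketch, not part of the upstream file: a server
+// would typically size the reply from the request's OPT record when present
+// (Msg.IsEdns0 and OPT.UDPSize are defined elsewhere in this package).
+func exampleTruncateReply(req, reply *Msg) {
+ size := MinMsgSize // UDP default when the request carries no OPT record
+ if opt := req.IsEdns0(); opt != nil {
+ size = int(opt.UDPSize())
+ }
+ reply.Truncate(size)
+}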
diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go
new file mode 100644
index 0000000..8f071a4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/nsecx.go
@@ -0,0 +1,95 @@
+package dns
+
+import (
+ "crypto/sha1"
+ "encoding/hex"
+ "strings"
+)
+
+// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
+func HashName(label string, ha uint8, iter uint16, salt string) string {
+ if ha != SHA1 {
+ return ""
+ }
+
+ wireSalt := make([]byte, hex.DecodedLen(len(salt)))
+ n, err := packStringHex(salt, wireSalt, 0)
+ if err != nil {
+ return ""
+ }
+ wireSalt = wireSalt[:n]
+
+ name := make([]byte, 255)
+ off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
+ if err != nil {
+ return ""
+ }
+ name = name[:off]
+
+ s := sha1.New()
+ // k = 0
+ s.Write(name)
+ s.Write(wireSalt)
+ nsec3 := s.Sum(nil)
+
+ // k > 0
+ for k := uint16(0); k < iter; k++ {
+ s.Reset()
+ s.Write(nsec3)
+ s.Write(wireSalt)
+ nsec3 = s.Sum(nsec3[:0])
+ }
+
+ return toBase32(nsec3)
+}
+
+// Cover returns true if a name is covered by the NSEC3 record
+func (rr *NSEC3) Cover(name string) bool {
+ nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ owner := strings.ToUpper(rr.Hdr.Name)
+ labelIndices := Split(owner)
+ if len(labelIndices) < 2 {
+ return false
+ }
+ ownerHash := owner[:labelIndices[1]-1]
+ ownerZone := owner[labelIndices[1]:]
+ if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
+ return false
+ }
+
+ nextHash := rr.NextDomain
+
+ // an empty interval (ownerHash == nextHash) covers everything but the owner itself, so nameHash must not equal ownerHash
+ if ownerHash == nextHash && nameHash != ownerHash { // empty interval
+ return true
+ }
+ if ownerHash > nextHash { // end of zone
+ if nameHash > ownerHash { // covered since there is nothing after ownerHash
+ return true
+ }
+ return nameHash < nextHash // if nameHash is before beginning of zone it is covered
+ }
+ if nameHash < ownerHash { // nameHash is before ownerHash, not covered
+ return false
+ }
+ return nameHash < nextHash // if nameHash is before nextHash it is covered (between ownerHash and nextHash)
+}
+
+// Match returns true if a name matches the NSEC3 record
+func (rr *NSEC3) Match(name string) bool {
+ nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
+ owner := strings.ToUpper(rr.Hdr.Name)
+ labelIndices := Split(owner)
+ if len(labelIndices) < 2 {
+ return false
+ }
+ ownerHash := owner[:labelIndices[1]-1]
+ ownerZone := owner[labelIndices[1]:]
+ if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
+ return false
+ }
+ return ownerHash == nameHash
+}
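+
+// Editor's illustrative sketch, not part of the upstream file: denial-of-
+// existence checks typically combine the two predicates above, Match for
+// the exact name and Cover for the hash interval.
+func exampleNsec3Denial(rr *NSEC3, qname string) string {
+ switch {
+ case rr.Match(qname):
+ return "owner hash matches " + HashName(qname, rr.Hash, rr.Iterations, rr.Salt)
+ case rr.Cover(qname):
+ return "hash falls inside the interval, so the name does not exist"
+ default:
+ return "record proves nothing about this name"
+ }
+}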
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
new file mode 100644
index 0000000..e28f066
--- /dev/null
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -0,0 +1,114 @@
+package dns
+
+import "strings"
+
+// PrivateRdata is an interface used for implementing "Private Use" RR types, see
+// RFC 6895. This allows one to experiment with new RR types, without requesting an
+// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove.
+type PrivateRdata interface {
+ // String returns the text presentation of the Rdata of the Private RR.
+ String() string
+ // Parse parses the Rdata of the private RR.
+ Parse([]string) error
+ // Pack is used when packing a private RR into a buffer.
+ Pack([]byte) (int, error)
+ // Unpack is used when unpacking a private RR from a buffer.
+ // TODO(miek): diff. signature than Pack, see edns0.go for instance.
+ Unpack([]byte) (int, error)
+ // Copy copies the Rdata into the PrivateRdata argument.
+ Copy(PrivateRdata) error
+ // Len returns the length in octets of the Rdata.
+ Len() int
+}
+
+// PrivateRR represents an RR that uses a PrivateRdata user-defined type.
+// It mocks normal RRs and implements dns.RR interface.
+type PrivateRR struct {
+ Hdr RR_Header
+ Data PrivateRdata
+
+ generator func() PrivateRdata // for copy
+}
+
+// Header returns the RR header of r.
+func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
+
+func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
+
+// Private len and copy parts to satisfy RR interface.
+func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
+ l := r.Hdr.len(off, compression)
+ l += r.Data.Len()
+ return l
+}
+
+func (r *PrivateRR) copy() RR {
+ // make new RR like this:
+ rr := &PrivateRR{r.Hdr, r.generator(), r.generator}
+
+ if err := r.Data.Copy(rr.Data); err != nil {
+ panic("dns: got value that could not be used to copy Private rdata: " + err.Error())
+ }
+
+ return rr
+}
+
+func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
+ n, err := r.Data.Pack(msg[off:])
+ if err != nil {
+ return len(msg), err
+ }
+ off += n
+ return off, nil
+}
+
+func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
+ off1, err := r.Data.Unpack(msg[off:])
+ off += off1
+ return off, err
+}
+
+func (r *PrivateRR) parse(c *zlexer, origin string) *ParseError {
+ var l lex
+ text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
+Fetch:
+ for {
+ // TODO(miek): we could also be returning _QUOTE, this might or might not
+ // be an issue (basically parsing TXT becomes hard)
+ switch l, _ = c.Next(); l.value {
+ case zNewline, zEOF:
+ break Fetch
+ case zString:
+ text = append(text, l.token)
+ }
+ }
+
+ err := r.Data.Parse(text)
+ if err != nil {
+ return &ParseError{"", err.Error(), l}
+ }
+
+ return nil
+}
+
+func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
+
+// PrivateHandle registers a private resource record type. It requires
+// string and numeric representation of private RR type and generator function as argument.
+func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
+ rtypestr = strings.ToUpper(rtypestr)
+
+ TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator(), generator} }
+ TypeToString[rtype] = rtypestr
+ StringToType[rtypestr] = rtype
+}
+
+// PrivateHandleRemove removes definitions required to support private RR type.
+func PrivateHandleRemove(rtype uint16) {
+ rtypestr, ok := TypeToString[rtype]
+ if ok {
+ delete(TypeToRR, rtype)
+ delete(TypeToString, rtype)
+ delete(StringToType, rtypestr)
+ }
+}
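+
+// Editor's illustrative sketch, not part of the upstream file: the smallest
+// useful PrivateRdata is a single opaque text field. The mnemonic EXAMPLERR
+// and type code 0xFF00 (from the RFC 6895 private-use range) are invented
+// here purely for illustration; ErrRdata is defined elsewhere in this
+// package.
+type exampleRdata struct{ Data string }
+
+func (rd *exampleRdata) String() string { return rd.Data }
+func (rd *exampleRdata) Parse(txt []string) error { rd.Data = strings.Join(txt, " "); return nil }
+func (rd *exampleRdata) Pack(buf []byte) (int, error) { return copy(buf, rd.Data), nil }
+func (rd *exampleRdata) Unpack(buf []byte) (int, error) { rd.Data = string(buf); return len(buf), nil }
+func (rd *exampleRdata) Len() int { return len(rd.Data) }
+
+func (rd *exampleRdata) Copy(dest PrivateRdata) error {
+ d, ok := dest.(*exampleRdata)
+ if !ok {
+ return ErrRdata
+ }
+ d.Data = rd.Data
+ return nil
+}
+
+func registerExampleRdata() {
+ PrivateHandle("EXAMPLERR", 0xFF00, func() PrivateRdata { return new(exampleRdata) })
+}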
diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go
new file mode 100644
index 0000000..28151af
--- /dev/null
+++ b/vendor/github.com/miekg/dns/reverse.go
@@ -0,0 +1,52 @@
+package dns
+
+// StringToType is the reverse of TypeToString, needed for string parsing.
+var StringToType = reverseInt16(TypeToString)
+
+// StringToClass is the reverse of ClassToString, needed for string parsing.
+var StringToClass = reverseInt16(ClassToString)
+
+// StringToOpcode is a map of opcodes to strings.
+var StringToOpcode = reverseInt(OpcodeToString)
+
+// StringToRcode is a map of rcodes to strings.
+var StringToRcode = reverseInt(RcodeToString)
+
+func init() {
+ // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733.
+ StringToRcode["NOTIMPL"] = RcodeNotImplemented
+}
+
+// StringToAlgorithm is the reverse of AlgorithmToString.
+var StringToAlgorithm = reverseInt8(AlgorithmToString)
+
+// StringToHash is a map of names to hash IDs.
+var StringToHash = reverseInt8(HashToString)
+
+// StringToCertType is the reverse of CertTypeToString.
+var StringToCertType = reverseInt16(CertTypeToString)
+
+// Reverse a map
+func reverseInt8(m map[uint8]string) map[string]uint8 {
+ n := make(map[string]uint8, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt16(m map[uint16]string) map[string]uint16 {
+ n := make(map[string]uint16, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
+
+func reverseInt(m map[int]string) map[string]int {
+ n := make(map[string]int, len(m))
+ for u, s := range m {
+ n[s] = u
+ }
+ return n
+}
diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go
new file mode 100644
index 0000000..a638e86
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sanitize.go
@@ -0,0 +1,86 @@
+package dns
+
+// Dedup removes identical RRs from rrs. It preserves the original ordering.
+// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
+// rrs.
+// m is used to store the RRs temporarily. If it is nil a new map will be allocated.
+func Dedup(rrs []RR, m map[string]RR) []RR {
+
+ if m == nil {
+ m = make(map[string]RR)
+ }
+ // Save the keys, so we don't have to call normalizedString twice.
+ keys := make([]*string, 0, len(rrs))
+
+ for _, r := range rrs {
+ key := normalizedString(r)
+ keys = append(keys, &key)
+ if mr, ok := m[key]; ok {
+ // Shortest TTL wins.
+ rh, mrh := r.Header(), mr.Header()
+ if mrh.Ttl > rh.Ttl {
+ mrh.Ttl = rh.Ttl
+ }
+ continue
+ }
+
+ m[key] = r
+ }
+ // If the length of the result map equals the number of RRs we got,
+ // it means they were all different. We can then just return the original rrset.
+ if len(m) == len(rrs) {
+ return rrs
+ }
+
+ j := 0
+ for i, r := range rrs {
+ // If keys[i] lives in the map, we should copy and remove it.
+ if _, ok := m[*keys[i]]; ok {
+ delete(m, *keys[i])
+ rrs[j] = r
+ j++
+ }
+
+ if len(m) == 0 {
+ break
+ }
+ }
+
+ return rrs[:j]
+}
+
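+// Editor's illustrative sketch, not part of the upstream file: two records
+// that differ only in TTL collapse to one, keeping the lower TTL (NewRR is
+// defined in scan.go of this package).
+func exampleDedup() []RR {
+ rr1, _ := NewRR("example.org. 3600 IN A 192.0.2.1")
+ rr2, _ := NewRR("example.org. 60 IN A 192.0.2.1")
+ return Dedup([]RR{rr1, rr2}, nil) // one A record, TTL 60
+}
+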
+// normalizedString returns a normalized string from r. The TTL
+// is removed and the domain name is lowercased. We go from this:
+// DomainName<TAB>TTL<TAB>CLASS<TAB>TYPE<TAB>RDATA to:
+// lowercasename<TAB>CLASS<TAB>TYPE...
+func normalizedString(r RR) string {
+ // A string Go DNS makes has: domainname<TAB>TTL<TAB>...
+ b := []byte(r.String())
+
+ // find the first non-escaped tab, then another, so we capture where the TTL lives.
+ esc := false
+ ttlStart, ttlEnd := 0, 0
+ for i := 0; i < len(b) && ttlEnd == 0; i++ {
+ switch {
+ case b[i] == '\\':
+ esc = !esc
+ case b[i] == '\t' && !esc:
+ if ttlStart == 0 {
+ ttlStart = i
+ continue
+ }
+ if ttlEnd == 0 {
+ ttlEnd = i
+ }
+ case b[i] >= 'A' && b[i] <= 'Z' && !esc:
+ b[i] += 32
+ default:
+ esc = false
+ }
+ }
+
+ // remove TTL.
+ copy(b[ttlStart:], b[ttlEnd:])
+ cut := ttlEnd - ttlStart
+ return string(b[:len(b)-cut])
+}
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
new file mode 100644
index 0000000..671018b
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -0,0 +1,1408 @@
+package dns
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+const maxTok = 2048 // Largest token we can return.
+
+// The maximum depth of $INCLUDE directives supported by the
+// ZoneParser API.
+const maxIncludeDepth = 7
+
+// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
+// * Add ownernames if they are left blank;
+// * Suppress sequences of spaces;
+// * Make each RR fit on one line (_NEWLINE is sent as last)
+// * Handle comments: ;
+// * Handle braces - anywhere.
+const (
+ // Zonefile
+ zEOF = iota
+ zString
+ zBlank
+ zQuote
+ zNewline
+ zRrtpe
+ zOwner
+ zClass
+ zDirOrigin // $ORIGIN
+ zDirTTL // $TTL
+ zDirInclude // $INCLUDE
+ zDirGenerate // $GENERATE
+
+ // Privatekey file
+ zValue
+ zKey
+
+ zExpectOwnerDir // Ownername
+ zExpectOwnerBl // Whitespace after the ownername
+ zExpectAny // Expect rrtype, ttl or class
+ zExpectAnyNoClass // Expect rrtype or ttl
+ zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
+ zExpectAnyNoTTL // Expect rrtype or class
+ zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL
+ zExpectRrtype // Expect rrtype
+ zExpectRrtypeBl // Whitespace BEFORE rrtype
+ zExpectRdata // The first element of the rdata
+ zExpectDirTTLBl // Space after directive $TTL
+ zExpectDirTTL // Directive $TTL
+ zExpectDirOriginBl // Space after directive $ORIGIN
+ zExpectDirOrigin // Directive $ORIGIN
+ zExpectDirIncludeBl // Space after directive $INCLUDE
+ zExpectDirInclude // Directive $INCLUDE
+ zExpectDirGenerate // Directive $GENERATE
+ zExpectDirGenerateBl // Space after directive $GENERATE
+)
+
+// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
+// where the error occurred.
+type ParseError struct {
+ file string
+ err string
+ lex lex
+}
+
+func (e *ParseError) Error() (s string) {
+ if e.file != "" {
+ s = e.file + ": "
+ }
+ s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " +
+ strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column)
+ return
+}
+
+type lex struct {
+ token string // text of the token
+ err bool // when true, token text has lexer error
+ value uint8 // value: zString, _BLANK, etc.
+ torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
+ line int // line in the file
+ column int // column in the file
+}
+
+// Token holds a token that is returned when a zone file is parsed.
+type Token struct {
+ // The scanned resource record; only valid when Error is nil.
+ RR
+ // When an error occurred, this has the error specifics.
+ Error *ParseError
+ // A potential comment positioned after the RR and on the same line.
+ Comment string
+}
+
+// ttlState describes the state necessary to fill in an omitted RR TTL
+type ttlState struct {
+ ttl uint32 // ttl is the current default TTL
+ isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
+}
+
+// NewRR reads the RR contained in the string s. Only the first RR is
+// returned. If s contains no records, NewRR will return nil with no
+// error.
+//
+// The class defaults to IN and TTL defaults to 3600. The full zone
+// file syntax like $TTL, $ORIGIN, etc. is supported.
+//
+// All fields of the returned RR are set, except RR.Header().Rdlength
+// which is set to 0.
+func NewRR(s string) (RR, error) {
+ if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline
+ return ReadRR(strings.NewReader(s+"\n"), "")
+ }
+ return ReadRR(strings.NewReader(s), "")
+}
+
+// ReadRR reads the RR contained in r.
+//
+// The string file is used in error reporting and to resolve relative
+// $INCLUDE directives.
+//
+// See NewRR for more documentation.
+func ReadRR(r io.Reader, file string) (RR, error) {
+ zp := NewZoneParser(r, ".", file)
+ zp.SetDefaultTTL(defaultTtl)
+ zp.SetIncludeAllowed(true)
+ rr, _ := zp.Next()
+ return rr, zp.Err()
+}
+
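+// Editor's illustrative sketch, not part of the upstream file: NewRR is the
+// usual entry point for one-off records; a type assertion then reaches the
+// concrete rdata (the A type is defined elsewhere in this package).
+func exampleNewRR() (RR, error) {
+ rr, err := NewRR("example.org. 3600 IN A 192.0.2.1")
+ if err != nil {
+ return nil, err
+ }
+ if a, ok := rr.(*A); ok {
+ _ = a.A // the parsed net.IP
+ }
+ return rr, nil
+}
+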
+// ParseZone reads an RFC 1035 style zonefile from r. It returns
+// Tokens on the returned channel, each consisting of either a
+// parsed RR and optional comment or a nil RR and an error. The
+// channel is closed by ParseZone when the end of r is reached.
+//
+// The string file is used in error reporting and to resolve relative
+// $INCLUDE directives. The string origin is used as the initial
+// origin, as if the file would start with an $ORIGIN directive.
+//
+// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
+// supported. Note that $GENERATE's range supports up to a maximum
+// of 65535 steps.
+//
+// Basic usage pattern when reading from a string (z) containing the
+// zone data:
+//
+// for x := range dns.ParseZone(strings.NewReader(z), "", "") {
+// if x.Error != nil {
+// // log.Println(x.Error)
+// } else {
+// // Do something with x.RR
+// }
+// }
+//
+// Comments specified after an RR (and on the same line!) are
+// returned too:
+//
+// foo. IN A 10.0.0.1 ; this is a comment
+//
+// The text "; this is comment" is returned in Token.Comment.
+// Comments inside the RR are returned concatenated along with the
+// RR. Comments on a line by themselves are discarded.
+//
+// To prevent memory leaks it is important to always fully drain the
+// returned channel. If an error occurs, it will always be the last
+// Token sent on the channel.
+//
+// Deprecated: New users should prefer the ZoneParser API.
+func ParseZone(r io.Reader, origin, file string) chan *Token {
+ t := make(chan *Token, 10000)
+ go parseZone(r, origin, file, t)
+ return t
+}
+
+func parseZone(r io.Reader, origin, file string, t chan *Token) {
+ defer close(t)
+
+ zp := NewZoneParser(r, origin, file)
+ zp.SetIncludeAllowed(true)
+
+ for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
+ t <- &Token{RR: rr, Comment: zp.Comment()}
+ }
+
+ if err := zp.Err(); err != nil {
+ pe, ok := err.(*ParseError)
+ if !ok {
+ pe = &ParseError{file: file, err: err.Error()}
+ }
+
+ t <- &Token{Error: pe}
+ }
+}
+
+// ZoneParser is a parser for an RFC 1035 style zonefile.
+//
+// Each parsed RR in the zone is returned sequentially from Next. An
+// optional comment can be retrieved with Comment.
+//
+// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all
+// supported, although $INCLUDE is disabled by default.
+// Note that $GENERATE's range supports up to a maximum of 65535 steps.
+//
+// Basic usage pattern when reading from a string (z) containing the
+// zone data:
+//
+// zp := NewZoneParser(strings.NewReader(z), "", "")
+//
+// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
+// // Do something with rr
+// }
+//
+// if err := zp.Err(); err != nil {
+// // log.Println(err)
+// }
+//
+// Comments specified after an RR (and on the same line!) are
+// returned too:
+//
+// foo. IN A 10.0.0.1 ; this is a comment
+//
+// The text "; this is comment" is returned from Comment. Comments inside
+// the RR are returned concatenated along with the RR. Comments on a line
+// by themselves are discarded.
+type ZoneParser struct {
+ c *zlexer
+
+ parseErr *ParseError
+
+ origin string
+ file string
+
+ defttl *ttlState
+
+ h RR_Header
+
+ // sub is used to parse $INCLUDE files and $GENERATE directives.
+ // Next, by calling subNext, forwards the resulting RRs from this
+ // sub parser to the calling code.
+ sub *ZoneParser
+ osFile *os.File
+
+ includeDepth uint8
+
+ includeAllowed bool
+ generateDisallowed bool
+}
+
+// NewZoneParser returns an RFC 1035 style zonefile parser that reads
+// from r.
+//
+// The string file is used in error reporting and to resolve relative
+// $INCLUDE directives. The string origin is used as the initial
+// origin, as if the file would start with an $ORIGIN directive.
+func NewZoneParser(r io.Reader, origin, file string) *ZoneParser {
+ var pe *ParseError
+ if origin != "" {
+ origin = Fqdn(origin)
+ if _, ok := IsDomainName(origin); !ok {
+ pe = &ParseError{file, "bad initial origin name", lex{}}
+ }
+ }
+
+ return &ZoneParser{
+ c: newZLexer(r),
+
+ parseErr: pe,
+
+ origin: origin,
+ file: file,
+ }
+}
+
+// SetDefaultTTL sets the parser's default TTL to ttl.
+func (zp *ZoneParser) SetDefaultTTL(ttl uint32) {
+ zp.defttl = &ttlState{ttl, false}
+}
+
+// SetIncludeAllowed controls whether $INCLUDE directives are
+// allowed. $INCLUDE directives are not supported by default.
+//
+// The $INCLUDE directive will open and read from a user controlled
+// file on the system. Even if the file is not a valid zonefile, the
+// contents of the file may be revealed in error messages, such as:
+//
+// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31
+// /etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125
+func (zp *ZoneParser) SetIncludeAllowed(v bool) {
+ zp.includeAllowed = v
+}
+
+// Err returns the first non-EOF error that was encountered by the
+// ZoneParser.
+func (zp *ZoneParser) Err() error {
+ if zp.parseErr != nil {
+ return zp.parseErr
+ }
+
+ if zp.sub != nil {
+ if err := zp.sub.Err(); err != nil {
+ return err
+ }
+ }
+
+ return zp.c.Err()
+}
+
+func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
+ zp.parseErr = &ParseError{zp.file, err, l}
+ return nil, false
+}
+
+// Comment returns an optional text comment that occurred alongside
+// the RR.
+func (zp *ZoneParser) Comment() string {
+ if zp.parseErr != nil {
+ return ""
+ }
+
+ if zp.sub != nil {
+ return zp.sub.Comment()
+ }
+
+ return zp.c.Comment()
+}
+
+func (zp *ZoneParser) subNext() (RR, bool) {
+ if rr, ok := zp.sub.Next(); ok {
+ return rr, true
+ }
+
+ if zp.sub.osFile != nil {
+ zp.sub.osFile.Close()
+ zp.sub.osFile = nil
+ }
+
+ if zp.sub.Err() != nil {
+ // We have errors to surface.
+ return nil, false
+ }
+
+ zp.sub = nil
+ return zp.Next()
+}
+
+// Next advances the parser to the next RR in the zonefile and
+// returns (RR, true). It will return (nil, false) when the
+// parsing stops, either by reaching the end of the input or an
+// error. After Next returns (nil, false), the Err method will return
+// any error that occurred during parsing.
+func (zp *ZoneParser) Next() (RR, bool) {
+ if zp.parseErr != nil {
+ return nil, false
+ }
+ if zp.sub != nil {
+ return zp.subNext()
+ }
+
+ // 6 possible beginnings of a line (_ is a space):
+ //
+ // 0. zRRTYPE -> all omitted until the rrtype
+ // 1. zOwner _ zRrtype -> class/ttl omitted
+ // 2. zOwner _ zString _ zRrtype -> class omitted
+ // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class
+ // 4. zOwner _ zClass _ zRrtype -> ttl omitted
+ // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed)
+ //
+ // After detecting these, we know the zRrtype so we can jump to functions
+ // handling the rdata for each of these types.
+
+ st := zExpectOwnerDir // initial state
+ h := &zp.h
+
+ for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() {
+ // zlexer spotted an error already
+ if l.err {
+ return zp.setParseError(l.token, l)
+ }
+
+ switch st {
+ case zExpectOwnerDir:
+ // We can also expect a directive, like $TTL or $ORIGIN
+ if zp.defttl != nil {
+ h.Ttl = zp.defttl.ttl
+ }
+
+ h.Class = ClassINET
+
+ switch l.value {
+ case zNewline:
+ st = zExpectOwnerDir
+ case zOwner:
+ name, ok := toAbsoluteName(l.token, zp.origin)
+ if !ok {
+ return zp.setParseError("bad owner name", l)
+ }
+
+ h.Name = name
+
+ st = zExpectOwnerBl
+ case zDirTTL:
+ st = zExpectDirTTLBl
+ case zDirOrigin:
+ st = zExpectDirOriginBl
+ case zDirInclude:
+ st = zExpectDirIncludeBl
+ case zDirGenerate:
+ st = zExpectDirGenerateBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+
+ st = zExpectRdata
+ case zClass:
+ h.Class = l.torc
+
+ st = zExpectAnyNoClassBl
+ case zBlank:
+ // Discard, can happen when there is nothing on the
+ // line except the RR type
+ case zString:
+ ttl, ok := stringToTTL(l.token)
+ if !ok {
+ return zp.setParseError("not a TTL", l)
+ }
+
+ h.Ttl = ttl
+
+ if zp.defttl == nil || !zp.defttl.isByDirective {
+ zp.defttl = &ttlState{ttl, false}
+ }
+
+ st = zExpectAnyNoTTLBl
+ default:
+ return zp.setParseError("syntax error at beginning", l)
+ }
+ case zExpectDirIncludeBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank after $INCLUDE-directive", l)
+ }
+
+ st = zExpectDirInclude
+ case zExpectDirInclude:
+ if l.value != zString {
+ return zp.setParseError("expecting $INCLUDE value, not this...", l)
+ }
+
+ neworigin := zp.origin // there may optionally be a new origin set after the filename; if not, use the current one
+ switch l, _ := zp.c.Next(); l.value {
+ case zBlank:
+ l, _ := zp.c.Next()
+ if l.value == zString {
+ name, ok := toAbsoluteName(l.token, zp.origin)
+ if !ok {
+ return zp.setParseError("bad origin name", l)
+ }
+
+ neworigin = name
+ }
+ case zNewline, zEOF:
+ // Ok
+ default:
+ return zp.setParseError("garbage after $INCLUDE", l)
+ }
+
+ if !zp.includeAllowed {
+ return zp.setParseError("$INCLUDE directive not allowed", l)
+ }
+ if zp.includeDepth >= maxIncludeDepth {
+ return zp.setParseError("too deeply nested $INCLUDE", l)
+ }
+
+ // Start with the new file
+ includePath := l.token
+ if !filepath.IsAbs(includePath) {
+ includePath = filepath.Join(filepath.Dir(zp.file), includePath)
+ }
+
+ r1, e1 := os.Open(includePath)
+ if e1 != nil {
+ var as string
+ if !filepath.IsAbs(l.token) {
+ as = fmt.Sprintf(" as `%s'", includePath)
+ }
+
+ msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1)
+ return zp.setParseError(msg, l)
+ }
+
+ zp.sub = NewZoneParser(r1, neworigin, includePath)
+ zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1
+ zp.sub.SetIncludeAllowed(true)
+ return zp.subNext()
+ case zExpectDirTTLBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank after $TTL-directive", l)
+ }
+
+ st = zExpectDirTTL
+ case zExpectDirTTL:
+ if l.value != zString {
+ return zp.setParseError("expecting $TTL value, not this...", l)
+ }
+
+ if err := slurpRemainder(zp.c); err != nil {
+ return zp.setParseError(err.err, err.lex)
+ }
+
+ ttl, ok := stringToTTL(l.token)
+ if !ok {
+ return zp.setParseError("expecting $TTL value, not this...", l)
+ }
+
+ zp.defttl = &ttlState{ttl, true}
+
+ st = zExpectOwnerDir
+ case zExpectDirOriginBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank after $ORIGIN-directive", l)
+ }
+
+ st = zExpectDirOrigin
+ case zExpectDirOrigin:
+ if l.value != zString {
+ return zp.setParseError("expecting $ORIGIN value, not this...", l)
+ }
+
+ if err := slurpRemainder(zp.c); err != nil {
+ return zp.setParseError(err.err, err.lex)
+ }
+
+ name, ok := toAbsoluteName(l.token, zp.origin)
+ if !ok {
+ return zp.setParseError("bad origin name", l)
+ }
+
+ zp.origin = name
+
+ st = zExpectOwnerDir
+ case zExpectDirGenerateBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank after $GENERATE-directive", l)
+ }
+
+ st = zExpectDirGenerate
+ case zExpectDirGenerate:
+ if zp.generateDisallowed {
+ return zp.setParseError("nested $GENERATE directive not allowed", l)
+ }
+ if l.value != zString {
+ return zp.setParseError("expecting $GENERATE value, not this...", l)
+ }
+
+ return zp.generate(l)
+ case zExpectOwnerBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank after owner", l)
+ }
+
+ st = zExpectAny
+ case zExpectAny:
+ switch l.value {
+ case zRrtpe:
+ if zp.defttl == nil {
+ return zp.setParseError("missing TTL with no previous value", l)
+ }
+
+ h.Rrtype = l.torc
+
+ st = zExpectRdata
+ case zClass:
+ h.Class = l.torc
+
+ st = zExpectAnyNoClassBl
+ case zString:
+ ttl, ok := stringToTTL(l.token)
+ if !ok {
+ return zp.setParseError("not a TTL", l)
+ }
+
+ h.Ttl = ttl
+
+ if zp.defttl == nil || !zp.defttl.isByDirective {
+ zp.defttl = &ttlState{ttl, false}
+ }
+
+ st = zExpectAnyNoTTLBl
+ default:
+ return zp.setParseError("expecting RR type, TTL or class, not this...", l)
+ }
+ case zExpectAnyNoClassBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank before class", l)
+ }
+
+ st = zExpectAnyNoClass
+ case zExpectAnyNoTTLBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank before TTL", l)
+ }
+
+ st = zExpectAnyNoTTL
+ case zExpectAnyNoTTL:
+ switch l.value {
+ case zClass:
+ h.Class = l.torc
+
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+
+ st = zExpectRdata
+ default:
+ return zp.setParseError("expecting RR type or class, not this...", l)
+ }
+ case zExpectAnyNoClass:
+ switch l.value {
+ case zString:
+ ttl, ok := stringToTTL(l.token)
+ if !ok {
+ return zp.setParseError("not a TTL", l)
+ }
+
+ h.Ttl = ttl
+
+ if zp.defttl == nil || !zp.defttl.isByDirective {
+ zp.defttl = &ttlState{ttl, false}
+ }
+
+ st = zExpectRrtypeBl
+ case zRrtpe:
+ h.Rrtype = l.torc
+
+ st = zExpectRdata
+ default:
+ return zp.setParseError("expecting RR type or TTL, not this...", l)
+ }
+ case zExpectRrtypeBl:
+ if l.value != zBlank {
+ return zp.setParseError("no blank before RR type", l)
+ }
+
+ st = zExpectRrtype
+ case zExpectRrtype:
+ if l.value != zRrtpe {
+ return zp.setParseError("unknown RR type", l)
+ }
+
+ h.Rrtype = l.torc
+
+ st = zExpectRdata
+ case zExpectRdata:
+ var rr RR
+ if newFn, ok := TypeToRR[h.Rrtype]; ok && canParseAsRR(h.Rrtype) {
+ rr = newFn()
+ *rr.Header() = *h
+ } else {
+ rr = &RFC3597{Hdr: *h}
+ }
+
+ _, isPrivate := rr.(*PrivateRR)
+ if !isPrivate && zp.c.Peek().token == "" {
+ // This is a dynamic update rr.
+
+ // TODO(tmthrgd): Previously slurpRemainder was only called
+ // for certain RR types, which may have been important.
+ if err := slurpRemainder(zp.c); err != nil {
+ return zp.setParseError(err.err, err.lex)
+ }
+
+ return rr, true
+ } else if l.value == zNewline {
+ return zp.setParseError("unexpected newline", l)
+ }
+
+ if err := rr.parse(zp.c, zp.origin); err != nil {
+ // err is a concrete *ParseError without the file field set.
+ // The setParseError call below will construct a new
+ // *ParseError with file set to zp.file.
+
+ // If err.lex is nil then we have encountered an unknown RR type;
+ // in that case we substitute our current lex token.
+ if err.lex == (lex{}) {
+ return zp.setParseError(err.err, l)
+ }
+
+ return zp.setParseError(err.err, err.lex)
+ }
+
+ return rr, true
+ }
+ }
+
+ // If we get here and h.Rrtype is still zero, we haven't parsed anything. This
+ // is not an error, because an empty zone file is still a zone file.
+ return nil, false
+}
+
+// canParseAsRR returns true if the record type can be parsed as a
+// concrete RR. It blacklists certain record types that must be parsed
+// according to RFC 3597 because they lack a presentation format.
+func canParseAsRR(rrtype uint16) bool {
+ switch rrtype {
+ case TypeANY, TypeNULL, TypeOPT, TypeTSIG:
+ return false
+ default:
+ return true
+ }
+}
+
+type zlexer struct {
+ br io.ByteReader
+
+ readErr error
+
+ line int
+ column int
+
+ comBuf string
+ comment string
+
+ l lex
+ cachedL *lex
+
+ brace int
+ quote bool
+ space bool
+ commt bool
+ rrtype bool
+ owner bool
+
+ nextL bool
+
+ eol bool // end-of-line
+}
+
+func newZLexer(r io.Reader) *zlexer {
+ br, ok := r.(io.ByteReader)
+ if !ok {
+ br = bufio.NewReaderSize(r, 1024)
+ }
+
+ return &zlexer{
+ br: br,
+
+ line: 1,
+
+ owner: true,
+ }
+}
+
+func (zl *zlexer) Err() error {
+ if zl.readErr == io.EOF {
+ return nil
+ }
+
+ return zl.readErr
+}
+
+// readByte returns the next byte from the input
+func (zl *zlexer) readByte() (byte, bool) {
+ if zl.readErr != nil {
+ return 0, false
+ }
+
+ c, err := zl.br.ReadByte()
+ if err != nil {
+ zl.readErr = err
+ return 0, false
+ }
+
+ // Delay the newline handling until the next token is delivered;
+ // this fixes off-by-one errors when reporting a parse error.
+ if zl.eol {
+ zl.line++
+ zl.column = 0
+ zl.eol = false
+ }
+
+ if c == '\n' {
+ zl.eol = true
+ } else {
+ zl.column++
+ }
+
+ return c, true
+}
+
+func (zl *zlexer) Peek() lex {
+ if zl.nextL {
+ return zl.l
+ }
+
+ l, ok := zl.Next()
+ if !ok {
+ return l
+ }
+
+ if zl.nextL {
+ // Cache l. Next returns zl.cachedL then zl.l.
+ zl.cachedL = &l
+ } else {
+ // In this case l == zl.l, so we just tell Next to return zl.l.
+ zl.nextL = true
+ }
+
+ return l
+}
+
+func (zl *zlexer) Next() (lex, bool) {
+ l := &zl.l
+ switch {
+ case zl.cachedL != nil:
+ l, zl.cachedL = zl.cachedL, nil
+ return *l, true
+ case zl.nextL:
+ zl.nextL = false
+ return *l, true
+ case l.err:
+ // Parsing errors should be sticky.
+ return lex{value: zEOF}, false
+ }
+
+ var (
+ str [maxTok]byte // Hold string text
+ com [maxTok]byte // Hold comment text
+
+ stri int // Offset in str (0 means empty)
+ comi int // Offset in com (0 means empty)
+
+ escape bool
+ )
+
+ if zl.comBuf != "" {
+ comi = copy(com[:], zl.comBuf)
+ zl.comBuf = ""
+ }
+
+ zl.comment = ""
+
+ for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
+ l.line, l.column = zl.line, zl.column
+
+ if stri >= len(str) {
+ l.token = "token length insufficient for parsing"
+ l.err = true
+ return *l, true
+ }
+ if comi >= len(com) {
+ l.token = "comment length insufficient for parsing"
+ l.err = true
+ return *l, true
+ }
+
+ switch x {
+ case ' ', '\t':
+ if escape || zl.quote {
+ // Inside quotes or escaped this is legal.
+ str[stri] = x
+ stri++
+
+ escape = false
+ break
+ }
+
+ if zl.commt {
+ com[comi] = x
+ comi++
+ break
+ }
+
+ var retL lex
+ if stri == 0 {
+ // Space directly in the beginning, handled in the grammar
+ } else if zl.owner {
+ // If we have a string and its the first, make it an owner
+ l.value = zOwner
+ l.token = string(str[:stri])
+
+ // escaped $-directives start with a \ not a $, so this switch is safe
+ switch strings.ToUpper(l.token) {
+ case "$TTL":
+ l.value = zDirTTL
+ case "$ORIGIN":
+ l.value = zDirOrigin
+ case "$INCLUDE":
+ l.value = zDirInclude
+ case "$GENERATE":
+ l.value = zDirGenerate
+ }
+
+ retL = *l
+ } else {
+ l.value = zString
+ l.token = string(str[:stri])
+
+ if !zl.rrtype {
+ tokenUpper := strings.ToUpper(l.token)
+ if t, ok := StringToType[tokenUpper]; ok {
+ l.value = zRrtpe
+ l.torc = t
+
+ zl.rrtype = true
+ } else if strings.HasPrefix(tokenUpper, "TYPE") {
+ t, ok := typeToInt(l.token)
+ if !ok {
+ l.token = "unknown RR type"
+ l.err = true
+ return *l, true
+ }
+
+ l.value = zRrtpe
+ l.torc = t
+
+ zl.rrtype = true
+ }
+
+ if t, ok := StringToClass[tokenUpper]; ok {
+ l.value = zClass
+ l.torc = t
+ } else if strings.HasPrefix(tokenUpper, "CLASS") {
+ t, ok := classToInt(l.token)
+ if !ok {
+ l.token = "unknown class"
+ l.err = true
+ return *l, true
+ }
+
+ l.value = zClass
+ l.torc = t
+ }
+ }
+
+ retL = *l
+ }
+
+ zl.owner = false
+
+ if !zl.space {
+ zl.space = true
+
+ l.value = zBlank
+ l.token = " "
+
+ if retL == (lex{}) {
+ return *l, true
+ }
+
+ zl.nextL = true
+ }
+
+ if retL != (lex{}) {
+ return retL, true
+ }
+ case ';':
+ if escape || zl.quote {
+ // Inside quotes or escaped this is legal.
+ str[stri] = x
+ stri++
+
+ escape = false
+ break
+ }
+
+ zl.commt = true
+ zl.comBuf = ""
+
+ if comi > 1 {
+ // A newline was previously seen inside a comment that
+ // was inside braces and we delayed adding it until now.
+ com[comi] = ' ' // convert newline to space
+ comi++
+ if comi >= len(com) {
+ l.token = "comment length insufficient for parsing"
+ l.err = true
+ return *l, true
+ }
+ }
+
+ com[comi] = ';'
+ comi++
+
+ if stri > 0 {
+ zl.comBuf = string(com[:comi])
+
+ l.value = zString
+ l.token = string(str[:stri])
+ return *l, true
+ }
+ case '\r':
+ escape = false
+
+ if zl.quote {
+ str[stri] = x
+ stri++
+ }
+
+ // discard if outside of quotes
+ case '\n':
+ escape = false
+
+ // A newline inside quotes is kept verbatim
+ if zl.quote {
+ str[stri] = x
+ stri++
+ break
+ }
+
+ if zl.commt {
+ // Reset a comment
+ zl.commt = false
+ zl.rrtype = false
+
+ // If not in a brace this ends the comment AND the RR
+ if zl.brace == 0 {
+ zl.owner = true
+
+ l.value = zNewline
+ l.token = "\n"
+ zl.comment = string(com[:comi])
+ return *l, true
+ }
+
+ zl.comBuf = string(com[:comi])
+ break
+ }
+
+ if zl.brace == 0 {
+ // If there is previous text, we should output it here
+ var retL lex
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+
+ if !zl.rrtype {
+ tokenUpper := strings.ToUpper(l.token)
+ if t, ok := StringToType[tokenUpper]; ok {
+ zl.rrtype = true
+
+ l.value = zRrtpe
+ l.torc = t
+ }
+ }
+
+ retL = *l
+ }
+
+ l.value = zNewline
+ l.token = "\n"
+
+ zl.comment = zl.comBuf
+ zl.comBuf = ""
+ zl.rrtype = false
+ zl.owner = true
+
+ if retL != (lex{}) {
+ zl.nextL = true
+ return retL, true
+ }
+
+ return *l, true
+ }
+ case '\\':
+ // comments do not get escaped chars, everything is copied
+ if zl.commt {
+ com[comi] = x
+ comi++
+ break
+ }
+
+ // something already escaped must be in string
+ if escape {
+ str[stri] = x
+ stri++
+
+ escape = false
+ break
+ }
+
+ // something escaped outside of string gets added to string
+ str[stri] = x
+ stri++
+
+ escape = true
+ case '"':
+ if zl.commt {
+ com[comi] = x
+ comi++
+ break
+ }
+
+ if escape {
+ str[stri] = x
+ stri++
+
+ escape = false
+ break
+ }
+
+ zl.space = false
+
+ // send previous gathered text and the quote
+ var retL lex
+ if stri != 0 {
+ l.value = zString
+ l.token = string(str[:stri])
+
+ retL = *l
+ }
+
+ // send quote itself as separate token
+ l.value = zQuote
+ l.token = "\""
+
+ zl.quote = !zl.quote
+
+ if retL != (lex{}) {
+ zl.nextL = true
+ return retL, true
+ }
+
+ return *l, true
+ case '(', ')':
+ if zl.commt {
+ com[comi] = x
+ comi++
+ break
+ }
+
+ if escape || zl.quote {
+ // Inside quotes or escaped this is legal.
+ str[stri] = x
+ stri++
+
+ escape = false
+ break
+ }
+
+ switch x {
+ case ')':
+ zl.brace--
+
+ if zl.brace < 0 {
+ l.token = "extra closing brace"
+ l.err = true
+ return *l, true
+ }
+ case '(':
+ zl.brace++
+ }
+ default:
+ escape = false
+
+ if zl.commt {
+ com[comi] = x
+ comi++
+ break
+ }
+
+ str[stri] = x
+ stri++
+
+ zl.space = false
+ }
+ }
+
+ if zl.readErr != nil && zl.readErr != io.EOF {
+ // Don't return any tokens after a read error occurs.
+ return lex{value: zEOF}, false
+ }
+
+ var retL lex
+ if stri > 0 {
+ // Send remainder of str
+ l.value = zString
+ l.token = string(str[:stri])
+ retL = *l
+
+ if comi <= 0 {
+ return retL, true
+ }
+ }
+
+ if comi > 0 {
+ // Send remainder of com
+ l.value = zNewline
+ l.token = "\n"
+ zl.comment = string(com[:comi])
+
+ if retL != (lex{}) {
+ zl.nextL = true
+ return retL, true
+ }
+
+ return *l, true
+ }
+
+ if zl.brace != 0 {
+ l.token = "unbalanced brace"
+ l.err = true
+ return *l, true
+ }
+
+ return lex{value: zEOF}, false
+}
+
+func (zl *zlexer) Comment() string {
+ if zl.l.err {
+ return ""
+ }
+
+ return zl.comment
+}
+
+// Extract the class number from CLASSxx
+func classToInt(token string) (uint16, bool) {
+ offset := 5
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ class, err := strconv.ParseUint(token[offset:], 10, 16)
+ if err != nil {
+ return 0, false
+ }
+ return uint16(class), true
+}
+
+// Extract the rr number from TYPExxx
+func typeToInt(token string) (uint16, bool) {
+ offset := 4
+ if len(token) < offset+1 {
+ return 0, false
+ }
+ typ, err := strconv.ParseUint(token[offset:], 10, 16)
+ if err != nil {
+ return 0, false
+ }
+ return uint16(typ), true
+}
+
+// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
+func stringToTTL(token string) (uint32, bool) {
+ var s, i uint32
+ for _, c := range token {
+ switch c {
+ case 's', 'S':
+ s += i
+ i = 0
+ case 'm', 'M':
+ s += i * 60
+ i = 0
+ case 'h', 'H':
+ s += i * 60 * 60
+ i = 0
+ case 'd', 'D':
+ s += i * 60 * 60 * 24
+ i = 0
+ case 'w', 'W':
+ s += i * 60 * 60 * 24 * 7
+ i = 0
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ i *= 10
+ i += uint32(c) - '0'
+ default:
+ return 0, false
+ }
+ }
+ return s + i, true
+}
+
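+// Editor's illustrative sketch, not part of the upstream file: the
+// accumulator above composes units left to right, so "1h30m" is
+// 3600 + 30*60 seconds and a bare "300" is returned unchanged.
+func exampleStringToTTL() uint32 {
+ ttl, _ := stringToTTL("1h30m")
+ return ttl // 5400
+}
+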
+// Parse LOC records' <digits>[.<digits>][mM] into a
+// mantissa exponent format. Token should contain the entire
+// string (i.e. no spaces allowed).
+func stringToCm(token string) (e, m uint8, ok bool) {
+ if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' {
+ token = token[0 : len(token)-1]
+ }
+ s := strings.SplitN(token, ".", 2)
+ var meters, cmeters, val int
+ var err error
+ switch len(s) {
+ case 2:
+ if cmeters, err = strconv.Atoi(s[1]); err != nil {
+ return
+ }
+ fallthrough
+ case 1:
+ if meters, err = strconv.Atoi(s[0]); err != nil {
+ return
+ }
+ case 0:
+ // huh?
+ return 0, 0, false
+ }
+ ok = true
+ if meters > 0 {
+ e = 2
+ val = meters
+ } else {
+ e = 0
+ val = cmeters
+ }
+ for val > 10 {
+ e++
+ val /= 10
+ }
+ if e > 9 {
+ ok = false
+ }
+ m = uint8(val)
+ return
+}
+
+func toAbsoluteName(name, origin string) (absolute string, ok bool) {
+ // check for an explicit origin reference
+ if name == "@" {
+ // require a nonempty origin
+ if origin == "" {
+ return "", false
+ }
+ return origin, true
+ }
+
+ // require a valid domain name
+ _, ok = IsDomainName(name)
+ if !ok || name == "" {
+ return "", false
+ }
+
+ // check if name is already absolute
+ if IsFqdn(name) {
+ return name, true
+ }
+
+ // require a nonempty origin
+ if origin == "" {
+ return "", false
+ }
+ return appendOrigin(name, origin), true
+}
+
+func appendOrigin(name, origin string) string {
+ if origin == "." {
+ return name + origin
+ }
+ return name + "." + origin
+}
+
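+// Editor's illustrative sketch, not part of the upstream file: relative
+// names pick up the origin, "@" is the origin itself, and names that are
+// already fully qualified pass through untouched.
+func exampleToAbsoluteName() {
+ name, _ := toAbsoluteName("www", "example.org.") // "www.example.org."
+ at, _ := toAbsoluteName("@", "example.org.") // "example.org."
+ fqdn, _ := toAbsoluteName("example.net.", "example.org.") // "example.net."
+ _, _, _ = name, at, fqdn
+}
+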
+// LOC record helper function
+func locCheckNorth(token string, latitude uint32) (uint32, bool) {
+ switch token {
+ case "n", "N":
+ return LOC_EQUATOR + latitude, true
+ case "s", "S":
+ return LOC_EQUATOR - latitude, true
+ }
+ return latitude, false
+}
+
+// LOC record helper function
+func locCheckEast(token string, longitude uint32) (uint32, bool) {
+ switch token {
+ case "e", "E":
+ return LOC_EQUATOR + longitude, true
+ case "w", "W":
+ return LOC_EQUATOR - longitude, true
+ }
+ return longitude, false
+}
+
+// "Eat" the rest of the "line"
+func slurpRemainder(c *zlexer) *ParseError {
+ l, _ := c.Next()
+ switch l.value {
+ case zBlank:
+ l, _ = c.Next()
+ if l.value != zNewline && l.value != zEOF {
+ return &ParseError{"", "garbage after rdata", l}
+ }
+ case zNewline:
+ case zEOF:
+ default:
+ return &ParseError{"", "garbage after rdata", l}
+ }
+ return nil
+}
+
+// Parse a 64-bit NodeID/Locator64 written as four colon-separated groups of
+// four hex digits, e.g. "0014:4fff:ff20:ee64". Used for the NID and L64 records.
+func stringToNodeID(l lex) (uint64, *ParseError) {
+ if len(l.token) < 19 {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ // There must be three colons at fixed positions; if not, it's a parse error
+ if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19]
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l}
+ }
+ return u, nil
+}
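+
+// Editor's note: for example, the token "0014:4fff:ff20:ee64" yields the
+// uint64 0x00144fffff20ee64.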
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
new file mode 100644
index 0000000..6c37b2e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -0,0 +1,1764 @@
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+ "strconv"
+ "strings"
+)
+
+// Take the remainder of the rdata (which may contain embedded spaces) and
+// return the parsed string (sans the spaces) or an error.
+func endingToString(c *zlexer, errstr string) (string, *ParseError) {
+ var s string
+ l, _ := c.Next() // zString
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return s, &ParseError{"", errstr, l}
+ }
+ switch l.value {
+ case zString:
+ s += l.token
+ case zBlank: // Ok
+ default:
+ return "", &ParseError{"", errstr, l}
+ }
+ l, _ = c.Next()
+ }
+
+ return s, nil
+}
+
+// Take the remainder of the rdata (which may contain embedded spaces), split
+// it on unquoted whitespace and return the parsed string slice or an error.
+func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) {
+ // Get the remaining data until we see a zNewline
+ l, _ := c.Next()
+ if l.err {
+ return nil, &ParseError{"", errstr, l}
+ }
+
+ // Build the slice
+ s := make([]string, 0)
+ quote := false
+ empty := false
+ for l.value != zNewline && l.value != zEOF {
+ if l.err {
+ return nil, &ParseError{"", errstr, l}
+ }
+ switch l.value {
+ case zString:
+ empty = false
+ if len(l.token) > 255 {
+ // split up tokens that are larger than 255 into 255-chunks
+ sx := []string{}
+ p, i := 0, 255
+ for {
+ if i <= len(l.token) {
+ sx = append(sx, l.token[p:i])
+ } else {
+ sx = append(sx, l.token[p:])
+ break
+ }
+ p, i = p+255, i+255
+ }
+ s = append(s, sx...)
+ break
+ }
+
+ s = append(s, l.token)
+ case zBlank:
+ if quote {
+ // zBlank can only be seen in between txt parts.
+ return nil, &ParseError{"", errstr, l}
+ }
+ case zQuote:
+ if empty && quote {
+ s = append(s, "")
+ }
+ quote = !quote
+ empty = true
+ default:
+ return nil, &ParseError{"", errstr, l}
+ }
+ l, _ = c.Next()
+ }
+
+ if quote {
+ return nil, &ParseError{"", errstr, l}
+ }
+
+ return s, nil
+}
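+
+// Editor's note: a quoted string longer than 255 octets is not rejected but
+// split into consecutive 255-octet character-strings; e.g. a 300-octet token
+// becomes one 255-octet string followed by one 45-octet string.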
+
+func (rr *A) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ rr.A = net.ParseIP(l.token)
+ // IPv4 addresses cannot include ":".
+ // We do this rather than use net.IP's To4() because
+ // To4() treats IPv4-mapped IPv6 addresses as being
+ // IPv4.
+ isIPv4 := !strings.Contains(l.token, ":")
+ if rr.A == nil || !isIPv4 || l.err {
+ return &ParseError{"", "bad A A", l}
+ }
+ return slurpRemainder(c)
+}
+
+func (rr *AAAA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ rr.AAAA = net.ParseIP(l.token)
+ // IPv6 addresses must include ":", and IPv4
+ // addresses cannot include ":".
+ isIPv6 := strings.Contains(l.token, ":")
+ if rr.AAAA == nil || !isIPv6 || l.err {
+ return &ParseError{"", "bad AAAA AAAA", l}
+ }
+ return slurpRemainder(c)
+}
+
+func (rr *NS) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad NS Ns", l}
+ }
+ rr.Ns = name
+ return slurpRemainder(c)
+}
+
+func (rr *PTR) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad PTR Ptr", l}
+ }
+ rr.Ptr = name
+ return slurpRemainder(c)
+}
+
+func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad NSAP-PTR Ptr", l}
+ }
+ rr.Ptr = name
+ return slurpRemainder(c)
+}
+
+func (rr *RP) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ mbox, mboxOk := toAbsoluteName(l.token, o)
+ if l.err || !mboxOk {
+ return &ParseError{"", "bad RP Mbox", l}
+ }
+ rr.Mbox = mbox
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rr.Txt = l.token
+
+ txt, txtOk := toAbsoluteName(l.token, o)
+ if l.err || !txtOk {
+ return &ParseError{"", "bad RP Txt", l}
+ }
+ rr.Txt = txt
+
+ return slurpRemainder(c)
+}
+
+func (rr *MR) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MR Mr", l}
+ }
+ rr.Mr = name
+ return slurpRemainder(c)
+}
+
+func (rr *MB) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MB Mb", l}
+ }
+ rr.Mb = name
+ return slurpRemainder(c)
+}
+
+func (rr *MG) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MG Mg", l}
+ }
+ rr.Mg = name
+ return slurpRemainder(c)
+}
+
+func (rr *HINFO) parse(c *zlexer, o string) *ParseError {
+ chunks, e := endingToTxtSlice(c, "bad HINFO Fields")
+ if e != nil {
+ return e
+ }
+
+ if ln := len(chunks); ln == 0 {
+ return nil
+ } else if ln == 1 {
+ // Can we split it?
+ if out := strings.Fields(chunks[0]); len(out) > 1 {
+ chunks = out
+ } else {
+ chunks = append(chunks, "")
+ }
+ }
+
+ rr.Cpu = chunks[0]
+ rr.Os = strings.Join(chunks[1:], " ")
+
+ return nil
+}
+
+func (rr *MINFO) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ rmail, rmailOk := toAbsoluteName(l.token, o)
+ if l.err || !rmailOk {
+ return &ParseError{"", "bad MINFO Rmail", l}
+ }
+ rr.Rmail = rmail
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rr.Email = l.token
+
+ email, emailOk := toAbsoluteName(l.token, o)
+ if l.err || !emailOk {
+ return &ParseError{"", "bad MINFO Email", l}
+ }
+ rr.Email = email
+
+ return slurpRemainder(c)
+}
+
+func (rr *MF) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MF Mf", l}
+ }
+ rr.Mf = name
+ return slurpRemainder(c)
+}
+
+func (rr *MD) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MD Md", l}
+ }
+ rr.Md = name
+ return slurpRemainder(c)
+}
+
+func (rr *MX) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad MX Pref", l}
+ }
+ rr.Preference = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Mx = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad MX Mx", l}
+ }
+ rr.Mx = name
+
+ return slurpRemainder(c)
+}
+
+func (rr *RT) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil {
+ return &ParseError{"", "bad RT Preference", l}
+ }
+ rr.Preference = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Host = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad RT Host", l}
+ }
+ rr.Host = name
+
+ return slurpRemainder(c)
+}
+
+func (rr *AFSDB) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad AFSDB Subtype", l}
+ }
+ rr.Subtype = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Hostname = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad AFSDB Hostname", l}
+ }
+ rr.Hostname = name
+ return slurpRemainder(c)
+}
+
+func (rr *X25) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ if l.err {
+ return &ParseError{"", "bad X25 PSDNAddress", l}
+ }
+ rr.PSDNAddress = l.token
+ return slurpRemainder(c)
+}
+
+func (rr *KX) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad KX Pref", l}
+ }
+ rr.Preference = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Exchanger = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad KX Exchanger", l}
+ }
+ rr.Exchanger = name
+ return slurpRemainder(c)
+}
+
+func (rr *CNAME) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad CNAME Target", l}
+ }
+ rr.Target = name
+ return slurpRemainder(c)
+}
+
+func (rr *DNAME) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad DNAME Target", l}
+ }
+ rr.Target = name
+ return slurpRemainder(c)
+}
+
+func (rr *SOA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ ns, nsOk := toAbsoluteName(l.token, o)
+ if l.err || !nsOk {
+ return &ParseError{"", "bad SOA Ns", l}
+ }
+ rr.Ns = ns
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rr.Mbox = l.token
+
+ mbox, mboxOk := toAbsoluteName(l.token, o)
+ if l.err || !mboxOk {
+ return &ParseError{"", "bad SOA Mbox", l}
+ }
+ rr.Mbox = mbox
+
+ c.Next() // zBlank
+
+ var (
+ v uint32
+ ok bool
+ )
+ for i := 0; i < 5; i++ {
+ l, _ = c.Next()
+ if l.err {
+ return &ParseError{"", "bad SOA zone parameter", l}
+ }
+ if j, e := strconv.ParseUint(l.token, 10, 32); e != nil {
+ if i == 0 {
+ // Serial must be a number
+ return &ParseError{"", "bad SOA zone parameter", l}
+ }
+ // We allow other fields to be unitful duration strings
+ if v, ok = stringToTTL(l.token); !ok {
+ return &ParseError{"", "bad SOA zone parameter", l}
+ }
+ } else {
+ v = uint32(j)
+ }
+ switch i {
+ case 0:
+ rr.Serial = v
+ c.Next() // zBlank
+ case 1:
+ rr.Refresh = v
+ c.Next() // zBlank
+ case 2:
+ rr.Retry = v
+ c.Next() // zBlank
+ case 3:
+ rr.Expire = v
+ c.Next() // zBlank
+ case 4:
+ rr.Minttl = v
+ }
+ }
+ return slurpRemainder(c)
+}
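+
+// Editor's note: every SOA field after Mbox except the serial may use the
+// unitful duration syntax of stringToTTL, so rdata like the following
+// (hypothetical names) parses with Refresh=3600, Retry=900, Expire=1209600
+// and Minttl=86400:
+//
+//	ns1.example.org. hostmaster.example.org. 2024010101 1h 15m 2w 1d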
+
+func (rr *SRV) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SRV Priority", l}
+ }
+ rr.Priority = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SRV Weight", l}
+ }
+ rr.Weight = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SRV Port", l}
+ }
+ rr.Port = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Target = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad SRV Target", l}
+ }
+ rr.Target = name
+ return slurpRemainder(c)
+}
+
+func (rr *NAPTR) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NAPTR Order", l}
+ }
+ rr.Order = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NAPTR Preference", l}
+ }
+ rr.Preference = uint16(i)
+
+ // Flags
+ c.Next() // zBlank
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Flags", l}
+ }
+ l, _ = c.Next() // Either String or Quote
+ if l.value == zString {
+ rr.Flags = l.token
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Flags", l}
+ }
+ } else if l.value == zQuote {
+ rr.Flags = ""
+ } else {
+ return &ParseError{"", "bad NAPTR Flags", l}
+ }
+
+ // Service
+ c.Next() // zBlank
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Service", l}
+ }
+ l, _ = c.Next() // Either String or Quote
+ if l.value == zString {
+ rr.Service = l.token
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Service", l}
+ }
+ } else if l.value == zQuote {
+ rr.Service = ""
+ } else {
+ return &ParseError{"", "bad NAPTR Service", l}
+ }
+
+ // Regexp
+ c.Next() // zBlank
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Regexp", l}
+ }
+ l, _ = c.Next() // Either String or Quote
+ if l.value == zString {
+ rr.Regexp = l.token
+ l, _ = c.Next() // _QUOTE
+ if l.value != zQuote {
+ return &ParseError{"", "bad NAPTR Regexp", l}
+ }
+ } else if l.value == zQuote {
+ rr.Regexp = ""
+ } else {
+ return &ParseError{"", "bad NAPTR Regexp", l}
+ }
+
+ // Replacement (follows the closing Regexp quote)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Replacement = l.token
+
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad NAPTR Replacement", l}
+ }
+ rr.Replacement = name
+ return slurpRemainder(c)
+}
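+
+// Editor's note: an RFC 3403-style NAPTR rdata example; the Flags, Service
+// and Regexp fields must be quoted, and an empty Regexp is written "":
+//
+//	100 50 "s" "SIP+D2U" "" _sip._udp.example.org.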
+
+func (rr *TALINK) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ previousName, previousNameOk := toAbsoluteName(l.token, o)
+ if l.err || !previousNameOk {
+ return &ParseError{"", "bad TALINK PreviousName", l}
+ }
+ rr.PreviousName = previousName
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rr.NextName = l.token
+
+ nextName, nextNameOk := toAbsoluteName(l.token, o)
+ if l.err || !nextNameOk {
+ return &ParseError{"", "bad TALINK NextName", l}
+ }
+ rr.NextName = nextName
+
+ return slurpRemainder(c)
+}
+
+func (rr *LOC) parse(c *zlexer, o string) *ParseError {
+ // Non zero defaults for LOC record, see RFC 1876, Section 3.
+ rr.HorizPre = 165 // 10000
+ rr.VertPre = 162 // 10
+ rr.Size = 18 // 1
+ ok := false
+
+ // North
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 32)
+ if e != nil || l.err {
+ return &ParseError{"", "bad LOC Latitude", l}
+ }
+ rr.Latitude = 1000 * 60 * 60 * uint32(i)
+
+ c.Next() // zBlank
+ // Either number, 'N' or 'S'
+ l, _ = c.Next()
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ i, e = strconv.ParseUint(l.token, 10, 32)
+ if e != nil || l.err {
+ return &ParseError{"", "bad LOC Latitude minutes", l}
+ }
+ rr.Latitude += 1000 * 60 * uint32(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return &ParseError{"", "bad LOC Latitude seconds", l}
+ } else {
+ rr.Latitude += uint32(1000 * i)
+ }
+ c.Next() // zBlank
+ // Either number, 'N' or 'S'
+ l, _ = c.Next()
+ if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok {
+ goto East
+ }
+ // If still alive, flag an error
+ return &ParseError{"", "bad LOC Latitude North/South", l}
+
+East:
+ // East
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
+ return &ParseError{"", "bad LOC Longitude", l}
+ } else {
+ rr.Longitude = 1000 * 60 * 60 * uint32(i)
+ }
+ c.Next() // zBlank
+ // Either number, 'E' or 'W'
+ l, _ = c.Next()
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err {
+ return &ParseError{"", "bad LOC Longitude minutes", l}
+ } else {
+ rr.Longitude += 1000 * 60 * uint32(i)
+ }
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err {
+ return &ParseError{"", "bad LOC Longitude seconds", l}
+ } else {
+ rr.Longitude += uint32(1000 * i)
+ }
+ c.Next() // zBlank
+ // Either number, 'E' or 'W'
+ l, _ = c.Next()
+ if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok {
+ goto Altitude
+ }
+ // If still alive, flag an error
+ return &ParseError{"", "bad LOC Longitude East/West", l}
+
+Altitude:
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if len(l.token) == 0 || l.err {
+ return &ParseError{"", "bad LOC Altitude", l}
+ }
+ if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' {
+ l.token = l.token[0 : len(l.token)-1]
+ }
+ if i, e := strconv.ParseFloat(l.token, 32); e != nil {
+ return &ParseError{"", "bad LOC Altitude", l}
+ } else {
+ rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5)
+ }
+
+ // And now optionally the other values
+ l, _ = c.Next()
+ count := 0
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ switch count {
+ case 0: // Size
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return &ParseError{"", "bad LOC Size", l}
+ }
+ rr.Size = e&0x0f | m<<4&0xf0
+ case 1: // HorizPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return &ParseError{"", "bad LOC HorizPre", l}
+ }
+ rr.HorizPre = e&0x0f | m<<4&0xf0
+ case 2: // VertPre
+ e, m, ok := stringToCm(l.token)
+ if !ok {
+ return &ParseError{"", "bad LOC VertPre", l}
+ }
+ rr.VertPre = e&0x0f | m<<4&0xf0
+ }
+ count++
+ case zBlank:
+ // Ok
+ default:
+ return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l}
+ }
+ l, _ = c.Next()
+ }
+ return nil
+}
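+
+// Editor's note: a presentation-format example of the fields parsed above,
+// in the style of RFC 1876 (degrees/minutes/seconds with N/S and E/W,
+// altitude, then the optional Size, HorizPre and VertPre):
+//
+//	52 22 23.000 N 4 53 32.000 E -2.00m 0.00m 10000m 10m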
+
+func (rr *HIP) parse(c *zlexer, o string) *ParseError {
+ // HitLength is not represented
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad HIP PublicKeyAlgorithm", l}
+ }
+ rr.PublicKeyAlgorithm = uint8(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ if len(l.token) == 0 || l.err {
+ return &ParseError{"", "bad HIP Hit", l}
+ }
+ rr.Hit = l.token // This cannot contain spaces; see RFC 5205 Section 6.
+ rr.HitLength = uint8(len(rr.Hit)) / 2
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ if len(l.token) == 0 || l.err {
+ return &ParseError{"", "bad HIP PublicKey", l}
+ }
+ rr.PublicKey = l.token // This cannot contain spaces
+ rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey)))
+
+ // RendezvousServers (if any)
+ l, _ = c.Next()
+ var xs []string
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zString:
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad HIP RendezvousServers", l}
+ }
+ xs = append(xs, name)
+ case zBlank:
+ // Ok
+ default:
+ return &ParseError{"", "bad HIP RendezvousServers", l}
+ }
+ l, _ = c.Next()
+ }
+
+ rr.RendezvousServers = xs
+ return nil
+}
+
+func (rr *CERT) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ if v, ok := StringToCertType[l.token]; ok {
+ rr.Type = v
+ } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil {
+ return &ParseError{"", "bad CERT Type", l}
+ } else {
+ rr.Type = uint16(i)
+ }
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad CERT KeyTag", l}
+ }
+ rr.KeyTag = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ if v, ok := StringToAlgorithm[l.token]; ok {
+ rr.Algorithm = v
+ } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
+ return &ParseError{"", "bad CERT Algorithm", l}
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ s, e1 := endingToString(c, "bad CERT Certificate")
+ if e1 != nil {
+ return e1
+ }
+ rr.Certificate = s
+ return nil
+}
+
+func (rr *OPENPGPKEY) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToString(c, "bad OPENPGPKEY PublicKey")
+ if e != nil {
+ return e
+ }
+ rr.PublicKey = s
+ return nil
+}
+
+func (rr *CSYNC) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ j, e := strconv.ParseUint(l.token, 10, 32)
+ if e != nil {
+ // Serial must be a number
+ return &ParseError{"", "bad CSYNC serial", l}
+ }
+ rr.Serial = uint32(j)
+
+ c.Next() // zBlank
+
+ l, _ = c.Next()
+ j, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil {
+ // Serial must be a number
+ return &ParseError{"", "bad CSYNC flags", l}
+ }
+ rr.Flags = uint16(j)
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l, _ = c.Next()
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ tokenUpper := strings.ToUpper(l.token)
+ if k, ok = StringToType[tokenUpper]; !ok {
+ if k, ok = typeToInt(l.token); !ok {
+ return &ParseError{"", "bad CSYNC TypeBitMap", l}
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return &ParseError{"", "bad CSYNC TypeBitMap", l}
+ }
+ l, _ = c.Next()
+ }
+ return nil
+}
+
+func (rr *SIG) parse(c *zlexer, o string) *ParseError {
+ return rr.RRSIG.parse(c, o)
+}
+
+func (rr *RRSIG) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ tokenUpper := strings.ToUpper(l.token)
+ if t, ok := StringToType[tokenUpper]; !ok {
+ if strings.HasPrefix(tokenUpper, "TYPE") {
+ t, ok = typeToInt(l.token)
+ if !ok {
+ return &ParseError{"", "bad RRSIG Typecovered", l}
+ }
+ rr.TypeCovered = t
+ } else {
+ return &ParseError{"", "bad RRSIG Typecovered", l}
+ }
+ } else {
+ rr.TypeCovered = t
+ }
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad RRSIG Algorithm", l}
+ }
+ rr.Algorithm = uint8(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, err = strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad RRSIG Labels", l}
+ }
+ rr.Labels = uint8(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, err = strconv.ParseUint(l.token, 10, 32)
+ if err != nil || l.err {
+ return &ParseError{"", "bad RRSIG OrigTtl", l}
+ }
+ rr.OrigTtl = uint32(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, err := StringToTime(l.token); err != nil {
+ // Try to see if all numeric and use it as epoch
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ // TODO(miek): error out on > MAX_UINT32, same below
+ rr.Expiration = uint32(i)
+ } else {
+ return &ParseError{"", "bad RRSIG Expiration", l}
+ }
+ } else {
+ rr.Expiration = i
+ }
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, err := StringToTime(l.token); err != nil {
+ if i, err := strconv.ParseInt(l.token, 10, 64); err == nil {
+ rr.Inception = uint32(i)
+ } else {
+ return &ParseError{"", "bad RRSIG Inception", l}
+ }
+ } else {
+ rr.Inception = i
+ }
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, err = strconv.ParseUint(l.token, 10, 16)
+ if err != nil || l.err {
+ return &ParseError{"", "bad RRSIG KeyTag", l}
+ }
+ rr.KeyTag = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rr.SignerName = l.token
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad RRSIG SignerName", l}
+ }
+ rr.SignerName = name
+
+ s, e := endingToString(c, "bad RRSIG Signature")
+ if e != nil {
+ return e
+ }
+ rr.Signature = s
+
+ return nil
+}
+
+func (rr *NSEC) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad NSEC NextDomain", l}
+ }
+ rr.NextDomain = name
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l, _ = c.Next()
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ tokenUpper := strings.ToUpper(l.token)
+ if k, ok = StringToType[tokenUpper]; !ok {
+ if k, ok = typeToInt(l.token); !ok {
+ return &ParseError{"", "bad NSEC TypeBitMap", l}
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return &ParseError{"", "bad NSEC TypeBitMap", l}
+ }
+ l, _ = c.Next()
+ }
+ return nil
+}
+
+func (rr *NSEC3) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3 Hash", l}
+ }
+ rr.Hash = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3 Flags", l}
+ }
+ rr.Flags = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3 Iterations", l}
+ }
+ rr.Iterations = uint16(i)
+ c.Next()
+ l, _ = c.Next()
+ if len(l.token) == 0 || l.err {
+ return &ParseError{"", "bad NSEC3 Salt", l}
+ }
+ if l.token != "-" {
+ rr.SaltLength = uint8(len(l.token)) / 2
+ rr.Salt = l.token
+ }
+
+ c.Next()
+ l, _ = c.Next()
+ if len(l.token) == 0 || l.err {
+ return &ParseError{"", "bad NSEC3 NextDomain", l}
+ }
+ rr.HashLength = 20 // Fixed for NSEC3: SHA-1 yields 160 bits (20 octets)
+ rr.NextDomain = l.token
+
+ rr.TypeBitMap = make([]uint16, 0)
+ var (
+ k uint16
+ ok bool
+ )
+ l, _ = c.Next()
+ for l.value != zNewline && l.value != zEOF {
+ switch l.value {
+ case zBlank:
+ // Ok
+ case zString:
+ tokenUpper := strings.ToUpper(l.token)
+ if k, ok = StringToType[tokenUpper]; !ok {
+ if k, ok = typeToInt(l.token); !ok {
+ return &ParseError{"", "bad NSEC3 TypeBitMap", l}
+ }
+ }
+ rr.TypeBitMap = append(rr.TypeBitMap, k)
+ default:
+ return &ParseError{"", "bad NSEC3 TypeBitMap", l}
+ }
+ l, _ = c.Next()
+ }
+ return nil
+}
+
+func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3PARAM Hash", l}
+ }
+ rr.Hash = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3PARAM Flags", l}
+ }
+ rr.Flags = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NSEC3PARAM Iterations", l}
+ }
+ rr.Iterations = uint16(i)
+ c.Next()
+ l, _ = c.Next()
+ if l.token != "-" {
+ rr.SaltLength = uint8(len(l.token))
+ rr.Salt = l.token
+ }
+ return slurpRemainder(c)
+}
+
+func (rr *EUI48) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ if len(l.token) != 17 || l.err {
+ return &ParseError{"", "bad EUI48 Address", l}
+ }
+ addr := make([]byte, 12)
+ dash := 0
+ for i := 0; i < 10; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return &ParseError{"", "bad EUI48 Address", l}
+ }
+ }
+ addr[10] = l.token[15]
+ addr[11] = l.token[16]
+
+ i, e := strconv.ParseUint(string(addr), 16, 48)
+ if e != nil {
+ return &ParseError{"", "bad EUI48 Address", l}
+ }
+ rr.Address = i
+ return slurpRemainder(c)
+}
+
+func (rr *EUI64) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ if len(l.token) != 23 || l.err {
+ return &ParseError{"", "bad EUI64 Address", l}
+ }
+ addr := make([]byte, 16)
+ dash := 0
+ for i := 0; i < 14; i += 2 {
+ addr[i] = l.token[i+dash]
+ addr[i+1] = l.token[i+1+dash]
+ dash++
+ if l.token[i+1+dash] != '-' {
+ return &ParseError{"", "bad EUI64 Address", l}
+ }
+ }
+ addr[14] = l.token[21]
+ addr[15] = l.token[22]
+
+ i, e := strconv.ParseUint(string(addr), 16, 64)
+ if e != nil {
+ return &ParseError{"", "bad EUI64 Address", l}
+ }
+ rr.Address = i
+ return slurpRemainder(c)
+}
+
+func (rr *SSHFP) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SSHFP Algorithm", l}
+ }
+ rr.Algorithm = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SSHFP Type", l}
+ }
+ rr.Type = uint8(i)
+ c.Next() // zBlank
+ s, e1 := endingToString(c, "bad SSHFP Fingerprint")
+ if e1 != nil {
+ return e1
+ }
+ rr.FingerPrint = s
+ return nil
+}
+
+func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad " + typ + " Flags", l}
+ }
+ rr.Flags = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad " + typ + " Protocol", l}
+ }
+ rr.Protocol = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad " + typ + " Algorithm", l}
+ }
+ rr.Algorithm = uint8(i)
+ s, e1 := endingToString(c, "bad "+typ+" PublicKey")
+ if e1 != nil {
+ return e1
+ }
+ rr.PublicKey = s
+ return nil
+}
+
+func (rr *DNSKEY) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDNSKEY(c, o, "DNSKEY")
+}
+
+func (rr *KEY) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDNSKEY(c, o, "KEY")
+}
+
+func (rr *CDNSKEY) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDNSKEY(c, o, "CDNSKEY")
+}
+
+func (rr *RKEY) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad RKEY Flags", l}
+ }
+ rr.Flags = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad RKEY Protocol", l}
+ }
+ rr.Protocol = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad RKEY Algorithm", l}
+ }
+ rr.Algorithm = uint8(i)
+ s, e1 := endingToString(c, "bad RKEY PublicKey")
+ if e1 != nil {
+ return e1
+ }
+ rr.PublicKey = s
+ return nil
+}
+
+func (rr *EID) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToString(c, "bad EID Endpoint")
+ if e != nil {
+ return e
+ }
+ rr.Endpoint = s
+ return nil
+}
+
+func (rr *NIMLOC) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToString(c, "bad NIMLOC Locator")
+ if e != nil {
+ return e
+ }
+ rr.Locator = s
+ return nil
+}
+
+func (rr *GPOS) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ _, e := strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return &ParseError{"", "bad GPOS Longitude", l}
+ }
+ rr.Longitude = l.token
+ c.Next() // zBlank
+ l, _ = c.Next()
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return &ParseError{"", "bad GPOS Latitude", l}
+ }
+ rr.Latitude = l.token
+ c.Next() // zBlank
+ l, _ = c.Next()
+ _, e = strconv.ParseFloat(l.token, 64)
+ if e != nil || l.err {
+ return &ParseError{"", "bad GPOS Altitude", l}
+ }
+ rr.Altitude = l.token
+ return slurpRemainder(c)
+}
+
+func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad " + typ + " KeyTag", l}
+ }
+ rr.KeyTag = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, e = strconv.ParseUint(l.token, 10, 8); e != nil {
+ tokenUpper := strings.ToUpper(l.token)
+ i, ok := StringToAlgorithm[tokenUpper]
+ if !ok || l.err {
+ return &ParseError{"", "bad " + typ + " Algorithm", l}
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad " + typ + " DigestType", l}
+ }
+ rr.DigestType = uint8(i)
+ s, e1 := endingToString(c, "bad "+typ+" Digest")
+ if e1 != nil {
+ return e1
+ }
+ rr.Digest = s
+ return nil
+}
+
+func (rr *DS) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDS(c, o, "DS")
+}
+
+func (rr *DLV) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDS(c, o, "DLV")
+}
+
+func (rr *CDS) parse(c *zlexer, o string) *ParseError {
+ return rr.parseDS(c, o, "CDS")
+}
+
+func (rr *TA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad TA KeyTag", l}
+ }
+ rr.KeyTag = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if i, e := strconv.ParseUint(l.token, 10, 8); e != nil {
+ tokenUpper := strings.ToUpper(l.token)
+ i, ok := StringToAlgorithm[tokenUpper]
+ if !ok || l.err {
+ return &ParseError{"", "bad TA Algorithm", l}
+ }
+ rr.Algorithm = i
+ } else {
+ rr.Algorithm = uint8(i)
+ }
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad TA DigestType", l}
+ }
+ rr.DigestType = uint8(i)
+ s, err := endingToString(c, "bad TA Digest")
+ if err != nil {
+ return err
+ }
+ rr.Digest = s
+ return nil
+}
+
+func (rr *TLSA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad TLSA Usage", l}
+ }
+ rr.Usage = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad TLSA Selector", l}
+ }
+ rr.Selector = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad TLSA MatchingType", l}
+ }
+ rr.MatchingType = uint8(i)
+ // endingToString returns a *ParseError, so e (an error from ParseUint
+ // above) cannot be reused; a fresh variable e2 is needed.
+ s, e2 := endingToString(c, "bad TLSA Certificate")
+ if e2 != nil {
+ return e2
+ }
+ rr.Certificate = s
+ return nil
+}
+
+func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SMIMEA Usage", l}
+ }
+ rr.Usage = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SMIMEA Selector", l}
+ }
+ rr.Selector = uint8(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return &ParseError{"", "bad SMIMEA MatchingType", l}
+ }
+ rr.MatchingType = uint8(i)
+ // endingToString returns a *ParseError, so e (an error from ParseUint
+ // above) cannot be reused; a fresh variable e2 is needed.
+ s, e2 := endingToString(c, "bad SMIMEA Certificate")
+ if e2 != nil {
+ return e2
+ }
+ rr.Certificate = s
+ return nil
+}
+
+func (rr *RFC3597) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ if l.token != "\\#" {
+ return &ParseError{"", "bad RFC3597 Rdata", l}
+ }
+
+ c.Next() // zBlank
+ l, _ = c.Next()
+ rdlength, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return &ParseError{"", "bad RFC3597 Rdata", l}
+ }
+
+ s, e1 := endingToString(c, "bad RFC3597 Rdata")
+ if e1 != nil {
+ return e1
+ }
+ if rdlength*2 != len(s) {
+ return &ParseError{"", "bad RFC3597 Rdata", l}
+ }
+ rr.Rdata = s
+ return nil
+}
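+
+// Editor's note: RFC 3597 generic rdata is a literal \#, the rdata length in
+// octets and that many octets in hex, e.g.
+//
+//	\# 4 0A000001
+//
+// where rdlength is 4 and the hex string is 8 characters long, satisfying the
+// rdlength*2 == len(s) check above.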
+
+func (rr *SPF) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToTxtSlice(c, "bad SPF Txt")
+ if e != nil {
+ return e
+ }
+ rr.Txt = s
+ return nil
+}
+
+func (rr *AVC) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToTxtSlice(c, "bad AVC Txt")
+ if e != nil {
+ return e
+ }
+ rr.Txt = s
+ return nil
+}
+
+func (rr *TXT) parse(c *zlexer, o string) *ParseError {
+ // no zBlank reading here, because all this rdata is TXT
+ s, e := endingToTxtSlice(c, "bad TXT Txt")
+ if e != nil {
+ return e
+ }
+ rr.Txt = s
+ return nil
+}
+
+// identical to setTXT
+func (rr *NINFO) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToTxtSlice(c, "bad NINFO ZSData")
+ if e != nil {
+ return e
+ }
+ rr.ZSData = s
+ return nil
+}
+
+func (rr *URI) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad URI Priority", l}
+ }
+ rr.Priority = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ i, e = strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad URI Weight", l}
+ }
+ rr.Weight = uint16(i)
+
+ c.Next() // zBlank
+ s, err := endingToTxtSlice(c, "bad URI Target")
+ if err != nil {
+ return err
+ }
+ if len(s) != 1 {
+ return &ParseError{"", "bad URI Target", l}
+ }
+ rr.Target = s[0]
+ return nil
+}
+
+func (rr *DHCID) parse(c *zlexer, o string) *ParseError {
+ // awesome record to parse!
+ s, e := endingToString(c, "bad DHCID Digest")
+ if e != nil {
+ return e
+ }
+ rr.Digest = s
+ return nil
+}
+
+func (rr *NID) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad NID Preference", l}
+ }
+ rr.Preference = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return err
+ }
+ rr.NodeID = u
+ return slurpRemainder(c)
+}
+
+func (rr *L32) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad L32 Preference", l}
+ }
+ rr.Preference = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Locator32 = net.ParseIP(l.token)
+ if rr.Locator32 == nil || l.err {
+ return &ParseError{"", "bad L32 Locator", l}
+ }
+ return slurpRemainder(c)
+}
+
+func (rr *LP) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad LP Preference", l}
+ }
+ rr.Preference = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Fqdn = l.token
+ name, nameOk := toAbsoluteName(l.token, o)
+ if l.err || !nameOk {
+ return &ParseError{"", "bad LP Fqdn", l}
+ }
+ rr.Fqdn = name
+
+ return slurpRemainder(c)
+}
+
+func (rr *L64) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad L64 Preference", l}
+ }
+ rr.Preference = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ u, err := stringToNodeID(l)
+ if err != nil || l.err {
+ return err
+ }
+ rr.Locator64 = u
+ return slurpRemainder(c)
+}
+
+func (rr *UID) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 32)
+ if e != nil || l.err {
+ return &ParseError{"", "bad UID Uid", l}
+ }
+ rr.Uid = uint32(i)
+ return slurpRemainder(c)
+}
+
+func (rr *GID) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 32)
+ if e != nil || l.err {
+ return &ParseError{"", "bad GID Gid", l}
+ }
+ rr.Gid = uint32(i)
+ return slurpRemainder(c)
+}
+
+func (rr *UINFO) parse(c *zlexer, o string) *ParseError {
+ s, e := endingToTxtSlice(c, "bad UINFO Uinfo")
+ if e != nil {
+ return e
+ }
+ if ln := len(s); ln == 0 {
+ return nil
+ }
+ rr.Uinfo = s[0] // silently discard anything after the first character-string
+ return nil
+}
+
+func (rr *PX) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, e := strconv.ParseUint(l.token, 10, 16)
+ if e != nil || l.err {
+ return &ParseError{"", "bad PX Preference", l}
+ }
+ rr.Preference = uint16(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Map822 = l.token
+ map822, map822Ok := toAbsoluteName(l.token, o)
+ if l.err || !map822Ok {
+ return &ParseError{"", "bad PX Map822", l}
+ }
+ rr.Map822 = map822
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ rr.Mapx400 = l.token
+ mapx400, mapx400Ok := toAbsoluteName(l.token, o)
+ if l.err || !mapx400Ok {
+ return &ParseError{"", "bad PX Mapx400", l}
+ }
+ rr.Mapx400 = mapx400
+
+ return slurpRemainder(c)
+}
+
+func (rr *CAA) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+ i, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return &ParseError{"", "bad CAA Flag", l}
+ }
+ rr.Flag = uint8(i)
+
+ c.Next() // zBlank
+ l, _ = c.Next() // zString
+ if l.value != zString {
+ return &ParseError{"", "bad CAA Tag", l}
+ }
+ rr.Tag = l.token
+
+ c.Next() // zBlank
+ s, e := endingToTxtSlice(c, "bad CAA Value")
+ if e != nil {
+ return e
+ }
+ if len(s) != 1 {
+ return &ParseError{"", "bad CAA Value", l}
+ }
+ rr.Value = s[0]
+ return nil
+}
+
+func (rr *TKEY) parse(c *zlexer, o string) *ParseError {
+ l, _ := c.Next()
+
+ // Algorithm
+ if l.value != zString {
+ return &ParseError{"", "bad TKEY algorithm", l}
+ }
+ rr.Algorithm = l.token
+ c.Next() // zBlank
+
+ // Get the key length and key values
+ l, _ = c.Next()
+ i, err := strconv.ParseUint(l.token, 10, 16) // KeySize is a uint16, so allow the full range
+ if err != nil || l.err {
+ return &ParseError{"", "bad TKEY key length", l}
+ }
+ rr.KeySize = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if l.value != zString {
+ return &ParseError{"", "bad TKEY key", l}
+ }
+ rr.Key = l.token
+ c.Next() // zBlank
+
+ // Get the otherdata length and string data
+ l, _ = c.Next()
+ i, err = strconv.ParseUint(l.token, 10, 16) // OtherLen is a uint16, so allow the full range
+ if err != nil || l.err {
+ return &ParseError{"", "bad TKEY otherdata length", l}
+ }
+ rr.OtherLen = uint16(i)
+ c.Next() // zBlank
+ l, _ = c.Next()
+ if l.value != zString {
+ return &ParseError{"", "bad TKEY otherdata", l}
+ }
+ rr.OtherData = l.token
+
+ return nil
+}
+
+func (rr *APL) parse(c *zlexer, o string) *ParseError {
+ var prefixes []APLPrefix
+
+ for {
+ l, _ := c.Next()
+ if l.value == zNewline || l.value == zEOF {
+ break
+ }
+ if l.value == zBlank && prefixes != nil {
+ continue
+ }
+ if l.value != zString {
+ return &ParseError{"", "unexpected APL field", l}
+ }
+
+ // Expected format: [!]afi:address/prefix
+
+ colon := strings.IndexByte(l.token, ':')
+ if colon == -1 {
+ return &ParseError{"", "missing colon in APL field", l}
+ }
+
+ family, cidr := l.token[:colon], l.token[colon+1:]
+
+ var negation bool
+ if family != "" && family[0] == '!' {
+ negation = true
+ family = family[1:]
+ }
+
+ afi, err := strconv.ParseUint(family, 10, 16)
+ if err != nil {
+ return &ParseError{"", "failed to parse APL family: " + err.Error(), l}
+ }
+ var addrLen int
+ switch afi {
+ case 1:
+ addrLen = net.IPv4len
+ case 2:
+ addrLen = net.IPv6len
+ default:
+ return &ParseError{"", "unrecognized APL family", l}
+ }
+
+ ip, subnet, err := net.ParseCIDR(cidr)
+ if err != nil {
+ return &ParseError{"", "failed to parse APL address: " + err.Error(), l}
+ }
+ if !ip.Equal(subnet.IP) {
+ return &ParseError{"", "extra bits in APL address", l}
+ }
+
+ if len(subnet.IP) != addrLen {
+ return &ParseError{"", "address mismatch with the APL family", l}
+ }
+
+ prefixes = append(prefixes, APLPrefix{
+ Negation: negation,
+ Network: *subnet,
+ })
+ }
+
+ rr.Prefixes = prefixes
+ return nil
+}
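+
+// Editor's note: each APL item has the form [!]afi:address/prefix, with
+// afi 1 for IPv4 and 2 for IPv6 and "!" marking negation, e.g.
+//
+//	1:192.0.2.0/24 !2:2001:db8::/32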
diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go
new file mode 100644
index 0000000..69deb33
--- /dev/null
+++ b/vendor/github.com/miekg/dns/serve_mux.go
@@ -0,0 +1,123 @@
+package dns
+
+import (
+ "strings"
+ "sync"
+)
+
+// ServeMux is a DNS request multiplexer. It matches the zone name of
+// each incoming request against a list of registered patterns and calls
+// the handler for the pattern that most closely matches the zone name.
+//
+// ServeMux is DNSSEC aware, meaning that queries for the DS record are
+// redirected to the parent zone (if that is also registered), otherwise
+// the child gets the query.
+//
+// ServeMux is also safe for concurrent access from multiple goroutines.
+//
+// The zero ServeMux is empty and ready for use.
+type ServeMux struct {
+ z map[string]Handler
+ m sync.RWMutex
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux {
+ return new(ServeMux)
+}
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
+
+func (mux *ServeMux) match(q string, t uint16) Handler {
+ mux.m.RLock()
+ defer mux.m.RUnlock()
+ if mux.z == nil {
+ return nil
+ }
+
+ q = strings.ToLower(q)
+
+ var handler Handler
+ for off, end := 0, false; !end; off, end = NextLabel(q, off) {
+ if h, ok := mux.z[q[off:]]; ok {
+ if t != TypeDS {
+ return h
+ }
+ // Continue for DS to see if we have a parent too, if so delegate to the parent
+ handler = h
+ }
+ }
+
+ // Wildcard match; if we have found nothing, try the root zone as a last resort.
+ if h, ok := mux.z["."]; ok {
+ return h
+ }
+
+ return handler
+}
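+
+// Editor's note: with handlers registered for both "org." and "example.org."
+// (hypothetical zones), a query for "www.example.org. A" is served by the
+// "example.org." handler, while an "example.org. DS" query is delegated to
+// the "org." handler, matching DNSSEC's placement of DS records in the
+// parent zone.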
+
+// Handle adds a handler to the ServeMux for pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ if mux.z == nil {
+ mux.z = make(map[string]Handler)
+ }
+ mux.z[Fqdn(pattern)] = handler
+ mux.m.Unlock()
+}
+
+// HandleFunc adds a handler function to the ServeMux for pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler specific for pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose pattern most
+// closely matches the request message.
+//
+// ServeDNS is DNSSEC aware, meaning that queries for the DS record
+// are redirected to the parent zone (if that is also registered),
+// otherwise the child gets the query.
+//
+// If no handler is found, or there is no question, a standard SERVFAIL
+// message is returned.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
+ var h Handler
+ if len(req.Question) >= 1 { // allow more than one question
+ h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
+ }
+
+ if h != nil {
+ h.ServeDNS(w, req)
+ } else {
+ HandleFailed(w, req)
+ }
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handle with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
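+
+// Editor's usage sketch (hypothetical zone and port; handlers registered on
+// the DefaultServeMux are used by any Server whose Handler is nil):
+//
+//	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
+//		m := new(dns.Msg)
+//		m.SetReply(r)
+//		w.WriteMsg(m)
+//	})
+//	_ = dns.ListenAndServe(":5353", "udp", nil)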
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
new file mode 100644
index 0000000..3cf1a02
--- /dev/null
+++ b/vendor/github.com/miekg/dns/server.go
@@ -0,0 +1,764 @@
+// DNS server implementation.
+
+package dns
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/binary"
+ "errors"
+ "io"
+ "net"
+ "strings"
+ "sync"
+ "time"
+)
+
+// Default maximum number of TCP queries before we close the socket.
+const maxTCPQueries = 128
+
+// aLongTimeAgo is a non-zero time, far in the past, used for
+// immediate cancelation of network operations.
+var aLongTimeAgo = time.Unix(1, 0)
+
+// Handler is implemented by any value that implements ServeDNS.
+type Handler interface {
+ ServeDNS(w ResponseWriter, r *Msg)
+}
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as DNS handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Msg)
+
+// ServeDNS calls f(w, r).
+func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
+ f(w, r)
+}
+
+// A ResponseWriter interface is used by a DNS handler to
+// construct a DNS response.
+type ResponseWriter interface {
+ // LocalAddr returns the net.Addr of the server
+ LocalAddr() net.Addr
+ // RemoteAddr returns the net.Addr of the client that sent the current request.
+ RemoteAddr() net.Addr
+ // WriteMsg writes a reply back to the client.
+ WriteMsg(*Msg) error
+ // Write writes a raw buffer back to the client.
+ Write([]byte) (int, error)
+ // Close closes the connection.
+ Close() error
+ // TsigStatus returns the status of the Tsig.
+ TsigStatus() error
+ // TsigTimersOnly sets the tsig timers only boolean.
+ TsigTimersOnly(bool)
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack(), the DNS package will not do anything with the connection.
+ Hijack()
+}
+
+// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
+// when available.
+type ConnectionStater interface {
+ ConnectionState() *tls.ConnectionState
+}
+
+type response struct {
+ closed bool // connection has been closed
+ hijacked bool // connection has been hijacked by handler
+ tsigTimersOnly bool
+ tsigStatus error
+ tsigRequestMAC string
+ tsigSecret map[string]string // the tsig secrets
+ udp *net.UDPConn // i/o connection if UDP was used
+ tcp net.Conn // i/o connection if TCP was used
+ udpSession *SessionUDP // oob data to get egress interface right
+ writer Writer // writer to output the raw DNS bits
+}
+
+// HandleFailed replies to every request it gets with a standard SERVFAIL message.
+func HandleFailed(w ResponseWriter, r *Msg) {
+ m := new(Msg)
+ m.SetRcode(r, RcodeServerFailure)
+ // does not matter if this write fails
+ w.WriteMsg(m)
+}
+
+// ListenAndServe starts a server on the specified address and network,
+// invoking handler for incoming queries.
+func ListenAndServe(addr string, network string, handler Handler) error {
+ server := &Server{Addr: addr, Net: network, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
+// http://golang.org/pkg/net/http/#ListenAndServeTLS
+func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ server := &Server{
+ Addr: addr,
+ Net: "tcp-tls",
+ TLSConfig: &config,
+ Handler: handler,
+ }
+
+ return server.ListenAndServe()
+}
+
+// ActivateAndServe activates a server with a listener from systemd.
+// l and p should not both be non-nil; if both are non-nil, only p is used.
+// Invoke handler for incoming queries.
+func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
+ server := &Server{Listener: l, PacketConn: p, Handler: handler}
+ return server.ActivateAndServe()
+}
+
+// Writer writes raw DNS messages; each call to Write should send an entire message.
+type Writer interface {
+ io.Writer
+}
+
+// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
+type Reader interface {
+ // ReadTCP reads a raw message from a TCP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
+ // ReadUDP reads a raw message from a UDP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
+}
+
+// defaultReader is an adapter for the Server struct that implements the Reader interface
+// using the readTCP and readUDP func of the embedded Server.
+type defaultReader struct {
+ *Server
+}
+
+func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ return dr.readTCP(conn, timeout)
+}
+
+func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ return dr.readUDP(conn, timeout)
+}
+
+// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
+// Implementations should never return a nil Reader.
+type DecorateReader func(Reader) Reader
+
+// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
+// Implementations should never return a nil Writer.
+type DecorateWriter func(Writer) Writer
+
+// A Server defines parameters for running a DNS server.
+type Server struct {
+ // Address to listen on, ":dns" if empty.
+ Addr string
+ // If "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise a UDP one
+ Net string
+ // TCP Listener to use, this is to aid in systemd's socket activation.
+ Listener net.Listener
+ // TLS connection configuration
+ TLSConfig *tls.Config
+ // UDP "Listener" to use, this is to aid in systemd's socket activation.
+ PacketConn net.PacketConn
+ // Handler to invoke, dns.DefaultServeMux if nil.
+ Handler Handler
+ // Default buffer size to use to read incoming UDP messages. If not set
+ // it defaults to MinMsgSize (512 B).
+ UDPSize int
+ // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second.
+ ReadTimeout time.Duration
+ // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second.
+ WriteTimeout time.Duration
+ // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
+ IdleTimeout func() time.Duration
+ // Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
+ TsigSecret map[string]string
+ // If NotifyStartedFunc is set it is called once the server has started listening.
+ NotifyStartedFunc func()
+ // DecorateReader is optional, allows customization of the process that reads raw DNS messages.
+ DecorateReader DecorateReader
+ // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
+ DecorateWriter DecorateWriter
+ // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1).
+ MaxTCPQueries int
+ // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address.
+ // It is only supported on go1.11+ and when using ListenAndServe.
+ ReusePort bool
+ // AcceptMsgFunc will check the incoming message and will reject it early in the process.
+ // By default DefaultMsgAcceptFunc will be used.
+ MsgAcceptFunc MsgAcceptFunc
+
+ // Shutdown handling
+ lock sync.RWMutex
+ started bool
+ shutdown chan struct{}
+ conns map[net.Conn]struct{}
+
+ // A pool for UDP message buffers.
+ udpPool sync.Pool
+}
+
+func (srv *Server) isStarted() bool {
+ srv.lock.RLock()
+ started := srv.started
+ srv.lock.RUnlock()
+ return started
+}
+
+func makeUDPBuffer(size int) func() interface{} {
+ return func() interface{} {
+ return make([]byte, size)
+ }
+}
+
+func (srv *Server) init() {
+ srv.shutdown = make(chan struct{})
+ srv.conns = make(map[net.Conn]struct{})
+
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ if srv.MsgAcceptFunc == nil {
+ srv.MsgAcceptFunc = DefaultMsgAcceptFunc
+ }
+ if srv.Handler == nil {
+ srv.Handler = DefaultServeMux
+ }
+
+ srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
+}
+
+func unlockOnce(l sync.Locker) func() {
+ var once sync.Once
+ return func() { once.Do(l.Unlock) }
+}
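+
+// Editor's note: unlockOnce lets ListenAndServe and ActivateAndServe release
+// srv.lock explicitly just before entering their blocking serve loops, while
+// the deferred call stays a safe no-op on every early-return error path.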
+
+// ListenAndServe starts a nameserver on the configured address in *Server.
+func (srv *Server) ListenAndServe() error {
+ unlock := unlockOnce(&srv.lock)
+ srv.lock.Lock()
+ defer unlock()
+
+ if srv.started {
+ return &Error{err: "server already started"}
+ }
+
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":domain"
+ }
+
+ srv.init()
+
+ switch srv.Net {
+ case "tcp", "tcp4", "tcp6":
+ l, err := listenTCP(srv.Net, addr, srv.ReusePort)
+ if err != nil {
+ return err
+ }
+ srv.Listener = l
+ srv.started = true
+ unlock()
+ return srv.serveTCP(l)
+ case "tcp-tls", "tcp4-tls", "tcp6-tls":
+ if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) {
+ return errors.New("dns: neither Certificates nor GetCertificate set in Config")
+ }
+ network := strings.TrimSuffix(srv.Net, "-tls")
+ l, err := listenTCP(network, addr, srv.ReusePort)
+ if err != nil {
+ return err
+ }
+ l = tls.NewListener(l, srv.TLSConfig)
+ srv.Listener = l
+ srv.started = true
+ unlock()
+ return srv.serveTCP(l)
+ case "udp", "udp4", "udp6":
+ l, err := listenUDP(srv.Net, addr, srv.ReusePort)
+ if err != nil {
+ return err
+ }
+ u := l.(*net.UDPConn)
+ if e := setUDPSocketOptions(u); e != nil {
+ return e
+ }
+ srv.PacketConn = l
+ srv.started = true
+ unlock()
+ return srv.serveUDP(u)
+ }
+ return &Error{err: "bad network"}
+}
+
+// ActivateAndServe starts a nameserver with the PacketConn or Listener
+// configured in *Server. Its main use is to start a server from systemd.
+func (srv *Server) ActivateAndServe() error {
+ unlock := unlockOnce(&srv.lock)
+ srv.lock.Lock()
+ defer unlock()
+
+ if srv.started {
+ return &Error{err: "server already started"}
+ }
+
+ srv.init()
+
+ pConn := srv.PacketConn
+ l := srv.Listener
+ if pConn != nil {
+ // Check that the PacketConn's concrete type is valid and its value
+ // is not nil
+ if t, ok := pConn.(*net.UDPConn); ok && t != nil {
+ if e := setUDPSocketOptions(t); e != nil {
+ return e
+ }
+ srv.started = true
+ unlock()
+ return srv.serveUDP(t)
+ }
+ }
+ if l != nil {
+ srv.started = true
+ unlock()
+ return srv.serveTCP(l)
+ }
+ return &Error{err: "bad listeners"}
+}
+
+// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
+// ActivateAndServe will return.
+func (srv *Server) Shutdown() error {
+ return srv.ShutdownContext(context.Background())
+}
+
+// ShutdownContext shuts down a server. After a call to ShutdownContext,
+// ListenAndServe and ActivateAndServe will return.
+//
+// A context.Context may be passed to limit how long to wait for connections
+// to terminate.
+func (srv *Server) ShutdownContext(ctx context.Context) error {
+ srv.lock.Lock()
+ if !srv.started {
+ srv.lock.Unlock()
+ return &Error{err: "server not started"}
+ }
+
+ srv.started = false
+
+ if srv.PacketConn != nil {
+ srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
+ }
+
+ if srv.Listener != nil {
+ srv.Listener.Close()
+ }
+
+ for rw := range srv.conns {
+ rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
+ }
+
+ srv.lock.Unlock()
+
+ if testShutdownNotify != nil {
+ testShutdownNotify.Broadcast()
+ }
+
+ var ctxErr error
+ select {
+ case <-srv.shutdown:
+ case <-ctx.Done():
+ ctxErr = ctx.Err()
+ }
+
+ if srv.PacketConn != nil {
+ srv.PacketConn.Close()
+ }
+
+ return ctxErr
+}
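+
+// For example, to give in-flight requests up to five seconds to drain
+// (a sketch; the timeout value is an assumption):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := srv.ShutdownContext(ctx); err != nil {
+//		// the context expired before shutdown completed
+//	}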
+
+var testShutdownNotify *sync.Cond
+
+// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
+func (srv *Server) getReadTimeout() time.Duration {
+ if srv.ReadTimeout != 0 {
+ return srv.ReadTimeout
+ }
+ return dnsTimeout
+}
+
+// serveTCP starts a TCP listener for the server.
+func (srv *Server) serveTCP(l net.Listener) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ var wg sync.WaitGroup
+ defer func() {
+ wg.Wait()
+ close(srv.shutdown)
+ }()
+
+ for srv.isStarted() {
+ rw, err := l.Accept()
+ if err != nil {
+ if !srv.isStarted() {
+ return nil
+ }
+ if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
+ continue
+ }
+ return err
+ }
+ srv.lock.Lock()
+ // Track the connection to allow unblocking reads on shutdown.
+ srv.conns[rw] = struct{}{}
+ srv.lock.Unlock()
+ wg.Add(1)
+ go srv.serveTCPConn(&wg, rw)
+ }
+
+ return nil
+}
+
+// serveUDP starts a UDP listener for the server.
+func (srv *Server) serveUDP(l *net.UDPConn) error {
+ defer l.Close()
+
+ if srv.NotifyStartedFunc != nil {
+ srv.NotifyStartedFunc()
+ }
+
+ reader := Reader(defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ var wg sync.WaitGroup
+ defer func() {
+ wg.Wait()
+ close(srv.shutdown)
+ }()
+
+ rtimeout := srv.getReadTimeout()
+ // Each read uses the read timeout; no overall deadline is set.
+ for srv.isStarted() {
+ m, s, err := reader.ReadUDP(l, rtimeout)
+ if err != nil {
+ if !srv.isStarted() {
+ return nil
+ }
+ if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+ continue
+ }
+ return err
+ }
+ if len(m) < headerSize {
+ if cap(m) == srv.UDPSize {
+ srv.udpPool.Put(m[:srv.UDPSize])
+ }
+ continue
+ }
+ wg.Add(1)
+ go srv.serveUDPPacket(&wg, m, l, s)
+ }
+
+ return nil
+}
+
+// Serve a new TCP connection.
+func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
+ w := &response{tsigSecret: srv.TsigSecret, tcp: rw}
+ if srv.DecorateWriter != nil {
+ w.writer = srv.DecorateWriter(w)
+ } else {
+ w.writer = w
+ }
+
+ reader := Reader(defaultReader{srv})
+ if srv.DecorateReader != nil {
+ reader = srv.DecorateReader(reader)
+ }
+
+ idleTimeout := tcpIdleTimeout
+ if srv.IdleTimeout != nil {
+ idleTimeout = srv.IdleTimeout()
+ }
+
+ timeout := srv.getReadTimeout()
+
+ limit := srv.MaxTCPQueries
+ if limit == 0 {
+ limit = maxTCPQueries
+ }
+
+ for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
+ m, err := reader.ReadTCP(w.tcp, timeout)
+ if err != nil {
+ // TODO(tmthrgd): handle error
+ break
+ }
+ srv.serveDNS(m, w)
+ if w.closed {
+ break // Close() was called
+ }
+ if w.hijacked {
+ break // client will call Close() themselves
+ }
+ // The first read uses the read timeout, the rest use the
+ // idle timeout.
+ timeout = idleTimeout
+ }
+
+ if !w.hijacked {
+ w.Close()
+ }
+
+ srv.lock.Lock()
+ delete(srv.conns, w.tcp)
+ srv.lock.Unlock()
+
+ wg.Done()
+}
+
+// Serve a new UDP request.
+func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
+ w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
+ if srv.DecorateWriter != nil {
+ w.writer = srv.DecorateWriter(w)
+ } else {
+ w.writer = w
+ }
+
+ srv.serveDNS(m, w)
+ wg.Done()
+}
+
+func (srv *Server) serveDNS(m []byte, w *response) {
+ dh, off, err := unpackMsgHdr(m, 0)
+ if err != nil {
+ // Let client hang, they are sending crap; any reply can be used to amplify.
+ return
+ }
+
+ req := new(Msg)
+ req.setHdr(dh)
+
+ switch action := srv.MsgAcceptFunc(dh); action {
+ case MsgAccept:
+ if req.unpack(dh, m, off) == nil {
+ break
+ }
+
+ fallthrough
+ case MsgReject, MsgRejectNotImplemented:
+ opcode := req.Opcode
+ req.SetRcodeFormatError(req)
+ req.Zero = false
+ if action == MsgRejectNotImplemented {
+ req.Opcode = opcode
+ req.Rcode = RcodeNotImplemented
+ }
+
+ // Are we allowed to delete any OPT records here?
+ req.Ns, req.Answer, req.Extra = nil, nil, nil
+
+ w.WriteMsg(req)
+ fallthrough
+ case MsgIgnore:
+ if w.udp != nil && cap(m) == srv.UDPSize {
+ srv.udpPool.Put(m[:srv.UDPSize])
+ }
+
+ return
+ }
+
+ w.tsigStatus = nil
+ if w.tsigSecret != nil {
+ if t := req.IsTsig(); t != nil {
+ if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
+ w.tsigStatus = TsigVerify(m, secret, "", false)
+ } else {
+ w.tsigStatus = ErrSecret
+ }
+ w.tsigTimersOnly = false
+ w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC
+ }
+ }
+
+ if w.udp != nil && cap(m) == srv.UDPSize {
+ srv.udpPool.Put(m[:srv.UDPSize])
+ }
+
+ srv.Handler.ServeDNS(w, req) // Writes back to the client
+}
+
+func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ // If we race with ShutdownContext, the read deadline may
+ // have been set in the distant past to unblock the read
+ // below. We must not override it, otherwise we may block
+ // ShutdownContext.
+ srv.lock.RLock()
+ if srv.started {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ }
+ srv.lock.RUnlock()
+
+ var length uint16
+ if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
+ return nil, err
+ }
+
+ m := make([]byte, length)
+ if _, err := io.ReadFull(conn, m); err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ srv.lock.RLock()
+ if srv.started {
+ // See the comment in readTCP above.
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ }
+ srv.lock.RUnlock()
+
+ m := srv.udpPool.Get().([]byte)
+ n, s, err := ReadFromSessionUDP(conn, m)
+ if err != nil {
+ srv.udpPool.Put(m)
+ return nil, nil, err
+ }
+ m = m[:n]
+ return m, s, nil
+}
+
+// WriteMsg implements the ResponseWriter.WriteMsg method.
+func (w *response) WriteMsg(m *Msg) (err error) {
+ if w.closed {
+ return &Error{err: "WriteMsg called after Close"}
+ }
+
+ var data []byte
+ if w.tsigSecret != nil { // if no secrets, don't check for the tsig (which is a longer check)
+ if t := m.IsTsig(); t != nil {
+ data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly)
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+ }
+ }
+ data, err = m.Pack()
+ if err != nil {
+ return err
+ }
+ _, err = w.writer.Write(data)
+ return err
+}
+
+// Write implements the ResponseWriter.Write method.
+func (w *response) Write(m []byte) (int, error) {
+ if w.closed {
+ return 0, &Error{err: "Write called after Close"}
+ }
+
+ switch {
+ case w.udp != nil:
+ return WriteToSessionUDP(w.udp, m, w.udpSession)
+ case w.tcp != nil:
+ if len(m) > MaxMsgSize {
+ return 0, &Error{err: "message too large"}
+ }
+
+ l := make([]byte, 2)
+ binary.BigEndian.PutUint16(l, uint16(len(m)))
+
+ n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
+ return int(n), err
+ default:
+ panic("dns: internal error: udp and tcp both nil")
+ }
+}
+
+// LocalAddr implements the ResponseWriter.LocalAddr method.
+func (w *response) LocalAddr() net.Addr {
+ switch {
+ case w.udp != nil:
+ return w.udp.LocalAddr()
+ case w.tcp != nil:
+ return w.tcp.LocalAddr()
+ default:
+ panic("dns: internal error: udp and tcp both nil")
+ }
+}
+
+// RemoteAddr implements the ResponseWriter.RemoteAddr method.
+func (w *response) RemoteAddr() net.Addr {
+ switch {
+ case w.udpSession != nil:
+ return w.udpSession.RemoteAddr()
+ case w.tcp != nil:
+ return w.tcp.RemoteAddr()
+ default:
+ panic("dns: internal error: udpSession and tcp both nil")
+ }
+}
+
+// TsigStatus implements the ResponseWriter.TsigStatus method.
+func (w *response) TsigStatus() error { return w.tsigStatus }
+
+// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method.
+func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b }
+
+// Hijack implements the ResponseWriter.Hijack method.
+func (w *response) Hijack() { w.hijacked = true }
+
+// Close implements the ResponseWriter.Close method
+func (w *response) Close() error {
+ if w.closed {
+ return &Error{err: "connection already closed"}
+ }
+ w.closed = true
+
+ switch {
+ case w.udp != nil:
+ // Can't close the udp conn, as that is actually the listener.
+ return nil
+ case w.tcp != nil:
+ return w.tcp.Close()
+ default:
+ panic("dns: internal error: udp and tcp both nil")
+ }
+}
+
+// ConnectionState() implements the ConnectionStater.ConnectionState() interface.
+func (w *response) ConnectionState() *tls.ConnectionState {
+ type tlsConnectionStater interface {
+ ConnectionState() tls.ConnectionState
+ }
+ if v, ok := w.tcp.(tlsConnectionStater); ok {
+ t := v.ConnectionState()
+ return &t
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
new file mode 100644
index 0000000..55cf1c3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -0,0 +1,209 @@
+package dns
+
+import (
+ "crypto"
+ "crypto/dsa"
+ "crypto/ecdsa"
+ "crypto/rsa"
+ "encoding/binary"
+ "math/big"
+ "strings"
+ "time"
+)
+
+// Sign signs a dns.Msg. It fills the signature with the appropriate data.
+// The SIG record should have the SignerName, KeyTag, Algorithm, Inception
+// and Expiration set.
+func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
+ if k == nil {
+ return nil, ErrPrivKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return nil, ErrKey
+ }
+
+ rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
+ rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
+
+ buf := make([]byte, m.Len()+Len(rr))
+ mbuf, err := m.PackBuffer(buf)
+ if err != nil {
+ return nil, err
+ }
+ if &buf[0] != &mbuf[0] {
+ return nil, ErrBuf
+ }
+ off, err := PackRR(rr, buf, len(mbuf), nil, false)
+ if err != nil {
+ return nil, err
+ }
+ buf = buf[:off:cap(buf)]
+
+ hash, ok := AlgorithmToHash[rr.Algorithm]
+ if !ok {
+ return nil, ErrAlg
+ }
+
+ hasher := hash.New()
+ // Write SIG rdata
+ hasher.Write(buf[len(mbuf)+1+2+2+4+2:])
+ // Write message
+ hasher.Write(buf[:len(mbuf)])
+
+ signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ rr.Signature = toBase64(signature)
+
+ buf = append(buf, signature...)
+ if len(buf) > int(^uint16(0)) {
+ return nil, ErrBuf
+ }
+ // Adjust sig data length
+ rdoff := len(mbuf) + 1 + 2 + 2 + 4
+ rdlen := binary.BigEndian.Uint16(buf[rdoff:])
+ rdlen += uint16(len(signature))
+ binary.BigEndian.PutUint16(buf[rdoff:], rdlen)
+ // Adjust additional count
+ adc := binary.BigEndian.Uint16(buf[10:])
+ adc++
+ binary.BigEndian.PutUint16(buf[10:], adc)
+ return buf, nil
+}
+
+// Verify validates the message buf using the key k.
+// It's assumed that buf is a valid message from which rr was unpacked.
+func (rr *SIG) Verify(k *KEY, buf []byte) error {
+ if k == nil {
+ return ErrKey
+ }
+ if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
+ return ErrKey
+ }
+
+ var hash crypto.Hash
+ switch rr.Algorithm {
+ case DSA, RSASHA1:
+ hash = crypto.SHA1
+ case RSASHA256, ECDSAP256SHA256:
+ hash = crypto.SHA256
+ case ECDSAP384SHA384:
+ hash = crypto.SHA384
+ case RSASHA512:
+ hash = crypto.SHA512
+ default:
+ return ErrAlg
+ }
+ hasher := hash.New()
+
+ buflen := len(buf)
+ qdc := binary.BigEndian.Uint16(buf[4:])
+ anc := binary.BigEndian.Uint16(buf[6:])
+ auc := binary.BigEndian.Uint16(buf[8:])
+ adc := binary.BigEndian.Uint16(buf[10:])
+ offset := headerSize
+ var err error
+ for i := uint16(0); i < qdc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type and Class
+ offset += 2 + 2
+ }
+ for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ {
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip past Type, Class and TTL
+ offset += 2 + 2 + 4
+ if offset+1 >= buflen {
+ continue
+ }
+ rdlen := binary.BigEndian.Uint16(buf[offset:])
+ offset += 2
+ offset += int(rdlen)
+ }
+ if offset >= buflen {
+ return &Error{err: "overflowing unpacking signed message"}
+ }
+
+ // offset should be just prior to SIG
+ bodyend := offset
+ // owner name SHOULD be root
+ _, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // Skip Type, Class, TTL, RDLen
+ offset += 2 + 2 + 4 + 2
+ sigstart := offset
+ // Skip Type Covered, Algorithm, Labels, Original TTL
+ offset += 2 + 1 + 1 + 4
+ if offset+4+4 >= buflen {
+ return &Error{err: "overflow unpacking signed message"}
+ }
+ expire := binary.BigEndian.Uint32(buf[offset:])
+ offset += 4
+ incept := binary.BigEndian.Uint32(buf[offset:])
+ offset += 4
+ now := uint32(time.Now().Unix())
+ if now < incept || now > expire {
+ return ErrTime
+ }
+ // Skip key tag
+ offset += 2
+ var signername string
+ signername, offset, err = UnpackDomainName(buf, offset)
+ if err != nil {
+ return err
+ }
+ // If the key came from the DNS, name compression might
+ // have mangled the case of the name.
+ if !strings.EqualFold(signername, k.Header().Name) {
+ return &Error{err: "signer name doesn't match key name"}
+ }
+ sigend := offset
+ hasher.Write(buf[sigstart:sigend])
+ hasher.Write(buf[:10])
+ hasher.Write([]byte{
+ byte((adc - 1) << 8),
+ byte(adc - 1),
+ })
+ hasher.Write(buf[12:bodyend])
+
+ hashed := hasher.Sum(nil)
+ sig := buf[sigend:]
+ switch k.Algorithm {
+ case DSA:
+ pk := k.publicKeyDSA()
+ sig = sig[1:]
+ r := new(big.Int).SetBytes(sig[:len(sig)/2])
+ s := new(big.Int).SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if dsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ case RSASHA1, RSASHA256, RSASHA512:
+ pk := k.publicKeyRSA()
+ if pk != nil {
+ return rsa.VerifyPKCS1v15(pk, hash, hashed, sig)
+ }
+ case ECDSAP256SHA256, ECDSAP384SHA384:
+ pk := k.publicKeyECDSA()
+ r := new(big.Int).SetBytes(sig[:len(sig)/2])
+ s := new(big.Int).SetBytes(sig[len(sig)/2:])
+ if pk != nil {
+ if ecdsa.Verify(pk, hashed, r, s) {
+ return nil
+ }
+ return ErrSig
+ }
+ }
+ return ErrKeyAlg
+}
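+
+// A verification sketch (the key, the message bytes and the position of the
+// SIG RR are assumptions for illustration):
+//
+//	sig := m.Extra[len(m.Extra)-1].(*SIG) // SIG(0) travels as the last additional RR
+//	if err := sig.Verify(key, msgBytes); err != nil {
+//		// signature did not validate
+//	}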
diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go
new file mode 100644
index 0000000..febcc30
--- /dev/null
+++ b/vendor/github.com/miekg/dns/singleinflight.go
@@ -0,0 +1,61 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Adapted for dns package usage by Miek Gieben.
+
+package dns
+
+import "sync"
+import "time"
+
+// call is an in-flight or completed singleflight.Do call
+type call struct {
+ wg sync.WaitGroup
+ val *Msg
+ rtt time.Duration
+ err error
+ dups int
+}
+
+// singleflight represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type singleflight struct {
+ sync.Mutex // protects m
+ m map[string]*call // lazily initialized
+
+ dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
+ g.Lock()
+ if g.m == nil {
+ g.m = make(map[string]*call)
+ }
+ if c, ok := g.m[key]; ok {
+ c.dups++
+ g.Unlock()
+ c.wg.Wait()
+ return c.val, c.rtt, c.err, true
+ }
+ c := new(call)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.Unlock()
+
+ c.val, c.rtt, c.err = fn()
+ c.wg.Done()
+
+ if !g.dontDeleteForTesting {
+ g.Lock()
+ delete(g.m, key)
+ g.Unlock()
+ }
+
+ return c.val, c.rtt, c.err, c.dups > 0
+}
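+
+// A sketch of the intended use (the key format and the exchange helper are
+// assumptions for illustration):
+//
+//	var group singleflight
+//	msg, rtt, err, shared := group.Do("example.org. A", func() (*Msg, time.Duration, error) {
+//		return exchangeWithUpstream() // hypothetical helper
+//	})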
diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go
new file mode 100644
index 0000000..89f09f0
--- /dev/null
+++ b/vendor/github.com/miekg/dns/smimea.go
@@ -0,0 +1,44 @@
+package dns
+
+import (
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/hex"
+)
+
+// Sign creates a SMIMEA record from an SSL certificate.
+func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
+ r.Hdr.Rrtype = TypeSMIMEA
+ r.Usage = uint8(usage)
+ r.Selector = uint8(selector)
+ r.MatchingType = uint8(matchingType)
+
+ r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
+ return err
+}
+
+// Verify verifies a SMIMEA record against an SSL certificate. If it is OK
+// a nil error is returned.
+func (r *SMIMEA) Verify(cert *x509.Certificate) error {
+ c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err // Not also ErrSig?
+ }
+ if r.Certificate == c {
+ return nil
+ }
+ return ErrSig // ErrSig, really?
+}
+
+// SMIMEAName returns the ownername of an SMIMEA resource record as per the
+// format specified in RFC 8162 (draft-ietf-dane-smime), Sections 2 and 3.
+func SMIMEAName(email, domain string) (string, error) {
+ hasher := sha256.New()
+ hasher.Write([]byte(email))
+
+ // RFC Section 3: "The local-part is hashed using the SHA2-256
+ // algorithm with the hash truncated to 28 octets and
+ // represented in its hexadecimal representation to become the
+ // left-most label in the prepared domain name"
+ return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
+}
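+
+// Note that SMIMEAName hashes the email argument as given; per the RFC text
+// quoted above, callers should pass the local-part. A sketch:
+//
+//	name, _ := SMIMEAName("hugh", "example.com.")
+//	// name is the 56-hex-digit truncated SHA2-256 of "hugh" followed by
+//	// "._smimecert.example.com."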
diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go
new file mode 100644
index 0000000..4e07983
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tlsa.go
@@ -0,0 +1,44 @@
+package dns
+
+import (
+ "crypto/x509"
+ "net"
+ "strconv"
+)
+
+// Sign creates a TLSA record from an SSL certificate.
+func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
+ r.Hdr.Rrtype = TypeTLSA
+ r.Usage = uint8(usage)
+ r.Selector = uint8(selector)
+ r.MatchingType = uint8(matchingType)
+
+ r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
+ return err
+}
+
+// Verify verifies a TLSA record against an SSL certificate. If it is OK
+// a nil error is returned.
+func (r *TLSA) Verify(cert *x509.Certificate) error {
+ c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err // Not also ErrSig?
+ }
+ if r.Certificate == c {
+ return nil
+ }
+ return ErrSig // ErrSig, really?
+}
+
+// TLSAName returns the ownername of a TLSA resource record as per the
+// rules specified in RFC 6698, Section 3.
+func TLSAName(name, service, network string) (string, error) {
+ if !IsFqdn(name) {
+ return "", ErrFqdn
+ }
+ p, err := net.LookupPort(network, service)
+ if err != nil {
+ return "", err
+ }
+ return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
+}
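+
+// For example, for an HTTPS service (the owner name is an assumption):
+//
+//	name, err := TLSAName("www.example.com.", "https", "tcp")
+//	// name == "_443._tcp.www.example.com."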
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
new file mode 100644
index 0000000..61efa24
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -0,0 +1,389 @@
+package dns
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/binary"
+ "encoding/hex"
+ "hash"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// HMAC hashing codes. These are transmitted as domain names.
+const (
+ HmacMD5 = "hmac-md5.sig-alg.reg.int."
+ HmacSHA1 = "hmac-sha1."
+ HmacSHA256 = "hmac-sha256."
+ HmacSHA512 = "hmac-sha512."
+)
+
+// TSIG is the RR that holds the transaction signature of a message.
+// See RFC 2845 and RFC 4635.
+type TSIG struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+ OrigId uint16
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// TSIG has no official presentation format, but this will suffice.
+
+func (rr *TSIG) String() string {
+ s := "\n;; TSIG PSEUDOSECTION:\n; " // add another semi-colon to signify TSIG does not have a presentation format
+ s += rr.Hdr.String() +
+ " " + rr.Algorithm +
+ " " + tsigTimeToString(rr.TimeSigned) +
+ " " + strconv.Itoa(int(rr.Fudge)) +
+ " " + strconv.Itoa(int(rr.MACSize)) +
+ " " + strings.ToUpper(rr.MAC) +
+ " " + strconv.Itoa(int(rr.OrigId)) +
+ " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
+ " " + strconv.Itoa(int(rr.OtherLen)) +
+ " " + rr.OtherData
+ return s
+}
+
+func (rr *TSIG) parse(c *zlexer, origin string) *ParseError {
+ panic("dns: internal error: parse should never be called on TSIG")
+}
+
+// The following values must be put in wireformat, so that the MAC can be calculated.
+// RFC 2845, section 3.4.2. TSIG Variables.
+type tsigWireFmt struct {
+ // From RR_Header
+ Name string `dns:"domain-name"`
+ Class uint16
+ Ttl uint32
+ // Rdata of the TSIG
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ // MACSize, MAC and OrigId excluded
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// If we have the MAC, use this type to convert it to wire data. RFC 2845, Section 3.4.3. Request MAC.
+type macWireFmt struct {
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+}
+
+// RFC 2845, Section 3.3. Time values used in TSIG calculations.
+type timerWireFmt struct {
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+}
+
+// TsigGenerate fills out the TSIG record attached to the message.
+// The message should contain a "stub" TSIG RR with the algorithm, key name
+// (owner name of the RR), time fudge (defaults to 300 seconds) and the
+// current time. The TSIG MAC is saved in that TSIG RR.
+// When TsigGenerate is called for the first time, requestMAC is set to the
+// empty string and timersOnly is false.
+// If something goes wrong an error is returned, otherwise it is nil.
+func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) {
+ if m.IsTsig() == nil {
+ panic("dns: TSIG not last RR in additional")
+ }
+ // If we barf here, the caller is to blame
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return nil, "", err
+ }
+
+ rr := m.Extra[len(m.Extra)-1].(*TSIG)
+ m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg
+ mbuf, err := m.Pack()
+ if err != nil {
+ return nil, "", err
+ }
+ buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly)
+
+ t := new(TSIG)
+ var h hash.Hash
+ switch strings.ToLower(rr.Algorithm) {
+ case HmacMD5:
+ h = hmac.New(md5.New, rawsecret)
+ case HmacSHA1:
+ h = hmac.New(sha1.New, rawsecret)
+ case HmacSHA256:
+ h = hmac.New(sha256.New, rawsecret)
+ case HmacSHA512:
+ h = hmac.New(sha512.New, rawsecret)
+ default:
+ return nil, "", ErrKeyAlg
+ }
+ h.Write(buf)
+ t.MAC = hex.EncodeToString(h.Sum(nil))
+ t.MACSize = uint16(len(t.MAC) / 2) // Size is half!
+
+ t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0}
+ t.Fudge = rr.Fudge
+ t.TimeSigned = rr.TimeSigned
+ t.Algorithm = rr.Algorithm
+ t.OrigId = m.Id
+
+ tbuf := make([]byte, Len(t))
+ off, err := PackRR(t, tbuf, 0, nil, false)
+ if err != nil {
+ return nil, "", err
+ }
+ mbuf = append(mbuf, tbuf[:off]...)
+ // Update the ArCount directly in the buffer.
+ binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
+
+ return mbuf, t.MAC, nil
+}
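+
+// Callers rarely invoke TsigGenerate directly: attaching a "stub" TSIG RR
+// with Msg.SetTsig and configuring the shared secret on the client or server
+// is usually enough. A sketch (the key name and fudge window are assumptions):
+//
+//	m := new(Msg)
+//	m.SetQuestion("example.org.", TypeSOA)
+//	m.SetTsig("axfr.", HmacSHA256, 300, time.Now().Unix())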
+
+// TsigVerify verifies the TSIG on a message.
+// If the signature does not validate err contains the
+// error, otherwise it is nil.
+func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error {
+ rawsecret, err := fromBase64([]byte(secret))
+ if err != nil {
+ return err
+ }
+ // Strip the TSIG from the incoming msg
+ stripped, tsig, err := stripTsig(msg)
+ if err != nil {
+ return err
+ }
+
+ msgMAC, err := hex.DecodeString(tsig.MAC)
+ if err != nil {
+ return err
+ }
+
+ buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly)
+
+ // Fudge factor works both ways. A message can arrive before it was signed because
+ // of clock skew.
+ now := uint64(time.Now().Unix())
+ ti := now - tsig.TimeSigned
+ if now < tsig.TimeSigned {
+ ti = tsig.TimeSigned - now
+ }
+ if uint64(tsig.Fudge) < ti {
+ return ErrTime
+ }
+
+ var h hash.Hash
+ switch strings.ToLower(tsig.Algorithm) {
+ case HmacMD5:
+ h = hmac.New(md5.New, rawsecret)
+ case HmacSHA1:
+ h = hmac.New(sha1.New, rawsecret)
+ case HmacSHA256:
+ h = hmac.New(sha256.New, rawsecret)
+ case HmacSHA512:
+ h = hmac.New(sha512.New, rawsecret)
+ default:
+ return ErrKeyAlg
+ }
+ h.Write(buf)
+ if !hmac.Equal(h.Sum(nil), msgMAC) {
+ return ErrSig
+ }
+ return nil
+}
+
+// Create a wiredata buffer for the MAC calculation.
+func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte {
+ var buf []byte
+ if rr.TimeSigned == 0 {
+ rr.TimeSigned = uint64(time.Now().Unix())
+ }
+ if rr.Fudge == 0 {
+ rr.Fudge = 300 // Standard (RFC) default.
+ }
+
+ // Replace message ID in header with original ID from TSIG
+ binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)
+
+ if requestMAC != "" {
+ m := new(macWireFmt)
+ m.MACSize = uint16(len(requestMAC) / 2)
+ m.MAC = requestMAC
+ buf = make([]byte, len(requestMAC)) // long enough
+ n, _ := packMacWire(m, buf)
+ buf = buf[:n]
+ }
+
+ tsigvar := make([]byte, DefaultMsgSize)
+ if timersOnly {
+ tsig := new(timerWireFmt)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ n, _ := packTimerWire(tsig, tsigvar)
+ tsigvar = tsigvar[:n]
+ } else {
+ tsig := new(tsigWireFmt)
+ tsig.Name = strings.ToLower(rr.Hdr.Name)
+ tsig.Class = ClassANY
+ tsig.Ttl = rr.Hdr.Ttl
+ tsig.Algorithm = strings.ToLower(rr.Algorithm)
+ tsig.TimeSigned = rr.TimeSigned
+ tsig.Fudge = rr.Fudge
+ tsig.Error = rr.Error
+ tsig.OtherLen = rr.OtherLen
+ tsig.OtherData = rr.OtherData
+ n, _ := packTsigWire(tsig, tsigvar)
+ tsigvar = tsigvar[:n]
+ }
+
+ if requestMAC != "" {
+ x := append(buf, msgbuf...)
+ buf = append(x, tsigvar...)
+ } else {
+ buf = append(msgbuf, tsigvar...)
+ }
+ return buf
+}
+
+// Strip the TSIG from the raw message.
+func stripTsig(msg []byte) ([]byte, *TSIG, error) {
+ // Copied from msg.go's Unpack() Header, but modified.
+ var (
+ dh Header
+ err error
+ )
+ off, tsigoff := 0, 0
+
+ if dh, off, err = unpackMsgHdr(msg, off); err != nil {
+ return nil, nil, err
+ }
+ if dh.Arcount == 0 {
+ return nil, nil, ErrNoSig
+ }
+
+ // Rcode, see msg.go Unpack()
+ if int(dh.Bits&0xF) == RcodeNotAuth {
+ return nil, nil, ErrAuth
+ }
+
+ for i := 0; i < int(dh.Qdcount); i++ {
+ _, off, err = unpackQuestion(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ _, off, err = unpackRRslice(int(dh.Ancount), msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ _, off, err = unpackRRslice(int(dh.Nscount), msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ rr := new(TSIG)
+ var extra RR
+ for i := 0; i < int(dh.Arcount); i++ {
+ tsigoff = off
+ extra, off, err = UnpackRR(msg, off)
+ if err != nil {
+ return nil, nil, err
+ }
+ if extra.Header().Rrtype == TypeTSIG {
+ rr = extra.(*TSIG)
+ // Adjust Arcount.
+ arcount := binary.BigEndian.Uint16(msg[10:])
+ binary.BigEndian.PutUint16(msg[10:], arcount-1)
+ break
+ }
+ }
+ if rr == nil {
+ return nil, nil, ErrNoSig
+ }
+ return msg[:tsigoff], rr, nil
+}
+
+// Translate the TSIG time signed into a date. There is no
+// need for RFC1982 calculations as this date is 48 bits.
+func tsigTimeToString(t uint64) string {
+ ti := time.Unix(int64(t), 0).UTC()
+ return ti.Format("20060102150405")
+}
+
+func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) {
+ // copied from zmsg.go TSIG packing
+ // RR_Header
+ off, err := PackDomainName(tw.Name, msg, 0, nil, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Class, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(tw.Ttl, msg, off)
+ if err != nil {
+ return off, err
+ }
+
+ off, err = PackDomainName(tw.Algorithm, msg, off, nil, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint48(tw.TimeSigned, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+
+ off, err = packUint16(tw.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(tw.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packMacWire(mw *macWireFmt, msg []byte) (int, error) {
+ off, err := packUint16(mw.MACSize, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(mw.MAC, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) {
+ off, err := packUint48(tw.TimeSigned, msg, 0)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(tw.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
new file mode 100644
index 0000000..a6048cb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/types.go
@@ -0,0 +1,1527 @@
+package dns
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type (
+ // Type is a DNS type.
+ Type uint16
+ // Class is a DNS class.
+ Class uint16
+ // Name is a DNS domain name.
+ Name string
+)
+
+// Packet formats
+
+// Wire constants and supported types.
+const (
+ // valid RR_Header.Rrtype and Question.qtype
+
+ TypeNone uint16 = 0
+ TypeA uint16 = 1
+ TypeNS uint16 = 2
+ TypeMD uint16 = 3
+ TypeMF uint16 = 4
+ TypeCNAME uint16 = 5
+ TypeSOA uint16 = 6
+ TypeMB uint16 = 7
+ TypeMG uint16 = 8
+ TypeMR uint16 = 9
+ TypeNULL uint16 = 10
+ TypePTR uint16 = 12
+ TypeHINFO uint16 = 13
+ TypeMINFO uint16 = 14
+ TypeMX uint16 = 15
+ TypeTXT uint16 = 16
+ TypeRP uint16 = 17
+ TypeAFSDB uint16 = 18
+ TypeX25 uint16 = 19
+ TypeISDN uint16 = 20
+ TypeRT uint16 = 21
+ TypeNSAPPTR uint16 = 23
+ TypeSIG uint16 = 24
+ TypeKEY uint16 = 25
+ TypePX uint16 = 26
+ TypeGPOS uint16 = 27
+ TypeAAAA uint16 = 28
+ TypeLOC uint16 = 29
+ TypeNXT uint16 = 30
+ TypeEID uint16 = 31
+ TypeNIMLOC uint16 = 32
+ TypeSRV uint16 = 33
+ TypeATMA uint16 = 34
+ TypeNAPTR uint16 = 35
+ TypeKX uint16 = 36
+ TypeCERT uint16 = 37
+ TypeDNAME uint16 = 39
+ TypeOPT uint16 = 41 // EDNS
+ TypeAPL uint16 = 42
+ TypeDS uint16 = 43
+ TypeSSHFP uint16 = 44
+ TypeRRSIG uint16 = 46
+ TypeNSEC uint16 = 47
+ TypeDNSKEY uint16 = 48
+ TypeDHCID uint16 = 49
+ TypeNSEC3 uint16 = 50
+ TypeNSEC3PARAM uint16 = 51
+ TypeTLSA uint16 = 52
+ TypeSMIMEA uint16 = 53
+ TypeHIP uint16 = 55
+ TypeNINFO uint16 = 56
+ TypeRKEY uint16 = 57
+ TypeTALINK uint16 = 58
+ TypeCDS uint16 = 59
+ TypeCDNSKEY uint16 = 60
+ TypeOPENPGPKEY uint16 = 61
+ TypeCSYNC uint16 = 62
+ TypeSPF uint16 = 99
+ TypeUINFO uint16 = 100
+ TypeUID uint16 = 101
+ TypeGID uint16 = 102
+ TypeUNSPEC uint16 = 103
+ TypeNID uint16 = 104
+ TypeL32 uint16 = 105
+ TypeL64 uint16 = 106
+ TypeLP uint16 = 107
+ TypeEUI48 uint16 = 108
+ TypeEUI64 uint16 = 109
+ TypeURI uint16 = 256
+ TypeCAA uint16 = 257
+ TypeAVC uint16 = 258
+
+ TypeTKEY uint16 = 249
+ TypeTSIG uint16 = 250
+
+ // valid Question.Qtype only
+ TypeIXFR uint16 = 251
+ TypeAXFR uint16 = 252
+ TypeMAILB uint16 = 253
+ TypeMAILA uint16 = 254
+ TypeANY uint16 = 255
+
+ TypeTA uint16 = 32768
+ TypeDLV uint16 = 32769
+ TypeReserved uint16 = 65535
+
+ // valid Question.Qclass
+ ClassINET = 1
+ ClassCSNET = 2
+ ClassCHAOS = 3
+ ClassHESIOD = 4
+ ClassNONE = 254
+ ClassANY = 255
+
+ // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
+ RcodeSuccess = 0 // NoError - No Error [DNS]
+ RcodeFormatError = 1 // FormErr - Format Error [DNS]
+ RcodeServerFailure = 2 // ServFail - Server Failure [DNS]
+ RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS]
+ RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS]
+ RcodeRefused = 5 // Refused - Query Refused [DNS]
+ RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update]
+ RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update]
+ RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update]
+ RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update]
+ RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG]
+ RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG]
+ RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0]
+ RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG]
+ RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG]
+ RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY]
+ RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY]
+ RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY]
+ RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG]
+ RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies]
+
+ // Message Opcodes. There is no 3.
+ OpcodeQuery = 0
+ OpcodeIQuery = 1
+ OpcodeStatus = 2
+ OpcodeNotify = 4
+ OpcodeUpdate = 5
+)
+
+// Header is the wire format for the DNS packet header.
+type Header struct {
+ Id uint16
+ Bits uint16
+ Qdcount, Ancount, Nscount, Arcount uint16
+}
+
+const (
+ headerSize = 12
+
+ // Header.Bits
+ _QR = 1 << 15 // query/response (response=1)
+ _AA = 1 << 10 // authoritative
+ _TC = 1 << 9 // truncated
+ _RD = 1 << 8 // recursion desired
+ _RA = 1 << 7 // recursion available
+ _Z = 1 << 6 // Z
+ _AD = 1 << 5 // authenticated data
+ _CD = 1 << 4 // checking disabled
+)
+
+// Various constants used in the LOC RR, See RFC 1887.
+const (
+ LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
+ LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
+ LOC_HOURS = 60 * 1000
+ LOC_DEGREES = 60 * LOC_HOURS
+ LOC_ALTITUDEBASE = 100000
+)
+
+// Different Certificate Types, see RFC 4398, Section 2.1
+const (
+ CertPKIX = 1 + iota
+ CertSPKI
+ CertPGP
+ CertIPIX
+ CertISPKI
+ CertIPGP
+ CertACPKIX
+ CertIACPKIX
+ CertURI = 253
+ CertOID = 254
+)
+
+// CertTypeToString converts the Cert Type to its string representation.
+// See RFC 4398 and RFC 6944.
+var CertTypeToString = map[uint16]string{
+ CertPKIX: "PKIX",
+ CertSPKI: "SPKI",
+ CertPGP: "PGP",
+ CertIPIX: "IPIX",
+ CertISPKI: "ISPKI",
+ CertIPGP: "IPGP",
+ CertACPKIX: "ACPKIX",
+ CertIACPKIX: "IACPKIX",
+ CertURI: "URI",
+ CertOID: "OID",
+}
+
+//go:generate go run types_generate.go
+
+// Question holds a DNS question. There can be multiple questions in the
+// question section of a message. Usually there is just one.
+type Question struct {
+ Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed)
+ Qtype uint16
+ Qclass uint16
+}
+
+func (q *Question) len(off int, compression map[string]struct{}) int {
+ l := domainNameLen(q.Name, off, compression, true)
+ l += 2 + 2
+ return l
+}
+
+func (q *Question) String() (s string) {
+ // prefix with ; (as in dig)
+ s = ";" + sprintName(q.Name) + "\t"
+ s += Class(q.Qclass).String() + "\t"
+ s += " " + Type(q.Qtype).String()
+ return s
+}
+
+// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY
+// is named "*" there.
+type ANY struct {
+ Hdr RR_Header
+ // Does not have any rdata
+}
+
+func (rr *ANY) String() string { return rr.Hdr.String() }
+
+func (rr *ANY) parse(c *zlexer, origin string) *ParseError {
+ panic("dns: internal error: parse should never be called on ANY")
+}
+
+// NULL RR. See RFC 1035.
+type NULL struct {
+ Hdr RR_Header
+ Data string `dns:"any"`
+}
+
+func (rr *NULL) String() string {
+ // There is no presentation format; prefix string with a comment.
+ return ";" + rr.Hdr.String() + rr.Data
+}
+
+func (rr *NULL) parse(c *zlexer, origin string) *ParseError {
+ panic("dns: internal error: parse should never be called on NULL")
+}
+
+// CNAME RR. See RFC 1034.
+type CNAME struct {
+ Hdr RR_Header
+ Target string `dns:"cdomain-name"`
+}
+
+func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) }
+
+// HINFO RR. See RFC 1035.
+type HINFO struct {
+ Hdr RR_Header
+ Cpu string
+ Os string
+}
+
+func (rr *HINFO) String() string {
+ return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
+}
+
+// MB RR. See RFC 1035.
+type MB struct {
+ Hdr RR_Header
+ Mb string `dns:"cdomain-name"`
+}
+
+func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) }
+
+// MG RR. See RFC 1035.
+type MG struct {
+ Hdr RR_Header
+ Mg string `dns:"cdomain-name"`
+}
+
+func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) }
+
+// MINFO RR. See RFC 1035.
+type MINFO struct {
+ Hdr RR_Header
+ Rmail string `dns:"cdomain-name"`
+ Email string `dns:"cdomain-name"`
+}
+
+func (rr *MINFO) String() string {
+ return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
+}
+
+// MR RR. See RFC 1035.
+type MR struct {
+ Hdr RR_Header
+ Mr string `dns:"cdomain-name"`
+}
+
+func (rr *MR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mr)
+}
+
+// MF RR. See RFC 1035.
+type MF struct {
+ Hdr RR_Header
+ Mf string `dns:"cdomain-name"`
+}
+
+func (rr *MF) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mf)
+}
+
+// MD RR. See RFC 1035.
+type MD struct {
+ Hdr RR_Header
+ Md string `dns:"cdomain-name"`
+}
+
+func (rr *MD) String() string {
+ return rr.Hdr.String() + sprintName(rr.Md)
+}
+
+// MX RR. See RFC 1035.
+type MX struct {
+ Hdr RR_Header
+ Preference uint16
+ Mx string `dns:"cdomain-name"`
+}
+
+func (rr *MX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
+}
+
+// AFSDB RR. See RFC 1183.
+type AFSDB struct {
+ Hdr RR_Header
+ Subtype uint16
+ Hostname string `dns:"domain-name"`
+}
+
+func (rr *AFSDB) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
+}
+
+// X25 RR. See RFC 1183, Section 3.1.
+type X25 struct {
+ Hdr RR_Header
+ PSDNAddress string
+}
+
+func (rr *X25) String() string {
+ return rr.Hdr.String() + rr.PSDNAddress
+}
+
+// RT RR. See RFC 1183, Section 3.3.
+type RT struct {
+ Hdr RR_Header
+ Preference uint16
+ Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035.
+}
+
+func (rr *RT) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
+}
+
+// NS RR. See RFC 1035.
+type NS struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+}
+
+func (rr *NS) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns)
+}
+
+// PTR RR. See RFC 1035.
+type PTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"cdomain-name"`
+}
+
+func (rr *PTR) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ptr)
+}
+
+// RP RR. See RFC 1183, Section 2.2.
+type RP struct {
+ Hdr RR_Header
+ Mbox string `dns:"domain-name"`
+ Txt string `dns:"domain-name"`
+}
+
+func (rr *RP) String() string {
+ return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt)
+}
+
+// SOA RR. See RFC 1035.
+type SOA struct {
+ Hdr RR_Header
+ Ns string `dns:"cdomain-name"`
+ Mbox string `dns:"cdomain-name"`
+ Serial uint32
+ Refresh uint32
+ Retry uint32
+ Expire uint32
+ Minttl uint32
+}
+
+func (rr *SOA) String() string {
+ return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) +
+ " " + strconv.FormatInt(int64(rr.Serial), 10) +
+ " " + strconv.FormatInt(int64(rr.Refresh), 10) +
+ " " + strconv.FormatInt(int64(rr.Retry), 10) +
+ " " + strconv.FormatInt(int64(rr.Expire), 10) +
+ " " + strconv.FormatInt(int64(rr.Minttl), 10)
+}
+
+// TXT RR. See RFC 1035.
+type TXT struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+func sprintName(s string) string {
+ var dst strings.Builder
+
+ for i := 0; i < len(s); {
+ if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+ if dst.Len() != 0 {
+ dst.WriteString(s[i : i+2])
+ }
+ i += 2
+ continue
+ }
+
+ b, n := nextByte(s, i)
+ if n == 0 {
+ i++
+ continue
+ }
+ if b == '.' {
+ if dst.Len() != 0 {
+ dst.WriteByte('.')
+ }
+ i += n
+ continue
+ }
+ switch b {
+ case ' ', '\'', '@', ';', '(', ')', '"', '\\': // additional chars to escape
+ if dst.Len() == 0 {
+ dst.Grow(len(s) * 2)
+ dst.WriteString(s[:i])
+ }
+ dst.WriteByte('\\')
+ dst.WriteByte(b)
+ default:
+ if ' ' <= b && b <= '~' {
+ if dst.Len() != 0 {
+ dst.WriteByte(b)
+ }
+ } else {
+ if dst.Len() == 0 {
+ dst.Grow(len(s) * 2)
+ dst.WriteString(s[:i])
+ }
+ dst.WriteString(escapeByte(b))
+ }
+ }
+ i += n
+ }
+ if dst.Len() == 0 {
+ return s
+ }
+ return dst.String()
+}
+
+func sprintTxtOctet(s string) string {
+ var dst strings.Builder
+ dst.Grow(2 + len(s))
+ dst.WriteByte('"')
+ for i := 0; i < len(s); {
+ if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+ dst.WriteString(s[i : i+2])
+ i += 2
+ continue
+ }
+
+ b, n := nextByte(s, i)
+ switch {
+ case n == 0:
+ i++ // dangling back slash
+ case b == '.':
+ dst.WriteByte('.')
+ case b < ' ' || b > '~':
+ dst.WriteString(escapeByte(b))
+ default:
+ dst.WriteByte(b)
+ }
+ i += n
+ }
+ dst.WriteByte('"')
+ return dst.String()
+}
+
+func sprintTxt(txt []string) string {
+ var out strings.Builder
+ for i, s := range txt {
+ out.Grow(3 + len(s))
+ if i > 0 {
+ out.WriteString(` "`)
+ } else {
+ out.WriteByte('"')
+ }
+ for j := 0; j < len(s); {
+ b, n := nextByte(s, j)
+ if n == 0 {
+ break
+ }
+ writeTXTStringByte(&out, b)
+ j += n
+ }
+ out.WriteByte('"')
+ }
+ return out.String()
+}
+
+func writeTXTStringByte(s *strings.Builder, b byte) {
+ switch {
+ case b == '"' || b == '\\':
+ s.WriteByte('\\')
+ s.WriteByte(b)
+ case b < ' ' || b > '~':
+ s.WriteString(escapeByte(b))
+ default:
+ s.WriteByte(b)
+ }
+}
+
+const (
+ escapedByteSmall = "" +
+ `\000\001\002\003\004\005\006\007\008\009` +
+ `\010\011\012\013\014\015\016\017\018\019` +
+ `\020\021\022\023\024\025\026\027\028\029` +
+ `\030\031`
+ escapedByteLarge = `\127\128\129` +
+ `\130\131\132\133\134\135\136\137\138\139` +
+ `\140\141\142\143\144\145\146\147\148\149` +
+ `\150\151\152\153\154\155\156\157\158\159` +
+ `\160\161\162\163\164\165\166\167\168\169` +
+ `\170\171\172\173\174\175\176\177\178\179` +
+ `\180\181\182\183\184\185\186\187\188\189` +
+ `\190\191\192\193\194\195\196\197\198\199` +
+ `\200\201\202\203\204\205\206\207\208\209` +
+ `\210\211\212\213\214\215\216\217\218\219` +
+ `\220\221\222\223\224\225\226\227\228\229` +
+ `\230\231\232\233\234\235\236\237\238\239` +
+ `\240\241\242\243\244\245\246\247\248\249` +
+ `\250\251\252\253\254\255`
+)
+
+// escapeByte returns the \DDD escaping of b which must
+// satisfy b < ' ' || b > '~'.
+func escapeByte(b byte) string {
+ if b < ' ' {
+ return escapedByteSmall[b*4 : b*4+4]
+ }
+
+ b -= '~' + 1
+ // The cast here is needed as b*4 may overflow byte.
+ return escapedByteLarge[int(b)*4 : int(b)*4+4]
+}
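+
+// For example, escapeByte(0) returns `\000` and escapeByte(255) returns `\255`.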
+
+func nextByte(s string, offset int) (byte, int) {
+ if offset >= len(s) {
+ return 0, 0
+ }
+ if s[offset] != '\\' {
+ // not an escape sequence
+ return s[offset], 1
+ }
+ switch len(s) - offset {
+ case 1: // dangling escape
+ return 0, 0
+ case 2, 3: // too short to be \ddd
+ default: // maybe \ddd
+ if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
+ return dddStringToByte(s[offset+1:]), 4
+ }
+ }
+ // not \ddd, just an RFC 1035 "quoted" character
+ return s[offset+1], 2
+}
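+
+// For example, nextByte(`a\065`, 1) returns ('A', 4), consuming a \DDD
+// escape, while nextByte(`a\.b`, 1) returns ('.', 2).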
+
+// SPF RR. See RFC 4408, Section 3.1.1.
+type SPF struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template.
+type AVC struct {
+ Hdr RR_Header
+ Txt []string `dns:"txt"`
+}
+
+func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
+
+// SRV RR. See RFC 2782.
+type SRV struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Port uint16
+ Target string `dns:"domain-name"`
+}
+
+func (rr *SRV) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Priority)) + " " +
+ strconv.Itoa(int(rr.Weight)) + " " +
+ strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
+}
+
+// NAPTR RR. See RFC 2915.
+type NAPTR struct {
+ Hdr RR_Header
+ Order uint16
+ Preference uint16
+ Flags string
+ Service string
+ Regexp string
+ Replacement string `dns:"domain-name"`
+}
+
+func (rr *NAPTR) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Order)) + " " +
+ strconv.Itoa(int(rr.Preference)) + " " +
+ "\"" + rr.Flags + "\" " +
+ "\"" + rr.Service + "\" " +
+ "\"" + rr.Regexp + "\" " +
+ rr.Replacement
+}
+
+// CERT RR. See RFC 4398.
+type CERT struct {
+ Hdr RR_Header
+ Type uint16
+ KeyTag uint16
+ Algorithm uint8
+ Certificate string `dns:"base64"`
+}
+
+func (rr *CERT) String() string {
+ var (
+ ok bool
+ certtype, algorithm string
+ )
+ if certtype, ok = CertTypeToString[rr.Type]; !ok {
+ certtype = strconv.Itoa(int(rr.Type))
+ }
+ if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok {
+ algorithm = strconv.Itoa(int(rr.Algorithm))
+ }
+ return rr.Hdr.String() + certtype +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + algorithm +
+ " " + rr.Certificate
+}
+
+// DNAME RR. See RFC 2672.
+type DNAME struct {
+ Hdr RR_Header
+ Target string `dns:"domain-name"`
+}
+
+func (rr *DNAME) String() string {
+ return rr.Hdr.String() + sprintName(rr.Target)
+}
+
+// A RR. See RFC 1035.
+type A struct {
+ Hdr RR_Header
+ A net.IP `dns:"a"`
+}
+
+func (rr *A) String() string {
+ if rr.A == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.A.String()
+}
+
+// AAAA RR. See RFC 3596.
+type AAAA struct {
+ Hdr RR_Header
+ AAAA net.IP `dns:"aaaa"`
+}
+
+func (rr *AAAA) String() string {
+ if rr.AAAA == nil {
+ return rr.Hdr.String()
+ }
+ return rr.Hdr.String() + rr.AAAA.String()
+}
+
+// PX RR. See RFC 2163.
+type PX struct {
+ Hdr RR_Header
+ Preference uint16
+ Map822 string `dns:"domain-name"`
+ Mapx400 string `dns:"domain-name"`
+}
+
+func (rr *PX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
+}
+
+// GPOS RR. See RFC 1712.
+type GPOS struct {
+ Hdr RR_Header
+ Longitude string
+ Latitude string
+ Altitude string
+}
+
+func (rr *GPOS) String() string {
+ return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
+}
+
+// LOC RR. See RFC 1876.
+type LOC struct {
+ Hdr RR_Header
+ Version uint8
+ Size uint8
+ HorizPre uint8
+ VertPre uint8
+ Latitude uint32
+ Longitude uint32
+ Altitude uint32
+}
+
+// cmToM takes a centimeter value expressed in RFC 1876 SIZE mantissa/exponent
+// format and returns a string in meters (two decimals for the centimeters).
+func cmToM(m, e uint8) string {
+ if e < 2 {
+ if e == 1 {
+ m *= 10
+ }
+
+ return fmt.Sprintf("0.%02d", m)
+ }
+
+ s := fmt.Sprintf("%d", m)
+ for e > 2 {
+ s += "0"
+ e--
+ }
+ return s
+}
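+
+// For instance, cmToM(1, 2) yields "1" (1e2 cm = 1 m), cmToM(5, 5) yields
+// "5000" (5e5 cm), and cmToM(9, 0) yields "0.09".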
+
+func (rr *LOC) String() string {
+ s := rr.Hdr.String()
+
+ lat := rr.Latitude
+ ns := "N"
+ if lat > LOC_EQUATOR {
+ lat = lat - LOC_EQUATOR
+ } else {
+ ns = "S"
+ lat = LOC_EQUATOR - lat
+ }
+ h := lat / LOC_DEGREES
+ lat = lat % LOC_DEGREES
+ m := lat / LOC_HOURS
+ lat = lat % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns)
+
+ lon := rr.Longitude
+ ew := "E"
+ if lon > LOC_PRIMEMERIDIAN {
+ lon = lon - LOC_PRIMEMERIDIAN
+ } else {
+ ew = "W"
+ lon = LOC_PRIMEMERIDIAN - lon
+ }
+ h = lon / LOC_DEGREES
+ lon = lon % LOC_DEGREES
+ m = lon / LOC_HOURS
+ lon = lon % LOC_HOURS
+ s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew)
+
+ var alt = float64(rr.Altitude) / 100
+ alt -= LOC_ALTITUDEBASE
+ if rr.Altitude%100 != 0 {
+ s += fmt.Sprintf("%.2fm ", alt)
+ } else {
+ s += fmt.Sprintf("%.0fm ", alt)
+ }
+
+ s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m "
+ s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m "
+ s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m"
+
+ return s
+}
+
+// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0); see RFC 2931.
+type SIG struct {
+ RRSIG
+}
+
+// RRSIG RR. See RFC 4034 and RFC 3755.
+type RRSIG struct {
+ Hdr RR_Header
+ TypeCovered uint16
+ Algorithm uint8
+ Labels uint8
+ OrigTtl uint32
+ Expiration uint32
+ Inception uint32
+ KeyTag uint16
+ SignerName string `dns:"domain-name"`
+ Signature string `dns:"base64"`
+}
+
+func (rr *RRSIG) String() string {
+ s := rr.Hdr.String()
+ s += Type(rr.TypeCovered).String()
+ s += " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Labels)) +
+ " " + strconv.FormatInt(int64(rr.OrigTtl), 10) +
+ " " + TimeToString(rr.Expiration) +
+ " " + TimeToString(rr.Inception) +
+ " " + strconv.Itoa(int(rr.KeyTag)) +
+ " " + sprintName(rr.SignerName) +
+ " " + rr.Signature
+ return s
+}
+
+// NSEC RR. See RFC 4034 and RFC 3755.
+type NSEC struct {
+ Hdr RR_Header
+ NextDomain string `dns:"domain-name"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC) String() string {
+ s := rr.Hdr.String() + sprintName(rr.NextDomain)
+ for _, t := range rr.TypeBitMap {
+ s += " " + Type(t).String()
+ }
+ return s
+}
+
+func (rr *NSEC) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.NextDomain, off+l, compression, false)
+ l += typeBitMapLen(rr.TypeBitMap)
+ return l
+}
+
+// DLV RR. See RFC 4431.
+type DLV struct{ DS }
+
+// CDS RR. See RFC 7344.
+type CDS struct{ DS }
+
+// DS RR. See RFC 4034 and RFC 3658.
+type DS struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *DS) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+// KX RR. See RFC 2230.
+type KX struct {
+ Hdr RR_Header
+ Preference uint16
+ Exchanger string `dns:"domain-name"`
+}
+
+func (rr *KX) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + sprintName(rr.Exchanger)
+}
+
+// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf.
+type TA struct {
+ Hdr RR_Header
+ KeyTag uint16
+ Algorithm uint8
+ DigestType uint8
+ Digest string `dns:"hex"`
+}
+
+func (rr *TA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.DigestType)) +
+ " " + strings.ToUpper(rr.Digest)
+}
+
+// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template.
+type TALINK struct {
+ Hdr RR_Header
+ PreviousName string `dns:"domain-name"`
+ NextName string `dns:"domain-name"`
+}
+
+func (rr *TALINK) String() string {
+ return rr.Hdr.String() +
+ sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
+}
+
+// SSHFP RR. See RFC 4255.
+type SSHFP struct {
+ Hdr RR_Header
+ Algorithm uint8
+ Type uint8
+ FingerPrint string `dns:"hex"`
+}
+
+func (rr *SSHFP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) +
+ " " + strconv.Itoa(int(rr.Type)) +
+ " " + strings.ToUpper(rr.FingerPrint)
+}
+
+// KEY RR. See RFC 2535.
+type KEY struct {
+ DNSKEY
+}
+
+// CDNSKEY RR. See RFC 7344.
+type CDNSKEY struct {
+ DNSKEY
+}
+
+// DNSKEY RR. See RFC 4034 and RFC 3755.
+type DNSKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *DNSKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template.
+type RKEY struct {
+ Hdr RR_Header
+ Flags uint16
+ Protocol uint8
+ Algorithm uint8
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *RKEY) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Protocol)) +
+ " " + strconv.Itoa(int(rr.Algorithm)) +
+ " " + rr.PublicKey
+}
+
+// NSAPPTR RR. See RFC 1348.
+type NSAPPTR struct {
+ Hdr RR_Header
+ Ptr string `dns:"domain-name"`
+}
+
+func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) }
+
+// NSEC3 RR. See RFC 5155.
+type NSEC3 struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"size-hex:SaltLength"`
+ HashLength uint8
+ NextDomain string `dns:"size-base32:HashLength"`
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *NSEC3) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt) +
+ " " + rr.NextDomain
+ for _, t := range rr.TypeBitMap {
+ s += " " + Type(t).String()
+ }
+ return s
+}
+
+func (rr *NSEC3) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
+ l += typeBitMapLen(rr.TypeBitMap)
+ return l
+}
+
+// NSEC3PARAM RR. See RFC 5155.
+type NSEC3PARAM struct {
+ Hdr RR_Header
+ Hash uint8
+ Flags uint8
+ Iterations uint16
+ SaltLength uint8
+ Salt string `dns:"size-hex:SaltLength"`
+}
+
+func (rr *NSEC3PARAM) String() string {
+ s := rr.Hdr.String()
+ s += strconv.Itoa(int(rr.Hash)) +
+ " " + strconv.Itoa(int(rr.Flags)) +
+ " " + strconv.Itoa(int(rr.Iterations)) +
+ " " + saltToString(rr.Salt)
+ return s
+}
+
+// TKEY RR. See RFC 2930.
+type TKEY struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ Inception uint32
+ Expiration uint32
+ Mode uint16
+ Error uint16
+ KeySize uint16
+ Key string `dns:"size-hex:KeySize"`
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// TKEY has no official presentation format, but this will suffice.
+func (rr *TKEY) String() string {
+ s := ";" + rr.Hdr.String() +
+ " " + rr.Algorithm +
+ " " + TimeToString(rr.Inception) +
+ " " + TimeToString(rr.Expiration) +
+ " " + strconv.Itoa(int(rr.Mode)) +
+ " " + strconv.Itoa(int(rr.Error)) +
+ " " + strconv.Itoa(int(rr.KeySize)) +
+ " " + rr.Key +
+ " " + strconv.Itoa(int(rr.OtherLen)) +
+ " " + rr.OtherData
+ return s
+}
+
+// RFC3597 represents an unknown/generic RR. See RFC 3597.
+type RFC3597 struct {
+ Hdr RR_Header
+ Rdata string `dns:"hex"`
+}
+
+func (rr *RFC3597) String() string {
+ // Let's call it a hack
+ s := rfc3597Header(rr.Hdr)
+
+ s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata
+ return s
+}
+
+func rfc3597Header(h RR_Header) string {
+ var s string
+
+ s += sprintName(h.Name) + "\t"
+ s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+ s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t"
+ s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t"
+ return s
+}
+
+// URI RR. See RFC 7553.
+type URI struct {
+ Hdr RR_Header
+ Priority uint16
+ Weight uint16
+ Target string `dns:"octet"`
+}
+
+func (rr *URI) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) +
+ " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
+}
+
+// DHCID RR. See RFC 4701.
+type DHCID struct {
+ Hdr RR_Header
+ Digest string `dns:"base64"`
+}
+
+func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest }
+
+// TLSA RR. See RFC 6698.
+type TLSA struct {
+ Hdr RR_Header
+ Usage uint8
+ Selector uint8
+ MatchingType uint8
+ Certificate string `dns:"hex"`
+}
+
+func (rr *TLSA) String() string {
+ return rr.Hdr.String() +
+ strconv.Itoa(int(rr.Usage)) +
+ " " + strconv.Itoa(int(rr.Selector)) +
+ " " + strconv.Itoa(int(rr.MatchingType)) +
+ " " + rr.Certificate
+}
+
+// SMIMEA RR. See RFC 8162.
+type SMIMEA struct {
+ Hdr RR_Header
+ Usage uint8
+ Selector uint8
+ MatchingType uint8
+ Certificate string `dns:"hex"`
+}
+
+func (rr *SMIMEA) String() string {
+ s := rr.Hdr.String() +
+ strconv.Itoa(int(rr.Usage)) +
+ " " + strconv.Itoa(int(rr.Selector)) +
+ " " + strconv.Itoa(int(rr.MatchingType))
+
+ // Every Nth char needs a space on this output. If we output
+ // this as one giant line, we can't read it back in because in some
+ // cases the cert length overflows scan.maxTok (2048).
+ sx := splitN(rr.Certificate, 1024) // conservative value here
+ s += " " + strings.Join(sx, " ")
+ return s
+}
+
+// HIP RR. See RFC 8005.
+type HIP struct {
+ Hdr RR_Header
+ HitLength uint8
+ PublicKeyAlgorithm uint8
+ PublicKeyLength uint16
+ Hit string `dns:"size-hex:HitLength"`
+ PublicKey string `dns:"size-base64:PublicKeyLength"`
+ RendezvousServers []string `dns:"domain-name"`
+}
+
+func (rr *HIP) String() string {
+ s := rr.Hdr.String() +
+ strconv.Itoa(int(rr.PublicKeyAlgorithm)) +
+ " " + rr.Hit +
+ " " + rr.PublicKey
+ for _, d := range rr.RendezvousServers {
+ s += " " + sprintName(d)
+ }
+ return s
+}
+
+// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template.
+type NINFO struct {
+ Hdr RR_Header
+ ZSData []string `dns:"txt"`
+}
+
+func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
+
+// NID RR. See RFC 6742.
+type NID struct {
+ Hdr RR_Header
+ Preference uint16
+ NodeID uint64
+}
+
+func (rr *NID) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16x", rr.NodeID)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+// L32 RR. See RFC 6742.
+type L32 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator32 net.IP `dns:"a"`
+}
+
+func (rr *L32) String() string {
+ if rr.Locator32 == nil {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ }
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) +
+ " " + rr.Locator32.String()
+}
+
+// L64 RR. See RFC 6742.
+type L64 struct {
+ Hdr RR_Header
+ Preference uint16
+ Locator64 uint64
+}
+
+func (rr *L64) String() string {
+ s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference))
+ node := fmt.Sprintf("%0.16X", rr.Locator64)
+ s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16]
+ return s
+}
+
+// LP RR. See RFC 6742.
+type LP struct {
+ Hdr RR_Header
+ Preference uint16
+ Fqdn string `dns:"domain-name"`
+}
+
+func (rr *LP) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
+}
+
+// EUI48 RR. See RFC 7043.
+type EUI48 struct {
+ Hdr RR_Header
+ Address uint64 `dns:"uint48"`
+}
+
+func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) }
+
+// EUI64 RR. See RFC 7043.
+type EUI64 struct {
+ Hdr RR_Header
+ Address uint64
+}
+
+func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }
+
+// CAA RR. See RFC 6844.
+type CAA struct {
+ Hdr RR_Header
+ Flag uint8
+ Tag string
+ Value string `dns:"octet"`
+}
+
+func (rr *CAA) String() string {
+ return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
+}
+
+// UID RR. Deprecated, IANA-Reserved.
+type UID struct {
+ Hdr RR_Header
+ Uid uint32
+}
+
+func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }
+
+// GID RR. Deprecated, IANA-Reserved.
+type GID struct {
+ Hdr RR_Header
+ Gid uint32
+}
+
+func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }
+
+// UINFO RR. Deprecated, IANA-Reserved.
+type UINFO struct {
+ Hdr RR_Header
+ Uinfo string
+}
+
+func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }
+
+// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
+type EID struct {
+ Hdr RR_Header
+ Endpoint string `dns:"hex"`
+}
+
+func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }
+
+// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
+type NIMLOC struct {
+ Hdr RR_Header
+ Locator string `dns:"hex"`
+}
+
+func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }
+
+// OPENPGPKEY RR. See RFC 7929.
+type OPENPGPKEY struct {
+ Hdr RR_Header
+ PublicKey string `dns:"base64"`
+}
+
+func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey }
+
+// CSYNC RR. See RFC 7477.
+type CSYNC struct {
+ Hdr RR_Header
+ Serial uint32
+ Flags uint16
+ TypeBitMap []uint16 `dns:"nsec"`
+}
+
+func (rr *CSYNC) String() string {
+ s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags))
+
+ for _, t := range rr.TypeBitMap {
+ s += " " + Type(t).String()
+ }
+ return s
+}
+
+func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 4 + 2
+ l += typeBitMapLen(rr.TypeBitMap)
+ return l
+}
+
+// APL RR. See RFC 3123.
+type APL struct {
+ Hdr RR_Header
+ Prefixes []APLPrefix `dns:"apl"`
+}
+
+// APLPrefix is an address prefix held by an APL record.
+type APLPrefix struct {
+ Negation bool
+ Network net.IPNet
+}
+
+// String returns presentation form of the APL record.
+func (rr *APL) String() string {
+ var sb strings.Builder
+ sb.WriteString(rr.Hdr.String())
+ for i, p := range rr.Prefixes {
+ if i > 0 {
+ sb.WriteByte(' ')
+ }
+ sb.WriteString(p.str())
+ }
+ return sb.String()
+}
+
+// str returns presentation form of the APL prefix.
+func (p *APLPrefix) str() string {
+ var sb strings.Builder
+ if p.Negation {
+ sb.WriteByte('!')
+ }
+
+ switch len(p.Network.IP) {
+ case net.IPv4len:
+ sb.WriteByte('1')
+ case net.IPv6len:
+ sb.WriteByte('2')
+ }
+
+ sb.WriteByte(':')
+
+ switch len(p.Network.IP) {
+ case net.IPv4len:
+ sb.WriteString(p.Network.IP.String())
+ case net.IPv6len:
+ // add prefix for IPv4-mapped IPv6
+ if v4 := p.Network.IP.To4(); v4 != nil {
+ sb.WriteString("::ffff:")
+ }
+ sb.WriteString(p.Network.IP.String())
+ }
+
+ sb.WriteByte('/')
+
+ prefix, _ := p.Network.Mask.Size()
+ sb.WriteString(strconv.Itoa(prefix))
+
+ return sb.String()
+}
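+
+// For example (editor's note; the addresses are documentation prefixes): an
+// APL record holding an IPv4 network and a negated IPv6 network renders as
+// "1:192.0.2.0/24 !2:2001:db8::/32".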
+
+// equals reports whether two APL prefixes are identical.
+func (a *APLPrefix) equals(b *APLPrefix) bool {
+ return a.Negation == b.Negation &&
+ bytes.Equal(a.Network.IP, b.Network.IP) &&
+ bytes.Equal(a.Network.Mask, b.Network.Mask)
+}
+
+// copy returns a copy of the APL prefix.
+func (p *APLPrefix) copy() APLPrefix {
+ return APLPrefix{
+ Negation: p.Negation,
+ Network: copyNet(p.Network),
+ }
+}
+
+// len returns size of the prefix in wire format.
+func (p *APLPrefix) len() int {
+ // 4-byte header and the network address prefix (see Section 4 of RFC 3123)
+ prefix, _ := p.Network.Mask.Size()
+ return 4 + (prefix+7)/8
+}
+
+// TimeToString translates the RRSIG's inception and expiration times to the
+// string representation used when printing the record.
+// It takes serial arithmetic (RFC 1982) into account.
+func TimeToString(t uint32) string {
+ mod := (int64(t)-time.Now().Unix())/year68 - 1
+ if mod < 0 {
+ mod = 0
+ }
+ ti := time.Unix(int64(t)-mod*year68, 0).UTC()
+ return ti.Format("20060102150405")
+}
+
+// StringToTime translates the RRSIG's inception and expiration times from
+// string values like "20110403154150" to a 32 bit integer.
+// It takes serial arithmetic (RFC 1982) into account.
+func StringToTime(s string) (uint32, error) {
+ t, err := time.Parse("20060102150405", s)
+ if err != nil {
+ return 0, err
+ }
+ mod := t.Unix()/year68 - 1
+ if mod < 0 {
+ mod = 0
+ }
+ return uint32(t.Unix() - mod*year68), nil
+}
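+
+// A round-trip sketch (editor's note; the value is illustrative): within
+// roughly +/-68 years of the current time the two functions invert each
+// other:
+//
+//  s := TimeToString(rrsig.Expiration) // e.g. "20110403154150"
+//  v, err := StringToTime(s)           // v == rrsig.Expiration when err == nil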
+
+// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.
+func saltToString(s string) string {
+ if len(s) == 0 {
+ return "-"
+ }
+ return strings.ToUpper(s)
+}
+
+func euiToString(eui uint64, bits int) (hex string) {
+ switch bits {
+ case 64:
+ hex = fmt.Sprintf("%16.16x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16]
+ case 48:
+ hex = fmt.Sprintf("%12.12x", eui)
+ hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] +
+ "-" + hex[8:10] + "-" + hex[10:12]
+ }
+ return
+}
+
+// copyIP returns a copy of ip.
+func copyIP(ip net.IP) net.IP {
+ p := make(net.IP, len(ip))
+ copy(p, ip)
+ return p
+}
+
+// copyNet returns a copy of a subnet.
+func copyNet(n net.IPNet) net.IPNet {
+ m := make(net.IPMask, len(n.Mask))
+ copy(m, n.Mask)
+
+ return net.IPNet{
+ IP: copyIP(n.IP),
+ Mask: m,
+ }
+}
+
+// splitN splits a string into chunks of n characters.
+// This might become an exported function eventually.
+func splitN(s string, n int) []string {
+ if len(s) < n {
+ return []string{s}
+ }
+ sx := []string{}
+ p, i := 0, n
+ for {
+ if i < len(s) {
+ sx = append(sx, s[p:i])
+ } else {
+ // final chunk; using < above avoids appending an empty
+ // trailing chunk when len(s) is an exact multiple of n
+ sx = append(sx, s[p:])
+ break
+ }
+ p, i = p+n, i+n
+ }
+
+ return sx
+}
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
new file mode 100644
index 0000000..a4826ee
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -0,0 +1,102 @@
+// +build !windows
+
+package dns
+
+import (
+ "net"
+
+ "golang.org/x/net/ipv4"
+ "golang.org/x/net/ipv6"
+)
+
+// This is the required size of the OOB buffer to pass to ReadMsgUDP.
+var udpOOBSize = func() int {
+ // We can't know whether we'll get an IPv4 control message or an
+ // IPv6 control message ahead of time. To get around this, we size
+ // the buffer equal to the largest of the two.
+
+ oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface)
+ oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface)
+
+ if len(oob4) > len(oob6) {
+ return len(oob4)
+ }
+
+ return len(oob6)
+}()
+
+// SessionUDP holds the remote address and the associated
+// out-of-band data.
+type SessionUDP struct {
+ raddr *net.UDPAddr
+ context []byte
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ oob := make([]byte, udpOOBSize)
+ n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
+ if err != nil {
+ return n, nil, err
+ }
+ return n, &SessionUDP{raddr, oob[:oobn]}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ oob := correctSource(session.context)
+ n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
+ return n, err
+}
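+
+// A minimal usage sketch (editor's note; conn is assumed to come from
+// net.ListenUDP and error handling is elided):
+//
+//  buf := make([]byte, MaxMsgSize)
+//  n, session, err := ReadFromSessionUDP(conn, buf)
+//  if err == nil {
+//      _, _ = WriteToSessionUDP(conn, buf[:n], session) // reply to the sender
+//  }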
+
+func setUDPSocketOptions(conn *net.UDPConn) error {
+ // Try setting the flags for both families and ignore the errors unless they
+ // both error.
+ err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
+ err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
+ if err6 != nil && err4 != nil {
+ return err4
+ }
+ return nil
+}
+
+// parseDstFromOOB takes oob data and returns the destination IP.
+func parseDstFromOOB(oob []byte) net.IP {
+ // Start with IPv6 and then fall back to IPv4.
+ // TODO(fastest963): Figure out a way to prefer one or the other. Looking at
+ // the lvl of the header for a 0 or 41 isn't cross-platform.
+ cm6 := new(ipv6.ControlMessage)
+ if cm6.Parse(oob) == nil && cm6.Dst != nil {
+ return cm6.Dst
+ }
+ cm4 := new(ipv4.ControlMessage)
+ if cm4.Parse(oob) == nil && cm4.Dst != nil {
+ return cm4.Dst
+ }
+ return nil
+}
+
+// correctSource takes oob data and returns new oob data with the Src equal to the Dst
+func correctSource(oob []byte) []byte {
+ dst := parseDstFromOOB(oob)
+ if dst == nil {
+ return nil
+ }
+ // If the dst is definitely an IPv6, then use ipv6's ControlMessage to
+ // respond otherwise use ipv4's because ipv6's marshal ignores ipv4
+ // addresses.
+ if dst.To4() == nil {
+ cm := new(ipv6.ControlMessage)
+ cm.Src = dst
+ oob = cm.Marshal()
+ } else {
+ cm := new(ipv4.ControlMessage)
+ cm.Src = dst
+ oob = cm.Marshal()
+ }
+ return oob
+}
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
new file mode 100644
index 0000000..e7dd8ca
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -0,0 +1,35 @@
+// +build windows
+
+package dns
+
+import "net"
+
+// SessionUDP holds the remote address
+type SessionUDP struct {
+ raddr *net.UDPAddr
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ n, raddr, err := conn.ReadFrom(b)
+ if err != nil {
+ return n, nil, err
+ }
+ return n, &SessionUDP{raddr.(*net.UDPAddr)}, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ return conn.WriteTo(b, session.raddr)
+}
+
+// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
+// use the standard method in udp.go for these.
+func setUDPSocketOptions(*net.UDPConn) error { return nil }
+func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go
new file mode 100644
index 0000000..69dd386
--- /dev/null
+++ b/vendor/github.com/miekg/dns/update.go
@@ -0,0 +1,110 @@
+package dns
+
+// NameUsed sets the RRs in the prereq section to
+// "Name is in use" RRs. RFC 2136 section 2.4.4.
+func (u *Msg) NameUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+ }
+}
+
+// NameNotUsed sets the RRs in the prereq section to
+// "Name is in not use" RRs. RFC 2136 section 2.4.5.
+func (u *Msg) NameNotUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}})
+ }
+}
+
+// Used sets the RRs in the prereq section to
+// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
+func (u *Msg) Used(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ r.Header().Class = u.Question[0].Qclass
+ u.Answer = append(u.Answer, r)
+ }
+}
+
+// RRsetUsed sets the RRs in the prereq section to
+// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1.
+func (u *Msg) RRsetUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ h := r.Header()
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
+ }
+}
+
+// RRsetNotUsed sets the RRs in the prereq section to
+// "RRset does not exist" RRs. RFC 2136 section 2.4.3.
+func (u *Msg) RRsetNotUsed(rr []RR) {
+ if u.Answer == nil {
+ u.Answer = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ h := r.Header()
+ u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
+ }
+}
+
+// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1.
+func (u *Msg) Insert(rr []RR) {
+ if len(u.Question) == 0 {
+ panic("dns: empty question section")
+ }
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ r.Header().Class = u.Question[0].Qclass
+ u.Ns = append(u.Ns, r)
+ }
+}
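+
+// A hedged usage sketch (editor's note): callers typically prepare the
+// message with SetUpdate (defined elsewhere in this package) before using
+// the helpers in this file:
+//
+//  m := new(Msg)
+//  m.SetUpdate("example.org.")
+//  rr, _ := NewRR("host.example.org. 300 IN A 192.0.2.1")
+//  m.Insert([]RR{rr})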
+
+// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2.
+func (u *Msg) RemoveRRset(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ h := r.Header()
+ u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
+ }
+}
+
+// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3.
+func (u *Msg) RemoveName(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}})
+ }
+}
+
+// Remove creates a dynamic update packet that deletes an RR from an RRset, see RFC 2136 section 2.5.4.
+func (u *Msg) Remove(rr []RR) {
+ if u.Ns == nil {
+ u.Ns = make([]RR, 0, len(rr))
+ }
+ for _, r := range rr {
+ h := r.Header()
+ h.Class = ClassNONE
+ h.Ttl = 0
+ u.Ns = append(u.Ns, r)
+ }
+}
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
new file mode 100644
index 0000000..cab46b4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/version.go
@@ -0,0 +1,15 @@
+package dns
+
+import "fmt"
+
+// Version is the current version of this library.
+var Version = V{1, 1, 27}
+
+// V holds the version of this library.
+type V struct {
+ Major, Minor, Patch int
+}
+
+func (v V) String() string {
+ return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
+}
diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go
new file mode 100644
index 0000000..43970e6
--- /dev/null
+++ b/vendor/github.com/miekg/dns/xfr.go
@@ -0,0 +1,266 @@
+package dns
+
+import (
+ "fmt"
+ "time"
+)
+
+// Envelope is used when doing a zone transfer with a remote server.
+type Envelope struct {
+ RR []RR // The set of RRs in the answer section of the xfr reply message.
+ Error error // If something went wrong, this contains the error.
+}
+
+// A Transfer defines parameters that are used during a zone transfer.
+type Transfer struct {
+ *Conn
+ DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
+ ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
+ WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
+ TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
+ tsigTimersOnly bool
+}
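+
+// For a TSIG-protected transfer (editor's sketch; the key name and secret are
+// placeholders), TsigSecret is consulted using the name on the message's
+// TSIG record:
+//
+//  tr := new(Transfer)
+//  tr.TsigSecret = map[string]string{"axfr.example.org.": "c2VjcmV0IGtleQ=="}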
+
+// Think we need a way to stop the transfer
+
+// In performs an incoming transfer with the server in a.
+// If you would like to set the source IP, or some other attribute
+// of a Dialer for a Transfer, you can do so by specifying the attributes
+// in the Transfer.Conn:
+//
+// d := net.Dialer{LocalAddr: transfer_source}
+// con, err := d.Dial("tcp", master)
+// dnscon := &dns.Conn{Conn:con}
+// transfer = &dns.Transfer{Conn: dnscon}
+// channel, err := transfer.In(message, master)
+//
+func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
+ switch q.Question[0].Qtype {
+ case TypeAXFR, TypeIXFR:
+ default:
+ return nil, &Error{"unsupported question type"}
+ }
+
+ timeout := dnsTimeout
+ if t.DialTimeout != 0 {
+ timeout = t.DialTimeout
+ }
+
+ if t.Conn == nil {
+ t.Conn, err = DialTimeout("tcp", a, timeout)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err := t.WriteMsg(q); err != nil {
+ return nil, err
+ }
+
+ env = make(chan *Envelope)
+ switch q.Question[0].Qtype {
+ case TypeAXFR:
+ go t.inAxfr(q, env)
+ case TypeIXFR:
+ go t.inIxfr(q, env)
+ }
+
+ return env, nil
+}
+
+func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
+ first := true
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.Conn.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if q.Id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if first {
+ if in.Rcode != RcodeSuccess {
+ c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
+ return
+ }
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ first = !first
+ // only one answer, the initial SOA; more envelopes follow
+ if len(in.Answer) == 1 {
+ t.tsigTimersOnly = true
+ c <- &Envelope{in.Answer, nil}
+ continue
+ }
+ }
+
+ if !first {
+ t.tsigTimersOnly = true // Subsequent envelopes use this.
+ if isSOALast(in) {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+ }
+}
+
+func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
+ var serial uint32 // The first serial seen is the current server serial
+ axfr := true
+ n := 0
+ qser := q.Ns[0].(*SOA).Serial
+ defer t.Close()
+ defer close(c)
+ timeout := dnsTimeout
+ if t.ReadTimeout != 0 {
+ timeout = t.ReadTimeout
+ }
+ for {
+ t.SetReadDeadline(time.Now().Add(timeout))
+ in, err := t.ReadMsg()
+ if err != nil {
+ c <- &Envelope{nil, err}
+ return
+ }
+ if q.Id != in.Id {
+ c <- &Envelope{in.Answer, ErrId}
+ return
+ }
+ if in.Rcode != RcodeSuccess {
+ c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
+ return
+ }
+ if n == 0 {
+ // Check if the returned answer is ok
+ if !isSOAFirst(in) {
+ c <- &Envelope{in.Answer, ErrSoa}
+ return
+ }
+ // This serial is important
+ serial = in.Answer[0].(*SOA).Serial
+ // Check if there are no changes in zone
+ if qser >= serial {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ }
+ // Now we need to check each message for SOA records, to see what we need to do
+ t.tsigTimersOnly = true
+ for _, rr := range in.Answer {
+ if v, ok := rr.(*SOA); ok {
+ if v.Serial == serial {
+ n++
+ // quit if it's a full axfr or the server's SOA is repeated a third time
+ if axfr && n == 2 || n == 3 {
+ c <- &Envelope{in.Answer, nil}
+ return
+ }
+ } else if axfr {
+ // it's an ixfr
+ axfr = false
+ }
+ }
+ }
+ c <- &Envelope{in.Answer, nil}
+ }
+}
+
+// Out performs an outgoing transfer with the client connecting in w.
+// Basic use pattern:
+//
+// ch := make(chan *dns.Envelope)
+// tr := new(dns.Transfer)
+// var wg sync.WaitGroup
+// go func() {
+// tr.Out(w, r, ch)
+// wg.Done()
+// }()
+// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}}
+// close(ch)
+// wg.Wait() // wait until everything is written out
+// w.Close() // close connection
+//
+// The server is responsible for sending the correct sequence of RRs through the channel ch.
+func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error {
+ for x := range ch {
+ r := new(Msg)
+ // Compress?
+ r.SetReply(q)
+ r.Authoritative = true
+ // assume it fits TODO(miek): fix
+ r.Answer = append(r.Answer, x.RR...)
+ if tsig := q.IsTsig(); tsig != nil && w.TsigStatus() == nil {
+ r.SetTsig(tsig.Hdr.Name, tsig.Algorithm, tsig.Fudge, time.Now().Unix())
+ }
+ if err := w.WriteMsg(r); err != nil {
+ return err
+ }
+ w.TsigTimersOnly(true)
+ }
+ return nil
+}
+
+// ReadMsg reads a message from the transfer connection t.
+func (t *Transfer) ReadMsg() (*Msg, error) {
+ m := new(Msg)
+ p := make([]byte, MaxMsgSize)
+ n, err := t.Read(p)
+ if err != nil && n == 0 {
+ return nil, err
+ }
+ p = p[:n]
+ if err := m.Unpack(p); err != nil {
+ return nil, err
+ }
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return m, ErrSecret
+ }
+ // Need to work on the original message p, as that was used to calculate the tsig.
+ err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ t.tsigRequestMAC = ts.MAC
+ }
+ return m, err
+}
+
+// WriteMsg writes a message through the transfer connection t.
+func (t *Transfer) WriteMsg(m *Msg) (err error) {
+ var out []byte
+ if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil {
+ if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok {
+ return ErrSecret
+ }
+ out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly)
+ } else {
+ out, err = m.Pack()
+ }
+ if err != nil {
+ return err
+ }
+ _, err = t.Write(out)
+ return err
+}
+
+func isSOAFirst(in *Msg) bool {
+ return len(in.Answer) > 0 &&
+ in.Answer[0].Header().Rrtype == TypeSOA
+}
+
+func isSOALast(in *Msg) bool {
+ return len(in.Answer) > 0 &&
+ in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
+}
+
+const errXFR = "bad xfr rcode: %d"
diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go
new file mode 100644
index 0000000..a58a8c0
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zduplicate.go
@@ -0,0 +1,1157 @@
+// Code generated by "go run duplicate_generate.go"; DO NOT EDIT.
+
+package dns
+
+// isDuplicate() functions
+
+func (r1 *A) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*A)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !r1.A.Equal(r2.A) {
+ return false
+ }
+ return true
+}
+
+func (r1 *AAAA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*AAAA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !r1.AAAA.Equal(r2.AAAA) {
+ return false
+ }
+ return true
+}
+
+func (r1 *AFSDB) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*AFSDB)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Subtype != r2.Subtype {
+ return false
+ }
+ if !isDuplicateName(r1.Hostname, r2.Hostname) {
+ return false
+ }
+ return true
+}
+
+func (r1 *ANY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*ANY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ return true
+}
+
+func (r1 *APL) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*APL)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.Prefixes) != len(r2.Prefixes) {
+ return false
+ }
+ for i := 0; i < len(r1.Prefixes); i++ {
+ if !r1.Prefixes[i].equals(&r2.Prefixes[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *AVC) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*AVC)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.Txt) != len(r2.Txt) {
+ return false
+ }
+ for i := 0; i < len(r1.Txt); i++ {
+ if r1.Txt[i] != r2.Txt[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *CAA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*CAA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Flag != r2.Flag {
+ return false
+ }
+ if r1.Tag != r2.Tag {
+ return false
+ }
+ if r1.Value != r2.Value {
+ return false
+ }
+ return true
+}
+
+func (r1 *CERT) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*CERT)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Type != r2.Type {
+ return false
+ }
+ if r1.KeyTag != r2.KeyTag {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.Certificate != r2.Certificate {
+ return false
+ }
+ return true
+}
+
+func (r1 *CNAME) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*CNAME)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Target, r2.Target) {
+ return false
+ }
+ return true
+}
+
+func (r1 *CSYNC) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*CSYNC)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Serial != r2.Serial {
+ return false
+ }
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
+ return false
+ }
+ for i := 0; i < len(r1.TypeBitMap); i++ {
+ if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *DHCID) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*DHCID)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Digest != r2.Digest {
+ return false
+ }
+ return true
+}
+
+func (r1 *DNAME) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*DNAME)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Target, r2.Target) {
+ return false
+ }
+ return true
+}
+
+func (r1 *DNSKEY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*DNSKEY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if r1.Protocol != r2.Protocol {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.PublicKey != r2.PublicKey {
+ return false
+ }
+ return true
+}
+
+func (r1 *DS) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*DS)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.KeyTag != r2.KeyTag {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.DigestType != r2.DigestType {
+ return false
+ }
+ if r1.Digest != r2.Digest {
+ return false
+ }
+ return true
+}
+
+func (r1 *EID) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*EID)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Endpoint != r2.Endpoint {
+ return false
+ }
+ return true
+}
+
+func (r1 *EUI48) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*EUI48)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Address != r2.Address {
+ return false
+ }
+ return true
+}
+
+func (r1 *EUI64) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*EUI64)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Address != r2.Address {
+ return false
+ }
+ return true
+}
+
+func (r1 *GID) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*GID)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Gid != r2.Gid {
+ return false
+ }
+ return true
+}
+
+func (r1 *GPOS) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*GPOS)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Longitude != r2.Longitude {
+ return false
+ }
+ if r1.Latitude != r2.Latitude {
+ return false
+ }
+ if r1.Altitude != r2.Altitude {
+ return false
+ }
+ return true
+}
+
+func (r1 *HINFO) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*HINFO)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Cpu != r2.Cpu {
+ return false
+ }
+ if r1.Os != r2.Os {
+ return false
+ }
+ return true
+}
+
+func (r1 *HIP) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*HIP)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.HitLength != r2.HitLength {
+ return false
+ }
+ if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm {
+ return false
+ }
+ if r1.PublicKeyLength != r2.PublicKeyLength {
+ return false
+ }
+ if r1.Hit != r2.Hit {
+ return false
+ }
+ if r1.PublicKey != r2.PublicKey {
+ return false
+ }
+ if len(r1.RendezvousServers) != len(r2.RendezvousServers) {
+ return false
+ }
+ for i := 0; i < len(r1.RendezvousServers); i++ {
+ if !isDuplicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *KX) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*KX)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !isDuplicateName(r1.Exchanger, r2.Exchanger) {
+ return false
+ }
+ return true
+}
+
+func (r1 *L32) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*L32)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !r1.Locator32.Equal(r2.Locator32) {
+ return false
+ }
+ return true
+}
+
+func (r1 *L64) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*L64)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if r1.Locator64 != r2.Locator64 {
+ return false
+ }
+ return true
+}
+
+func (r1 *LOC) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*LOC)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Version != r2.Version {
+ return false
+ }
+ if r1.Size != r2.Size {
+ return false
+ }
+ if r1.HorizPre != r2.HorizPre {
+ return false
+ }
+ if r1.VertPre != r2.VertPre {
+ return false
+ }
+ if r1.Latitude != r2.Latitude {
+ return false
+ }
+ if r1.Longitude != r2.Longitude {
+ return false
+ }
+ if r1.Altitude != r2.Altitude {
+ return false
+ }
+ return true
+}
+
+func (r1 *LP) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*LP)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !isDuplicateName(r1.Fqdn, r2.Fqdn) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MB) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MB)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Mb, r2.Mb) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MD) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MD)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Md, r2.Md) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MF) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MF)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Mf, r2.Mf) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MG) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MG)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Mg, r2.Mg) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MINFO) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MINFO)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Rmail, r2.Rmail) {
+ return false
+ }
+ if !isDuplicateName(r1.Email, r2.Email) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MR) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MR)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Mr, r2.Mr) {
+ return false
+ }
+ return true
+}
+
+func (r1 *MX) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*MX)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !isDuplicateName(r1.Mx, r2.Mx) {
+ return false
+ }
+ return true
+}
+
+func (r1 *NAPTR) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NAPTR)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Order != r2.Order {
+ return false
+ }
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if r1.Service != r2.Service {
+ return false
+ }
+ if r1.Regexp != r2.Regexp {
+ return false
+ }
+ if !isDuplicateName(r1.Replacement, r2.Replacement) {
+ return false
+ }
+ return true
+}
+
+func (r1 *NID) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NID)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if r1.NodeID != r2.NodeID {
+ return false
+ }
+ return true
+}
+
+func (r1 *NIMLOC) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NIMLOC)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Locator != r2.Locator {
+ return false
+ }
+ return true
+}
+
+func (r1 *NINFO) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NINFO)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.ZSData) != len(r2.ZSData) {
+ return false
+ }
+ for i := 0; i < len(r1.ZSData); i++ {
+ if r1.ZSData[i] != r2.ZSData[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *NS) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NS)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Ns, r2.Ns) {
+ return false
+ }
+ return true
+}
+
+func (r1 *NSAPPTR) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NSAPPTR)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Ptr, r2.Ptr) {
+ return false
+ }
+ return true
+}
+
+func (r1 *NSEC) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NSEC)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.NextDomain, r2.NextDomain) {
+ return false
+ }
+ if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
+ return false
+ }
+ for i := 0; i < len(r1.TypeBitMap); i++ {
+ if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *NSEC3) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NSEC3)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Hash != r2.Hash {
+ return false
+ }
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if r1.Iterations != r2.Iterations {
+ return false
+ }
+ if r1.SaltLength != r2.SaltLength {
+ return false
+ }
+ if r1.Salt != r2.Salt {
+ return false
+ }
+ if r1.HashLength != r2.HashLength {
+ return false
+ }
+ if r1.NextDomain != r2.NextDomain {
+ return false
+ }
+ if len(r1.TypeBitMap) != len(r2.TypeBitMap) {
+ return false
+ }
+ for i := 0; i < len(r1.TypeBitMap); i++ {
+ if r1.TypeBitMap[i] != r2.TypeBitMap[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *NSEC3PARAM) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NSEC3PARAM)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Hash != r2.Hash {
+ return false
+ }
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if r1.Iterations != r2.Iterations {
+ return false
+ }
+ if r1.SaltLength != r2.SaltLength {
+ return false
+ }
+ if r1.Salt != r2.Salt {
+ return false
+ }
+ return true
+}
+
+func (r1 *NULL) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*NULL)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Data != r2.Data {
+ return false
+ }
+ return true
+}
+
+func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*OPENPGPKEY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.PublicKey != r2.PublicKey {
+ return false
+ }
+ return true
+}
+
+func (r1 *PTR) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*PTR)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Ptr, r2.Ptr) {
+ return false
+ }
+ return true
+}
+
+func (r1 *PX) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*PX)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !isDuplicateName(r1.Map822, r2.Map822) {
+ return false
+ }
+ if !isDuplicateName(r1.Mapx400, r2.Mapx400) {
+ return false
+ }
+ return true
+}
+
+func (r1 *RFC3597) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RFC3597)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Rdata != r2.Rdata {
+ return false
+ }
+ return true
+}
+
+func (r1 *RKEY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RKEY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Flags != r2.Flags {
+ return false
+ }
+ if r1.Protocol != r2.Protocol {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.PublicKey != r2.PublicKey {
+ return false
+ }
+ return true
+}
+
+func (r1 *RP) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RP)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Mbox, r2.Mbox) {
+ return false
+ }
+ if !isDuplicateName(r1.Txt, r2.Txt) {
+ return false
+ }
+ return true
+}
+
+func (r1 *RRSIG) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RRSIG)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.TypeCovered != r2.TypeCovered {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.Labels != r2.Labels {
+ return false
+ }
+ if r1.OrigTtl != r2.OrigTtl {
+ return false
+ }
+ if r1.Expiration != r2.Expiration {
+ return false
+ }
+ if r1.Inception != r2.Inception {
+ return false
+ }
+ if r1.KeyTag != r2.KeyTag {
+ return false
+ }
+ if !isDuplicateName(r1.SignerName, r2.SignerName) {
+ return false
+ }
+ if r1.Signature != r2.Signature {
+ return false
+ }
+ return true
+}
+
+func (r1 *RT) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*RT)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Preference != r2.Preference {
+ return false
+ }
+ if !isDuplicateName(r1.Host, r2.Host) {
+ return false
+ }
+ return true
+}
+
+func (r1 *SMIMEA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*SMIMEA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Usage != r2.Usage {
+ return false
+ }
+ if r1.Selector != r2.Selector {
+ return false
+ }
+ if r1.MatchingType != r2.MatchingType {
+ return false
+ }
+ if r1.Certificate != r2.Certificate {
+ return false
+ }
+ return true
+}
+
+func (r1 *SOA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*SOA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Ns, r2.Ns) {
+ return false
+ }
+ if !isDuplicateName(r1.Mbox, r2.Mbox) {
+ return false
+ }
+ if r1.Serial != r2.Serial {
+ return false
+ }
+ if r1.Refresh != r2.Refresh {
+ return false
+ }
+ if r1.Retry != r2.Retry {
+ return false
+ }
+ if r1.Expire != r2.Expire {
+ return false
+ }
+ if r1.Minttl != r2.Minttl {
+ return false
+ }
+ return true
+}
+
+func (r1 *SPF) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*SPF)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.Txt) != len(r2.Txt) {
+ return false
+ }
+ for i := 0; i < len(r1.Txt); i++ {
+ if r1.Txt[i] != r2.Txt[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *SRV) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*SRV)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Priority != r2.Priority {
+ return false
+ }
+ if r1.Weight != r2.Weight {
+ return false
+ }
+ if r1.Port != r2.Port {
+ return false
+ }
+ if !isDuplicateName(r1.Target, r2.Target) {
+ return false
+ }
+ return true
+}
+
+func (r1 *SSHFP) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*SSHFP)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.Type != r2.Type {
+ return false
+ }
+ if r1.FingerPrint != r2.FingerPrint {
+ return false
+ }
+ return true
+}
+
+func (r1 *TA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.KeyTag != r2.KeyTag {
+ return false
+ }
+ if r1.Algorithm != r2.Algorithm {
+ return false
+ }
+ if r1.DigestType != r2.DigestType {
+ return false
+ }
+ if r1.Digest != r2.Digest {
+ return false
+ }
+ return true
+}
+
+func (r1 *TALINK) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TALINK)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.PreviousName, r2.PreviousName) {
+ return false
+ }
+ if !isDuplicateName(r1.NextName, r2.NextName) {
+ return false
+ }
+ return true
+}
+
+func (r1 *TKEY) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TKEY)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Algorithm, r2.Algorithm) {
+ return false
+ }
+ if r1.Inception != r2.Inception {
+ return false
+ }
+ if r1.Expiration != r2.Expiration {
+ return false
+ }
+ if r1.Mode != r2.Mode {
+ return false
+ }
+ if r1.Error != r2.Error {
+ return false
+ }
+ if r1.KeySize != r2.KeySize {
+ return false
+ }
+ if r1.Key != r2.Key {
+ return false
+ }
+ if r1.OtherLen != r2.OtherLen {
+ return false
+ }
+ if r1.OtherData != r2.OtherData {
+ return false
+ }
+ return true
+}
+
+func (r1 *TLSA) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TLSA)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Usage != r2.Usage {
+ return false
+ }
+ if r1.Selector != r2.Selector {
+ return false
+ }
+ if r1.MatchingType != r2.MatchingType {
+ return false
+ }
+ if r1.Certificate != r2.Certificate {
+ return false
+ }
+ return true
+}
+
+func (r1 *TSIG) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TSIG)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if !isDuplicateName(r1.Algorithm, r2.Algorithm) {
+ return false
+ }
+ if r1.TimeSigned != r2.TimeSigned {
+ return false
+ }
+ if r1.Fudge != r2.Fudge {
+ return false
+ }
+ if r1.MACSize != r2.MACSize {
+ return false
+ }
+ if r1.MAC != r2.MAC {
+ return false
+ }
+ if r1.OrigId != r2.OrigId {
+ return false
+ }
+ if r1.Error != r2.Error {
+ return false
+ }
+ if r1.OtherLen != r2.OtherLen {
+ return false
+ }
+ if r1.OtherData != r2.OtherData {
+ return false
+ }
+ return true
+}
+
+func (r1 *TXT) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*TXT)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if len(r1.Txt) != len(r2.Txt) {
+ return false
+ }
+ for i := 0; i < len(r1.Txt); i++ {
+ if r1.Txt[i] != r2.Txt[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (r1 *UID) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*UID)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Uid != r2.Uid {
+ return false
+ }
+ return true
+}
+
+func (r1 *UINFO) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*UINFO)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Uinfo != r2.Uinfo {
+ return false
+ }
+ return true
+}
+
+func (r1 *URI) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*URI)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.Priority != r2.Priority {
+ return false
+ }
+ if r1.Weight != r2.Weight {
+ return false
+ }
+ if r1.Target != r2.Target {
+ return false
+ }
+ return true
+}
+
+func (r1 *X25) isDuplicate(_r2 RR) bool {
+ r2, ok := _r2.(*X25)
+ if !ok {
+ return false
+ }
+ _ = r2
+ if r1.PSDNAddress != r2.PSDNAddress {
+ return false
+ }
+ return true
+}
diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go
new file mode 100644
index 0000000..02a5dfa
--- /dev/null
+++ b/vendor/github.com/miekg/dns/zmsg.go
@@ -0,0 +1,2741 @@
+// Code generated by "go run msg_generate.go"; DO NOT EDIT.
+
+package dns
+
+// pack*() functions
+
+func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDataA(rr.A, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDataAAAA(rr.AAAA, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Subtype, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Hostname, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ return off, nil
+}
+
+func (rr *APL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDataApl(rr.Prefixes, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Flag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Tag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringOctet(rr.Value, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Type, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Certificate, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Target, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint32(rr.Serial, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataNsec(rr.TypeBitMap, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringBase64(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Target, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringHex(rr.Endpoint, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint48(rr.Address, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint64(rr.Address, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint32(rr.Gid, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packString(rr.Longitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Latitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Altitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packString(rr.Cpu, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Os, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.HitLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.PublicKeyAlgorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.PublicKeyLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Hit, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Exchanger, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataA(rr.Locator32, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint64(rr.Locator64, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Version, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Size, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.HorizPre, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.VertPre, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Latitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Longitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Altitude, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Fqdn, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Mb, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MD) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Md, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Mf, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Mg, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Rmail, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Email, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Mr, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Mx, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Order, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Service, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packString(rr.Regexp, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Replacement, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint64(rr.NodeID, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringHex(rr.Locator, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringTxt(rr.ZSData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Ns, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Ptr, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.NextDomain, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataNsec(rr.TypeBitMap, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Hash, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Iterations, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.SaltLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ // Only pack salt if value is not "-", i.e. empty
+ if rr.Salt != "-" {
+ off, err = packStringHex(rr.Salt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ }
+ off, err = packUint8(rr.HashLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase32(rr.NextDomain, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDataNsec(rr.TypeBitMap, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
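+
+// The "-" check above exists because Salt is held in presentation form,
+// where a lone "-" denotes the empty salt; on the wire an empty salt is
+// simply SaltLength == 0, and the unpack side further below reads back
+// exactly SaltLength bytes as hex.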
+
+func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Hash, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Iterations, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.SaltLength, msg, off)
+ if err != nil {
+ return off, err
+ }
+ // Only pack salt if value is not "-", i.e. empty
+ if rr.Salt != "-" {
+ off, err = packStringHex(rr.Salt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ }
+ return off, nil
+}
+
+func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringAny(rr.Data, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDataOpt(rr.Option, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Ptr, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Map822, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Mapx400, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringHex(rr.Rdata, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Flags, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Protocol, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.PublicKey, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Mbox, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Txt, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.TypeCovered, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.SignerName, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Signature, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Preference, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Host, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.TypeCovered, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Labels, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.OrigTtl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.SignerName, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringBase64(rr.Signature, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Usage, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Selector, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.MatchingType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Certificate, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Ns, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Mbox, msg, off, compression, compress)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Serial, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Refresh, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Retry, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expire, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Minttl, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Priority, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Weight, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Port, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.Target, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Type, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.FingerPrint, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.KeyTag, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Algorithm, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.DigestType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Digest, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.PreviousName, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packDomainName(rr.NextName, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Algorithm, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Inception, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint32(rr.Expiration, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Mode, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.KeySize, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Key, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint8(rr.Usage, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.Selector, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint8(rr.MatchingType, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.Certificate, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packDomainName(rr.Algorithm, msg, off, compression, false)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint48(rr.TimeSigned, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Fudge, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.MACSize, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.MAC, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OrigId, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Error, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.OtherLen, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringHex(rr.OtherData, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packStringTxt(rr.Txt, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint32(rr.Uid, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packString(rr.Uinfo, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packUint16(rr.Priority, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packUint16(rr.Weight, msg, off)
+ if err != nil {
+ return off, err
+ }
+ off, err = packStringOctet(rr.Target, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
+ off, err = packString(rr.PSDNAddress, msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
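+
+// The pack methods above are reached through the exported PackRR, which
+// writes the RR header, reserves the RDLENGTH field, invokes pack, and
+// then patches RDLENGTH with the number of bytes written. A minimal
+// usage sketch:
+//
+//	buf := make([]byte, 512)
+//	off, err := PackRR(rr, buf, 0, nil, false)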
+
+// unpack*() functions
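+//
+// Shared conventions in the unpack methods below: fields are decoded in
+// RDATA wire order; the recurring `if off == len(msg)` checks return
+// early so a record whose RDATA ends at a field boundary still unpacks;
+// variable-length trailing fields are bounded by rdStart+Rdlength.
+// Callers normally reach these methods via the exported UnpackRR:
+//
+//	rr, next, err := UnpackRR(msg, off)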
+
+func (rr *A) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.A, off, err = unpackDataA(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AAAA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.AAAA, off, err = unpackDataAAAA(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Subtype, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Hostname, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *ANY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ return off, nil
+}
+
+func (rr *APL) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Prefixes, off, err = unpackDataApl(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *AVC) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CAA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Flag, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Tag, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Value, off, err = unpackStringOctet(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CDNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CDS) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CERT) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Type, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CNAME) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *CSYNC) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Serial, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DHCID) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DLV) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DNAME) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DNSKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *DS) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EID) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EUI48) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Address, off, err = unpackUint48(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *EUI64) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Address, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *GID) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Gid, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *GPOS) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Longitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Latitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Altitude, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *HINFO) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Cpu, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Os, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *HIP) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.HitLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKeyLength, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength))
+ if err != nil {
+ return off, err
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength))
+ if err != nil {
+ return off, err
+ }
+ rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *KX) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Exchanger, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *L32) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Locator32, off, err = unpackDataA(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *L64) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Locator64, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *LOC) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Version, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Size, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.HorizPre, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.VertPre, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Latitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Longitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Altitude, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *LP) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Fqdn, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MB) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Mb, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MD) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Md, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MF) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Mf, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MG) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Mg, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MINFO) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Rmail, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Email, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MR) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Mr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *MX) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Mx, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NAPTR) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Order, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Flags, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Service, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Regexp, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Replacement, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NID) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.NodeID, off, err = unpackUint64(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NIMLOC) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NINFO) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.ZSData, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NS) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Ns, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSAPPTR) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Ptr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSEC) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.NextDomain, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSEC3) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Hash, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Flags, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Iterations, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.SaltLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
+ if err != nil {
+ return off, err
+ }
+ rr.HashLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength))
+ if err != nil {
+ return off, err
+ }
+ rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NSEC3PARAM) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Hash, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Flags, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Iterations, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.SaltLength, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Data, off, err = unpackStringAny(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *OPENPGPKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *OPT) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Option, off, err = unpackDataOpt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *PTR) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Ptr, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *PX) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Map822, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Mapx400, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RFC3597) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Flags, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Protocol, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RP) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Mbox, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Txt, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RRSIG) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.TypeCovered, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Labels, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.OrigTtl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.SignerName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *RT) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Preference, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Host, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SIG) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.TypeCovered, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Labels, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.OrigTtl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.SignerName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SMIMEA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Usage, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Selector, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.MatchingType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SOA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Ns, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Mbox, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Serial, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Refresh, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Retry, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Expire, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Minttl, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SPF) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SRV) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Priority, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Weight, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Port, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Target, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *SSHFP) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Type, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.KeyTag, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Algorithm, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.DigestType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TALINK) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.PreviousName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.NextName, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TKEY) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Inception, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Expiration, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Mode, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Error, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.KeySize, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize))
+ if err != nil {
+ return off, err
+ }
+ rr.OtherLen, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TLSA) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Usage, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Selector, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.MatchingType, off, err = unpackUint8(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TSIG) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Algorithm, off, err = UnpackDomainName(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.TimeSigned, off, err = unpackUint48(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Fudge, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.MACSize, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize))
+ if err != nil {
+ return off, err
+ }
+ rr.OrigId, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Error, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.OtherLen, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *TXT) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Txt, off, err = unpackStringTxt(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *UID) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Uid, off, err = unpackUint32(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *UINFO) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Uinfo, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *URI) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.Priority, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Weight, off, err = unpackUint16(msg, off)
+ if err != nil {
+ return off, err
+ }
+ if off == len(msg) {
+ return off, nil
+ }
+ rr.Target, off, err = unpackStringOctet(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
+
+func (rr *X25) unpack(msg []byte, off int) (off1 int, err error) {
+ rdStart := off
+ _ = rdStart
+
+ rr.PSDNAddress, off, err = unpackString(msg, off)
+ if err != nil {
+ return off, err
+ }
+ return off, nil
+}
diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go
new file mode 100644
index 0000000..1cbd6d3
--- /dev/null
+++ b/vendor/github.com/miekg/dns/ztypes.go
@@ -0,0 +1,898 @@
+// Code generated by "go run types_generate.go"; DO NOT EDIT.
+
+package dns
+
+import (
+ "encoding/base64"
+ "net"
+)
+
+// TypeToRR is a map of constructors for each RR type.
+var TypeToRR = map[uint16]func() RR{
+ TypeA: func() RR { return new(A) },
+ TypeAAAA: func() RR { return new(AAAA) },
+ TypeAFSDB: func() RR { return new(AFSDB) },
+ TypeANY: func() RR { return new(ANY) },
+ TypeAPL: func() RR { return new(APL) },
+ TypeAVC: func() RR { return new(AVC) },
+ TypeCAA: func() RR { return new(CAA) },
+ TypeCDNSKEY: func() RR { return new(CDNSKEY) },
+ TypeCDS: func() RR { return new(CDS) },
+ TypeCERT: func() RR { return new(CERT) },
+ TypeCNAME: func() RR { return new(CNAME) },
+ TypeCSYNC: func() RR { return new(CSYNC) },
+ TypeDHCID: func() RR { return new(DHCID) },
+ TypeDLV: func() RR { return new(DLV) },
+ TypeDNAME: func() RR { return new(DNAME) },
+ TypeDNSKEY: func() RR { return new(DNSKEY) },
+ TypeDS: func() RR { return new(DS) },
+ TypeEID: func() RR { return new(EID) },
+ TypeEUI48: func() RR { return new(EUI48) },
+ TypeEUI64: func() RR { return new(EUI64) },
+ TypeGID: func() RR { return new(GID) },
+ TypeGPOS: func() RR { return new(GPOS) },
+ TypeHINFO: func() RR { return new(HINFO) },
+ TypeHIP: func() RR { return new(HIP) },
+ TypeKEY: func() RR { return new(KEY) },
+ TypeKX: func() RR { return new(KX) },
+ TypeL32: func() RR { return new(L32) },
+ TypeL64: func() RR { return new(L64) },
+ TypeLOC: func() RR { return new(LOC) },
+ TypeLP: func() RR { return new(LP) },
+ TypeMB: func() RR { return new(MB) },
+ TypeMD: func() RR { return new(MD) },
+ TypeMF: func() RR { return new(MF) },
+ TypeMG: func() RR { return new(MG) },
+ TypeMINFO: func() RR { return new(MINFO) },
+ TypeMR: func() RR { return new(MR) },
+ TypeMX: func() RR { return new(MX) },
+ TypeNAPTR: func() RR { return new(NAPTR) },
+ TypeNID: func() RR { return new(NID) },
+ TypeNIMLOC: func() RR { return new(NIMLOC) },
+ TypeNINFO: func() RR { return new(NINFO) },
+ TypeNS: func() RR { return new(NS) },
+ TypeNSAPPTR: func() RR { return new(NSAPPTR) },
+ TypeNSEC: func() RR { return new(NSEC) },
+ TypeNSEC3: func() RR { return new(NSEC3) },
+ TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
+ TypeNULL: func() RR { return new(NULL) },
+ TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
+ TypeOPT: func() RR { return new(OPT) },
+ TypePTR: func() RR { return new(PTR) },
+ TypePX: func() RR { return new(PX) },
+ TypeRKEY: func() RR { return new(RKEY) },
+ TypeRP: func() RR { return new(RP) },
+ TypeRRSIG: func() RR { return new(RRSIG) },
+ TypeRT: func() RR { return new(RT) },
+ TypeSIG: func() RR { return new(SIG) },
+ TypeSMIMEA: func() RR { return new(SMIMEA) },
+ TypeSOA: func() RR { return new(SOA) },
+ TypeSPF: func() RR { return new(SPF) },
+ TypeSRV: func() RR { return new(SRV) },
+ TypeSSHFP: func() RR { return new(SSHFP) },
+ TypeTA: func() RR { return new(TA) },
+ TypeTALINK: func() RR { return new(TALINK) },
+ TypeTKEY: func() RR { return new(TKEY) },
+ TypeTLSA: func() RR { return new(TLSA) },
+ TypeTSIG: func() RR { return new(TSIG) },
+ TypeTXT: func() RR { return new(TXT) },
+ TypeUID: func() RR { return new(UID) },
+ TypeUINFO: func() RR { return new(UINFO) },
+ TypeURI: func() RR { return new(URI) },
+ TypeX25: func() RR { return new(X25) },
+}
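+
+// Usage sketch (not part of the generated file): look up a constructor
+// by type code; codes without an entry are handled by callers such as
+// UnpackRRWithHeader, which fall back to the RFC3597 generic record.
+//
+//	if mk, known := TypeToRR[TypeMX]; known {
+//		rr := mk() // a fresh *MX with zero-valued fields
+//		_ = rr
+//	}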
+
+// TypeToString is a map of strings for each RR type.
+var TypeToString = map[uint16]string{
+ TypeA: "A",
+ TypeAAAA: "AAAA",
+ TypeAFSDB: "AFSDB",
+ TypeANY: "ANY",
+ TypeAPL: "APL",
+ TypeATMA: "ATMA",
+ TypeAVC: "AVC",
+ TypeAXFR: "AXFR",
+ TypeCAA: "CAA",
+ TypeCDNSKEY: "CDNSKEY",
+ TypeCDS: "CDS",
+ TypeCERT: "CERT",
+ TypeCNAME: "CNAME",
+ TypeCSYNC: "CSYNC",
+ TypeDHCID: "DHCID",
+ TypeDLV: "DLV",
+ TypeDNAME: "DNAME",
+ TypeDNSKEY: "DNSKEY",
+ TypeDS: "DS",
+ TypeEID: "EID",
+ TypeEUI48: "EUI48",
+ TypeEUI64: "EUI64",
+ TypeGID: "GID",
+ TypeGPOS: "GPOS",
+ TypeHINFO: "HINFO",
+ TypeHIP: "HIP",
+ TypeISDN: "ISDN",
+ TypeIXFR: "IXFR",
+ TypeKEY: "KEY",
+ TypeKX: "KX",
+ TypeL32: "L32",
+ TypeL64: "L64",
+ TypeLOC: "LOC",
+ TypeLP: "LP",
+ TypeMAILA: "MAILA",
+ TypeMAILB: "MAILB",
+ TypeMB: "MB",
+ TypeMD: "MD",
+ TypeMF: "MF",
+ TypeMG: "MG",
+ TypeMINFO: "MINFO",
+ TypeMR: "MR",
+ TypeMX: "MX",
+ TypeNAPTR: "NAPTR",
+ TypeNID: "NID",
+ TypeNIMLOC: "NIMLOC",
+ TypeNINFO: "NINFO",
+ TypeNS: "NS",
+ TypeNSEC: "NSEC",
+ TypeNSEC3: "NSEC3",
+ TypeNSEC3PARAM: "NSEC3PARAM",
+ TypeNULL: "NULL",
+ TypeNXT: "NXT",
+ TypeNone: "None",
+ TypeOPENPGPKEY: "OPENPGPKEY",
+ TypeOPT: "OPT",
+ TypePTR: "PTR",
+ TypePX: "PX",
+ TypeRKEY: "RKEY",
+ TypeRP: "RP",
+ TypeRRSIG: "RRSIG",
+ TypeRT: "RT",
+ TypeReserved: "Reserved",
+ TypeSIG: "SIG",
+ TypeSMIMEA: "SMIMEA",
+ TypeSOA: "SOA",
+ TypeSPF: "SPF",
+ TypeSRV: "SRV",
+ TypeSSHFP: "SSHFP",
+ TypeTA: "TA",
+ TypeTALINK: "TALINK",
+ TypeTKEY: "TKEY",
+ TypeTLSA: "TLSA",
+ TypeTSIG: "TSIG",
+ TypeTXT: "TXT",
+ TypeUID: "UID",
+ TypeUINFO: "UINFO",
+ TypeUNSPEC: "UNSPEC",
+ TypeURI: "URI",
+ TypeX25: "X25",
+ TypeNSAPPTR: "NSAP-PTR",
+}
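+
+// Sketch: TypeToString yields the presentation name of a type code; the
+// reverse lookup, StringToType, is built from this map elsewhere in the
+// package.
+//
+//	_ = TypeToString[TypeAAAA] // "AAAA"
+//	_ = StringToType["AAAA"]   // TypeAAAA (28)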
+
+func (rr *A) Header() *RR_Header { return &rr.Hdr }
+func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
+func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
+func (rr *APL) Header() *RR_Header { return &rr.Hdr }
+func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
+func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
+func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
+func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
+func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr }
+func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
+func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
+func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *DS) Header() *RR_Header { return &rr.Hdr }
+func (rr *EID) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI48) Header() *RR_Header { return &rr.Hdr }
+func (rr *EUI64) Header() *RR_Header { return &rr.Hdr }
+func (rr *GID) Header() *RR_Header { return &rr.Hdr }
+func (rr *GPOS) Header() *RR_Header { return &rr.Hdr }
+func (rr *HINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *HIP) Header() *RR_Header { return &rr.Hdr }
+func (rr *KEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *KX) Header() *RR_Header { return &rr.Hdr }
+func (rr *L32) Header() *RR_Header { return &rr.Hdr }
+func (rr *L64) Header() *RR_Header { return &rr.Hdr }
+func (rr *LOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *LP) Header() *RR_Header { return &rr.Hdr }
+func (rr *MB) Header() *RR_Header { return &rr.Hdr }
+func (rr *MD) Header() *RR_Header { return &rr.Hdr }
+func (rr *MF) Header() *RR_Header { return &rr.Hdr }
+func (rr *MG) Header() *RR_Header { return &rr.Hdr }
+func (rr *MINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *MR) Header() *RR_Header { return &rr.Hdr }
+func (rr *MX) Header() *RR_Header { return &rr.Hdr }
+func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NID) Header() *RR_Header { return &rr.Hdr }
+func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *NS) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
+func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
+func (rr *NULL) Header() *RR_Header { return &rr.Hdr }
+func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
+func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
+func (rr *PX) Header() *RR_Header { return &rr.Hdr }
+func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr }
+func (rr *RKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *RP) Header() *RR_Header { return &rr.Hdr }
+func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *RT) Header() *RR_Header { return &rr.Hdr }
+func (rr *SIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr }
+func (rr *SOA) Header() *RR_Header { return &rr.Hdr }
+func (rr *SPF) Header() *RR_Header { return &rr.Hdr }
+func (rr *SRV) Header() *RR_Header { return &rr.Hdr }
+func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr }
+func (rr *TA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TALINK) Header() *RR_Header { return &rr.Hdr }
+func (rr *TKEY) Header() *RR_Header { return &rr.Hdr }
+func (rr *TLSA) Header() *RR_Header { return &rr.Hdr }
+func (rr *TSIG) Header() *RR_Header { return &rr.Hdr }
+func (rr *TXT) Header() *RR_Header { return &rr.Hdr }
+func (rr *UID) Header() *RR_Header { return &rr.Hdr }
+func (rr *UINFO) Header() *RR_Header { return &rr.Hdr }
+func (rr *URI) Header() *RR_Header { return &rr.Hdr }
+func (rr *X25) Header() *RR_Header { return &rr.Hdr }
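+
+// All of the Header methods above return the embedded RR_Header; generic
+// code uses this accessor to read Name, Rrtype, Class, Ttl and Rdlength
+// without knowing the concrete record type.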
+
+// len() functions
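+//
+// Each len method reports the wire-format size of the record when packed
+// at the given offset; the compression map tracks owner names already
+// emitted, so the estimate accounts for DNS name compression. Msg.Len
+// relies on these to size messages before packing.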
+func (rr *A) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ if len(rr.A) != 0 {
+ l += net.IPv4len
+ }
+ return l
+}
+func (rr *AAAA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ if len(rr.AAAA) != 0 {
+ l += net.IPv6len
+ }
+ return l
+}
+func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Subtype
+ l += domainNameLen(rr.Hostname, off+l, compression, false)
+ return l
+}
+func (rr *ANY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ return l
+}
+func (rr *APL) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.Prefixes {
+ l += x.len()
+ }
+ return l
+}
+func (rr *AVC) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *CAA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Flag
+ l += len(rr.Tag) + 1
+ l += len(rr.Value)
+ return l
+}
+func (rr *CERT) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Type
+ l += 2 // KeyTag
+ l++ // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
+ return l
+}
+func (rr *CNAME) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Target, off+l, compression, true)
+ return l
+}
+func (rr *DHCID) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += base64.StdEncoding.DecodedLen(len(rr.Digest))
+ return l
+}
+func (rr *DNAME) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Target, off+l, compression, false)
+ return l
+}
+func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Flags
+ l++ // Protocol
+ l++ // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *DS) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // KeyTag
+ l++ // Algorithm
+ l++ // DigestType
+ l += len(rr.Digest) / 2
+ return l
+}
+func (rr *EID) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Endpoint) / 2
+ return l
+}
+func (rr *EUI48) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 6 // Address
+ return l
+}
+func (rr *EUI64) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 8 // Address
+ return l
+}
+func (rr *GID) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 4 // Gid
+ return l
+}
+func (rr *GPOS) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Longitude) + 1
+ l += len(rr.Latitude) + 1
+ l += len(rr.Altitude) + 1
+ return l
+}
+func (rr *HINFO) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Cpu) + 1
+ l += len(rr.Os) + 1
+ return l
+}
+func (rr *HIP) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // HitLength
+ l++ // PublicKeyAlgorithm
+ l += 2 // PublicKeyLength
+ l += len(rr.Hit) / 2
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ for _, x := range rr.RendezvousServers {
+ l += domainNameLen(x, off+l, compression, false)
+ }
+ return l
+}
+func (rr *KX) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += domainNameLen(rr.Exchanger, off+l, compression, false)
+ return l
+}
+func (rr *L32) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ if len(rr.Locator32) != 0 {
+ l += net.IPv4len
+ }
+ return l
+}
+func (rr *L64) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += 8 // Locator64
+ return l
+}
+func (rr *LOC) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Version
+ l++ // Size
+ l++ // HorizPre
+ l++ // VertPre
+ l += 4 // Latitude
+ l += 4 // Longitude
+ l += 4 // Altitude
+ return l
+}
+func (rr *LP) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += domainNameLen(rr.Fqdn, off+l, compression, false)
+ return l
+}
+func (rr *MB) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Mb, off+l, compression, true)
+ return l
+}
+func (rr *MD) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Md, off+l, compression, true)
+ return l
+}
+func (rr *MF) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Mf, off+l, compression, true)
+ return l
+}
+func (rr *MG) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Mg, off+l, compression, true)
+ return l
+}
+func (rr *MINFO) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Rmail, off+l, compression, true)
+ l += domainNameLen(rr.Email, off+l, compression, true)
+ return l
+}
+func (rr *MR) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Mr, off+l, compression, true)
+ return l
+}
+func (rr *MX) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += domainNameLen(rr.Mx, off+l, compression, true)
+ return l
+}
+func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Order
+ l += 2 // Preference
+ l += len(rr.Flags) + 1
+ l += len(rr.Service) + 1
+ l += len(rr.Regexp) + 1
+ l += domainNameLen(rr.Replacement, off+l, compression, false)
+ return l
+}
+func (rr *NID) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += 8 // NodeID
+ return l
+}
+func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Locator) / 2
+ return l
+}
+func (rr *NINFO) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.ZSData {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *NS) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Ns, off+l, compression, true)
+ return l
+}
+func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Ptr, off+l, compression, false)
+ return l
+}
+func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Hash
+ l++ // Flags
+ l += 2 // Iterations
+ l++ // SaltLength
+ l += len(rr.Salt) / 2
+ return l
+}
+func (rr *NULL) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Data)
+ return l
+}
+func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *PTR) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Ptr, off+l, compression, true)
+ return l
+}
+func (rr *PX) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += domainNameLen(rr.Map822, off+l, compression, false)
+ l += domainNameLen(rr.Mapx400, off+l, compression, false)
+ return l
+}
+func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Rdata) / 2
+ return l
+}
+func (rr *RKEY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Flags
+ l++ // Protocol
+ l++ // Algorithm
+ l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
+ return l
+}
+func (rr *RP) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Mbox, off+l, compression, false)
+ l += domainNameLen(rr.Txt, off+l, compression, false)
+ return l
+}
+func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // TypeCovered
+ l++ // Algorithm
+ l++ // Labels
+ l += 4 // OrigTtl
+ l += 4 // Expiration
+ l += 4 // Inception
+ l += 2 // KeyTag
+ l += domainNameLen(rr.SignerName, off+l, compression, false)
+ l += base64.StdEncoding.DecodedLen(len(rr.Signature))
+ return l
+}
+func (rr *RT) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Preference
+ l += domainNameLen(rr.Host, off+l, compression, false)
+ return l
+}
+func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Usage
+ l++ // Selector
+ l++ // MatchingType
+ l += len(rr.Certificate) / 2
+ return l
+}
+func (rr *SOA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Ns, off+l, compression, true)
+ l += domainNameLen(rr.Mbox, off+l, compression, true)
+ l += 4 // Serial
+ l += 4 // Refresh
+ l += 4 // Retry
+ l += 4 // Expire
+ l += 4 // Minttl
+ return l
+}
+func (rr *SPF) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *SRV) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Priority
+ l += 2 // Weight
+ l += 2 // Port
+ l += domainNameLen(rr.Target, off+l, compression, false)
+ return l
+}
+func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Algorithm
+ l++ // Type
+ l += len(rr.FingerPrint) / 2
+ return l
+}
+func (rr *TA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // KeyTag
+ l++ // Algorithm
+ l++ // DigestType
+ l += len(rr.Digest) / 2
+ return l
+}
+func (rr *TALINK) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.PreviousName, off+l, compression, false)
+ l += domainNameLen(rr.NextName, off+l, compression, false)
+ return l
+}
+func (rr *TKEY) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Algorithm, off+l, compression, false)
+ l += 4 // Inception
+ l += 4 // Expiration
+ l += 2 // Mode
+ l += 2 // Error
+ l += 2 // KeySize
+ l += len(rr.Key) / 2
+ l += 2 // OtherLen
+ l += len(rr.OtherData) / 2
+ return l
+}
+func (rr *TLSA) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l++ // Usage
+ l++ // Selector
+ l++ // MatchingType
+ l += len(rr.Certificate) / 2
+ return l
+}
+func (rr *TSIG) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += domainNameLen(rr.Algorithm, off+l, compression, false)
+ l += 6 // TimeSigned
+ l += 2 // Fudge
+ l += 2 // MACSize
+ l += len(rr.MAC) / 2
+ l += 2 // OrigId
+ l += 2 // Error
+ l += 2 // OtherLen
+ l += len(rr.OtherData) / 2
+ return l
+}
+func (rr *TXT) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ for _, x := range rr.Txt {
+ l += len(x) + 1
+ }
+ return l
+}
+func (rr *UID) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 4 // Uid
+ return l
+}
+func (rr *UINFO) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.Uinfo) + 1
+ return l
+}
+func (rr *URI) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += 2 // Priority
+ l += 2 // Weight
+ l += len(rr.Target)
+ return l
+}
+func (rr *X25) len(off int, compression map[string]struct{}) int {
+ l := rr.Hdr.len(off, compression)
+ l += len(rr.PSDNAddress) + 1
+ return l
+}
+
+// copy() functions
+func (rr *A) copy() RR {
+ return &A{rr.Hdr, copyIP(rr.A)}
+}
+func (rr *AAAA) copy() RR {
+ return &AAAA{rr.Hdr, copyIP(rr.AAAA)}
+}
+func (rr *AFSDB) copy() RR {
+ return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname}
+}
+func (rr *ANY) copy() RR {
+ return &ANY{rr.Hdr}
+}
+func (rr *APL) copy() RR {
+ Prefixes := make([]APLPrefix, len(rr.Prefixes))
+ for i := range rr.Prefixes {
+ Prefixes[i] = rr.Prefixes[i].copy()
+ }
+ return &APL{rr.Hdr, Prefixes}
+}
+func (rr *AVC) copy() RR {
+ Txt := make([]string, len(rr.Txt))
+ copy(Txt, rr.Txt)
+ return &AVC{rr.Hdr, Txt}
+}
+func (rr *CAA) copy() RR {
+ return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value}
+}
+func (rr *CERT) copy() RR {
+ return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate}
+}
+func (rr *CNAME) copy() RR {
+ return &CNAME{rr.Hdr, rr.Target}
+}
+func (rr *CSYNC) copy() RR {
+ TypeBitMap := make([]uint16, len(rr.TypeBitMap))
+ copy(TypeBitMap, rr.TypeBitMap)
+ return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap}
+}
+func (rr *DHCID) copy() RR {
+ return &DHCID{rr.Hdr, rr.Digest}
+}
+func (rr *DNAME) copy() RR {
+ return &DNAME{rr.Hdr, rr.Target}
+}
+func (rr *DNSKEY) copy() RR {
+ return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+func (rr *DS) copy() RR {
+ return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+func (rr *EID) copy() RR {
+ return &EID{rr.Hdr, rr.Endpoint}
+}
+func (rr *EUI48) copy() RR {
+ return &EUI48{rr.Hdr, rr.Address}
+}
+func (rr *EUI64) copy() RR {
+ return &EUI64{rr.Hdr, rr.Address}
+}
+func (rr *GID) copy() RR {
+ return &GID{rr.Hdr, rr.Gid}
+}
+func (rr *GPOS) copy() RR {
+ return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude}
+}
+func (rr *HINFO) copy() RR {
+ return &HINFO{rr.Hdr, rr.Cpu, rr.Os}
+}
+func (rr *HIP) copy() RR {
+ RendezvousServers := make([]string, len(rr.RendezvousServers))
+ copy(RendezvousServers, rr.RendezvousServers)
+ return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers}
+}
+func (rr *KX) copy() RR {
+ return &KX{rr.Hdr, rr.Preference, rr.Exchanger}
+}
+func (rr *L32) copy() RR {
+ return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)}
+}
+func (rr *L64) copy() RR {
+ return &L64{rr.Hdr, rr.Preference, rr.Locator64}
+}
+func (rr *LOC) copy() RR {
+ return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude}
+}
+func (rr *LP) copy() RR {
+ return &LP{rr.Hdr, rr.Preference, rr.Fqdn}
+}
+func (rr *MB) copy() RR {
+ return &MB{rr.Hdr, rr.Mb}
+}
+func (rr *MD) copy() RR {
+ return &MD{rr.Hdr, rr.Md}
+}
+func (rr *MF) copy() RR {
+ return &MF{rr.Hdr, rr.Mf}
+}
+func (rr *MG) copy() RR {
+ return &MG{rr.Hdr, rr.Mg}
+}
+func (rr *MINFO) copy() RR {
+ return &MINFO{rr.Hdr, rr.Rmail, rr.Email}
+}
+func (rr *MR) copy() RR {
+ return &MR{rr.Hdr, rr.Mr}
+}
+func (rr *MX) copy() RR {
+ return &MX{rr.Hdr, rr.Preference, rr.Mx}
+}
+func (rr *NAPTR) copy() RR {
+ return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement}
+}
+func (rr *NID) copy() RR {
+ return &NID{rr.Hdr, rr.Preference, rr.NodeID}
+}
+func (rr *NIMLOC) copy() RR {
+ return &NIMLOC{rr.Hdr, rr.Locator}
+}
+func (rr *NINFO) copy() RR {
+ ZSData := make([]string, len(rr.ZSData))
+ copy(ZSData, rr.ZSData)
+ return &NINFO{rr.Hdr, ZSData}
+}
+func (rr *NS) copy() RR {
+ return &NS{rr.Hdr, rr.Ns}
+}
+func (rr *NSAPPTR) copy() RR {
+ return &NSAPPTR{rr.Hdr, rr.Ptr}
+}
+func (rr *NSEC) copy() RR {
+ TypeBitMap := make([]uint16, len(rr.TypeBitMap))
+ copy(TypeBitMap, rr.TypeBitMap)
+ return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap}
+}
+func (rr *NSEC3) copy() RR {
+ TypeBitMap := make([]uint16, len(rr.TypeBitMap))
+ copy(TypeBitMap, rr.TypeBitMap)
+ return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap}
+}
+func (rr *NSEC3PARAM) copy() RR {
+ return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
+}
+func (rr *NULL) copy() RR {
+ return &NULL{rr.Hdr, rr.Data}
+}
+func (rr *OPENPGPKEY) copy() RR {
+ return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
+}
+func (rr *OPT) copy() RR {
+ Option := make([]EDNS0, len(rr.Option))
+ for i, e := range rr.Option {
+ Option[i] = e.copy()
+ }
+ return &OPT{rr.Hdr, Option}
+}
+func (rr *PTR) copy() RR {
+ return &PTR{rr.Hdr, rr.Ptr}
+}
+func (rr *PX) copy() RR {
+ return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400}
+}
+func (rr *RFC3597) copy() RR {
+ return &RFC3597{rr.Hdr, rr.Rdata}
+}
+func (rr *RKEY) copy() RR {
+ return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey}
+}
+func (rr *RP) copy() RR {
+ return &RP{rr.Hdr, rr.Mbox, rr.Txt}
+}
+func (rr *RRSIG) copy() RR {
+ return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature}
+}
+func (rr *RT) copy() RR {
+ return &RT{rr.Hdr, rr.Preference, rr.Host}
+}
+func (rr *SMIMEA) copy() RR {
+ return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+}
+func (rr *SOA) copy() RR {
+ return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl}
+}
+func (rr *SPF) copy() RR {
+ Txt := make([]string, len(rr.Txt))
+ copy(Txt, rr.Txt)
+ return &SPF{rr.Hdr, Txt}
+}
+func (rr *SRV) copy() RR {
+ return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target}
+}
+func (rr *SSHFP) copy() RR {
+ return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint}
+}
+func (rr *TA) copy() RR {
+ return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest}
+}
+func (rr *TALINK) copy() RR {
+ return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName}
+}
+func (rr *TKEY) copy() RR {
+ return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData}
+}
+func (rr *TLSA) copy() RR {
+ return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate}
+}
+func (rr *TSIG) copy() RR {
+ return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData}
+}
+func (rr *TXT) copy() RR {
+ Txt := make([]string, len(rr.Txt))
+ copy(Txt, rr.Txt)
+ return &TXT{rr.Hdr, Txt}
+}
+func (rr *UID) copy() RR {
+ return &UID{rr.Hdr, rr.Uid}
+}
+func (rr *UINFO) copy() RR {
+ return &UINFO{rr.Hdr, rr.Uinfo}
+}
+func (rr *URI) copy() RR {
+ return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target}
+}
+func (rr *X25) copy() RR {
+ return &X25{rr.Hdr, rr.PSDNAddress}
+}
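
The generated Header, len, and copy methods above are what make every record type satisfy the package's RR interface; the public dns.Copy helper dispatches to the per-type copy() implementations. A minimal sketch of cloning a record before mutating it (the record text is illustrative):

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}
	// dns.Copy dispatches to the generated copy() method for *dns.A,
	// so the clone shares no mutable header state with the original.
	clone := dns.Copy(rr)
	clone.Header().Ttl = 60 // only the clone's TTL changes
	fmt.Println(rr)
	fmt.Println(clone)
}
```
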
diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE
new file mode 100644
index 0000000..4a772d1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2012 Suryandaru Triandana
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
new file mode 100644
index 0000000..2259200
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrBatchCorrupted records the reason for batch corruption. This error will
+// be wrapped with errors.ErrCorrupted.
+type ErrBatchCorrupted struct {
+ Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
+}
+
+const (
+ batchHeaderLen = 8 + 4
+ batchGrowRec = 3000
+ batchBufioSize = 16
+)
+
+// BatchReplay wraps basic batch operations.
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
+}
+
+type batchIndex struct {
+ keyType keyType
+ keyPos, keyLen int
+ valuePos, valueLen int
+}
+
+func (index batchIndex) k(data []byte) []byte {
+ return data[index.keyPos : index.keyPos+index.keyLen]
+}
+
+func (index batchIndex) v(data []byte) []byte {
+ if index.valueLen != 0 {
+ return data[index.valuePos : index.valuePos+index.valueLen]
+ }
+ return nil
+}
+
+func (index batchIndex) kv(data []byte) (key, value []byte) {
+ return index.k(data), index.v(data)
+}
+
+// Batch is a write batch.
+type Batch struct {
+ data []byte
+ index []batchIndex
+
+	// internalLen is the sum of key/value pair lengths plus an 8-byte internal key per record.
+ internalLen int
+}
+
+func (b *Batch) grow(n int) {
+ o := len(b.data)
+ if cap(b.data)-o < n {
+ div := 1
+ if len(b.index) > batchGrowRec {
+ div = len(b.index) / batchGrowRec
+ }
+ ndata := make([]byte, o, o+n+o/div)
+ copy(ndata, b.data)
+ b.data = ndata
+ }
+}
+
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
+ n := 1 + binary.MaxVarintLen32 + len(key)
+ if kt == keyTypeVal {
+ n += binary.MaxVarintLen32 + len(value)
+ }
+ b.grow(n)
+ index := batchIndex{keyType: kt}
+ o := len(b.data)
+ data := b.data[:o+n]
+ data[o] = byte(kt)
+ o++
+ o += binary.PutUvarint(data[o:], uint64(len(key)))
+ index.keyPos = o
+ index.keyLen = len(key)
+ o += copy(data[o:], key)
+ if kt == keyTypeVal {
+ o += binary.PutUvarint(data[o:], uint64(len(value)))
+ index.valuePos = o
+ index.valueLen = len(value)
+ o += copy(data[o:], value)
+ }
+ b.data = data[:o]
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+}
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns but not
+// before.
+func (b *Batch) Put(key, value []byte) {
+ b.appendRec(keyTypeVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (b *Batch) Delete(key []byte) {
+ b.appendRec(keyTypeDel, key, nil)
+}
+
+// Dump dumps the batch contents. The returned slice can be loaded back
+// into a batch using the Load method.
+// The returned slice is not a copy, so its contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.data
+}
+
+// Load loads the given slice into the batch. The previous contents of
+// the batch will be discarded.
+// The given slice will not be copied and will be used as the batch buffer,
+// so it is not safe to modify its contents afterwards.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(data, -1)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ for _, index := range b.index {
+ switch index.keyType {
+ case keyTypeVal:
+ r.Put(index.k(b.data), index.v(b.data))
+ case keyTypeDel:
+ r.Delete(index.k(b.data))
+ }
+ }
+ return nil
+}
+
+// Len returns the number of records in the batch.
+func (b *Batch) Len() int {
+ return len(b.index)
+}
+
+// Reset resets the batch.
+func (b *Batch) Reset() {
+ b.data = b.data[:0]
+ b.index = b.index[:0]
+ b.internalLen = 0
+}
+
+func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
+ for i, index := range b.index {
+ if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) append(p *Batch) {
+ ob := len(b.data)
+ oi := len(b.index)
+ b.data = append(b.data, p.data...)
+ b.index = append(b.index, p.index...)
+ b.internalLen += p.internalLen
+
+ // Updating index offset.
+ if ob != 0 {
+ for ; oi < len(b.index); oi++ {
+ index := &b.index[oi]
+ index.keyPos += ob
+ if index.valueLen != 0 {
+ index.valuePos += ob
+ }
+ }
+ }
+}
+
+func (b *Batch) decode(data []byte, expectedLen int) error {
+ b.data = data
+ b.index = b.index[:0]
+ b.internalLen = 0
+ err := decodeBatch(data, func(i int, index batchIndex) error {
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if expectedLen >= 0 && len(b.index) != expectedLen {
+ return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
+ }
+ return nil
+}
+
+func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Delete(ik); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newBatch() interface{} {
+ return &Batch{}
+}
+
+func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
+ var index batchIndex
+ for i, o := 0, 0; o < len(data); i++ {
+ // Key type.
+ index.keyType = keyType(data[o])
+ if index.keyType > keyTypeVal {
+ return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
+ }
+ o++
+
+ // Key.
+ x, n := binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid key length")
+ }
+ index.keyPos = o
+ index.keyLen = int(x)
+ o += index.keyLen
+
+ // Value.
+ if index.keyType == keyTypeVal {
+ x, n = binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
+ }
+ index.valuePos = o
+ index.valueLen = int(x)
+ o += index.valueLen
+ } else {
+ index.valuePos = 0
+ index.valueLen = 0
+ }
+
+ if err := fn(i, index); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
+ seq, batchLen, err = decodeBatchHeader(data)
+ if err != nil {
+ return 0, 0, err
+ }
+ if seq < expectSeq {
+ return 0, 0, newErrBatchCorrupted("invalid sequence number")
+ }
+ data = data[batchHeaderLen:]
+ var ik []byte
+ var decodedLen int
+ err = decodeBatch(data, func(i int, index batchIndex) error {
+ if i >= batchLen {
+ return newErrBatchCorrupted("invalid records length")
+ }
+ ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(data)); err != nil {
+ return err
+ }
+ decodedLen++
+ return nil
+ })
+ if err == nil && decodedLen != batchLen {
+ err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
+ }
+ return
+}
+
+func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
+ dst = ensureBuffer(dst, batchHeaderLen)
+ binary.LittleEndian.PutUint64(dst, seq)
+ binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
+ return dst
+}
+
+func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
+ if len(data) < batchHeaderLen {
+ return 0, 0, newErrBatchCorrupted("too short")
+ }
+
+ seq = binary.LittleEndian.Uint64(data)
+ batchLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if batchLen < 0 {
+ return 0, 0, newErrBatchCorrupted("invalid records length")
+ }
+ return
+}
+
+func batchesLen(batches []*Batch) int {
+ batchLen := 0
+ for _, batch := range batches {
+ batchLen += batch.Len()
+ }
+ return batchLen
+}
+
+func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
+ if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
+ return err
+ }
+ for _, batch := range batches {
+ if _, err := wr.Write(batch.data); err != nil {
+ return err
+ }
+ }
+ return nil
+}
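
Batch records puts and deletes in a single flat buffer, and DB.Write applies them atomically against the current sequence number. A minimal usage sketch, assuming a scratch database path:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/batch-example.db", nil) // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	b := new(leveldb.Batch)
	b.Put([]byte("ads.example.com"), []byte("blocked"))
	b.Delete([]byte("stale.example.com"))

	// Write applies all records in the batch atomically: either every
	// put/delete becomes visible or none of them do.
	if err := db.Write(b, nil); err != nil {
		log.Fatal(err)
	}
}
```
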
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 0000000..c36ad32
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,704 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides the interface and implementations of cache algorithms.
+package cache
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Cacher provides an interface for implementing caching functionality.
+// An implementation must be safe for concurrent use.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
+
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
+
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
+
+	// Ban evicts the 'cache node' and prevents subsequent 'promote'.
+	Ban(n *Node)
+
+	// Evict evicts the 'cache node'.
+	Evict(n *Node)
+
+	// EvictNS evicts every 'cache node' with the given namespace.
+	EvictNS(ns uint64)
+
+	// EvictAll evicts all 'cache nodes'.
+	EvictAll()
+
+	// Close closes the 'cache tree'.
+	Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
+// NamespaceGetter provides a convenient wrapper around a namespace.
+type NamespaceGetter struct {
+ Cache *Cache
+ NS uint64
+}
+
+// Get simply calls the Cache.Get method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
+}
+
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
+
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
+
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
+}
+
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
+
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
+}
+
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
+
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
+
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink.
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) >> 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+ }
+
+ return true, deleted
+}
+
+type mNode struct {
+ buckets []unsafe.Pointer // []*mBucket
+ mask uint32
+ pred unsafe.Pointer // *mNode
+ resizeInProgess int32
+
+ overflow int32
+ growThreshold int32
+ shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+ if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+ return b
+ }
+
+ p := (*mNode)(atomic.LoadPointer(&n.pred))
+ if p != nil {
+ var node []*Node
+ if n.mask > p.mask {
+ // Grow.
+ pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+ if pb == nil {
+ pb = p.initBucket(i & p.mask)
+ }
+ m := pb.freeze()
+ // Split nodes.
+ for _, x := range m {
+ if x.hash&n.mask == i {
+ node = append(node, x)
+ }
+ }
+ } else {
+ // Shrink.
+ pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+ if pb0 == nil {
+ pb0 = p.initBucket(i)
+ }
+ pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+ if pb1 == nil {
+ pb1 = p.initBucket(i + uint32(len(n.buckets)))
+ }
+ m0 := pb0.freeze()
+ m1 := pb1.freeze()
+ // Merge nodes.
+ node = make([]*Node, 0, len(m0)+len(m1))
+ node = append(node, m0...)
+ node = append(node, m1...)
+ }
+ b := &mBucket{node: node}
+ if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+ if len(node) > mOverflowThreshold {
+ atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+ }
+ return b
+ }
+ }
+
+ return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+ for i := range n.buckets {
+ n.initBucket(uint32(i))
+ }
+ atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+ mu sync.RWMutex
+ mHead unsafe.Pointer // *mNode
+ nodes int32
+ size int32
+ cacher Cacher
+ closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+}
+
+// Nodes returns the number of 'cache nodes' in the map.
+func (r *Cache) Nodes() int {
+ return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns the total size of all 'cache nodes' in the map.
+func (r *Cache) Size() int {
+ return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the cache node is not found and setFunc is not nil, Get will atomically
+// create the 'cache node' by calling setFunc. Otherwise Get returns nil.
+//
+// The returned 'cache handle' should be released after use by calling the
+// Release method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
+ }
+ return nil
+}
+
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
+// applies only to that particular 'cache node', so when a 'cache node' is
+// recreated it will not be banned.
+//
+// If onDel is not nil, it will be executed if no such 'cache node' exists,
+// or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts the 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts every 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache nodes'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and forcefully releases all 'cache nodes'.
+func (r *Cache) Close() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+
+ h := (*mNode)(r.mHead)
+ h.initBuckets()
+
+ for i := range h.buckets {
+ b := (*mBucket)(h.buckets[i])
+ for _, n := range b.node {
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+ n.onDel = nil
+ }
+ }
+ }
+ r.mu.Unlock()
+
+ // Avoid deadlock.
+ if r.cacher != nil {
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CloseWeak closes the 'cache map' and evicts all 'cache nodes' from the
+// cacher, but unlike Close it doesn't forcefully release them.
+func (r *Cache) CloseWeak() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+ }
+ r.mu.Unlock()
+
+ // Avoid deadlock.
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+ r *Cache
+
+ hash uint32
+ ns, key uint64
+
+ mu sync.Mutex
+ size int
+ value Value
+
+ ref int32
+ onDel []func()
+
+ CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+ return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+ return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+ return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+ return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+ return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+ if atomic.AddInt32(&n.ref, 1) <= 1 {
+ panic("BUG: Node.GetHandle on zero ref")
+ }
+ return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.delete(n)
+ }
+}
+
+func (n *Node) unrefLocked() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.mu.RLock()
+ if !n.r.closed {
+ n.r.delete(n)
+ }
+ n.r.mu.RUnlock()
+ }
+}
+
+// Handle is a 'cache handle' of a 'cache node'.
+type Handle struct {
+ n unsafe.Pointer // *Node
+}
+
+// Value returns the value of the 'cache node'.
+func (h *Handle) Value() Value {
+ n := (*Node)(atomic.LoadPointer(&h.n))
+ if n != nil {
+ return n.value
+ }
+ return nil
+}
+
+// Release releases this 'cache handle'.
+// It is safe to call release multiple times.
+func (h *Handle) Release() {
+ nPtr := atomic.LoadPointer(&h.n)
+ if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+ n := (*Node)(nPtr)
+ n.unrefLocked()
+ }
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+ const (
+ m = uint32(0x5bd1e995)
+ r = 24
+ )
+
+ k1 := uint32(ns >> 32)
+ k2 := uint32(ns)
+ k3 := uint32(key >> 32)
+ k4 := uint32(key)
+
+ k1 *= m
+ k1 ^= k1 >> r
+ k1 *= m
+
+ k2 *= m
+ k2 ^= k2 >> r
+ k2 *= m
+
+ k3 *= m
+ k3 ^= k3 >> r
+ k3 *= m
+
+ k4 *= m
+ k4 ^= k4 >> r
+ k4 *= m
+
+ h := seed
+
+ h *= m
+ h ^= k1
+ h *= m
+ h ^= k2
+ h *= m
+ h ^= k3
+ h *= m
+ h ^= k4
+
+ h ^= h >> 13
+ h *= m
+ h ^= h >> 15
+
+ return h
+}
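
Cache.Get either returns the live node for (ns, key) or atomically builds one through setFunc, handing back a ref-counted Handle; the hash map grows and shrinks lock-free as described above. A short lookup-or-load sketch against this vendored package (namespace and key values are arbitrary):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(128)) // capacity is in "size units"

	const ns, key = 1, 42
	h := c.Get(ns, key, func() (int, cache.Value) {
		// Called at most once per live node; the returned size is what
		// counts against the LRU capacity.
		return 1, "expensive-to-build value"
	})
	fmt.Println(h.Value()) // "expensive-to-build value"
	h.Release()            // drop our reference so the node can be evicted
}
```
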
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 0000000..d9a84cd
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
+type lru struct {
+ mu sync.Mutex
+ capacity int
+ used int
+ recent lruNode
+}
+
+func (r *lru) reset() {
+ r.recent.next = &r.recent
+ r.recent.prev = &r.recent
+ r.used = 0
+}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ r.capacity = capacity
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Promote(n *Node) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ if n.CacheData == nil {
+ if n.Size() <= r.capacity {
+ rn := &lruNode{n: n, h: n.GetHandle()}
+ rn.insert(&r.recent)
+ n.CacheData = unsafe.Pointer(rn)
+ r.used += n.Size()
+
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.insert(&r.recent)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Ban(n *Node) {
+ r.mu.Lock()
+ if n.CacheData == nil {
+ n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.ban = true
+ r.used -= rn.n.Size()
+ r.mu.Unlock()
+
+ rn.h.Release()
+ rn.h = nil
+ return
+ }
+ }
+ r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+ r.mu.Lock()
+ rn := (*lruNode)(n.CacheData)
+ if rn == nil || rn.ban {
+ r.mu.Unlock()
+ return
+ }
+ n.CacheData = nil
+ r.mu.Unlock()
+
+ rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ for e := r.recent.prev; e != &r.recent; {
+ rn := e
+ e = e.prev
+ if rn.n.NS() == ns {
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) EvictAll() {
+ r.mu.Lock()
+ back := r.recent.prev
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.n.CacheData = nil
+ }
+ r.reset()
+ r.mu.Unlock()
+
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU creates a new LRU cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
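
The LRU cacher holds one handle per resident node, so eviction is simply a Release at the cold end of the recent list; SetCapacity evicts eagerly until used fits within capacity. A sketch of that behaviour through the public cache API (counts in the comments follow from the eviction logic above):

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	c := cache.NewCache(cache.NewLRU(3))
	for k := uint64(0); k < 3; k++ {
		h := c.Get(0, k, func() (int, cache.Value) { return 1, k })
		h.Release() // the LRU still holds its own handle on each node
	}
	fmt.Println("nodes before:", c.Nodes()) // 3

	// SetCapacity forwards to lru.SetCapacity, which walks the recent list
	// from the cold end and releases handles until used <= capacity.
	c.SetCapacity(1)
	fmt.Println("nodes after:", c.Nodes()) // 1 (the hottest node survives)
}
```
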
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
new file mode 100644
index 0000000..448402b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
+type iComparer struct {
+ ucmp comparer.Comparer
+}
+
+func (icmp *iComparer) uName() string {
+ return icmp.ucmp.Name()
+}
+
+func (icmp *iComparer) uCompare(a, b []byte) int {
+ return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+ return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+ return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+ return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+ x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
+ if x == 0 {
+ if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
+ return -1
+ } else if m < n {
+ return 1
+ }
+ }
+ return x
+}
+
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
+ ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
+ dst = icmp.uSeparator(dst, ua, ub)
+ if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
+ // Append earliest possible number.
+ return append(dst, keyMaxNumBytes...)
+ }
+ return nil
+}
+
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
+ ub := internalKey(b).ukey()
+ dst = icmp.uSuccessor(dst, ub)
+ if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
+ // Append earliest possible number.
+ return append(dst, keyMaxNumBytes...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 0000000..abf9fb6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
+type bytesComparer struct{}
+
+func (bytesComparer) Compare(a, b []byte) int {
+ return bytes.Compare(a, b)
+}
+
+func (bytesComparer) Name() string {
+ return "leveldb.BytewiseComparator"
+}
+
+func (bytesComparer) Separator(dst, a, b []byte) []byte {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for ; i < n && a[i] == b[i]; i++ {
+ }
+ if i >= n {
+ // Do not shorten if one string is a prefix of the other
+ } else if c := a[i]; c < 0xff && c+1 < b[i] {
+ dst = append(dst, a[:i+1]...)
+ dst[len(dst)-1]++
+ return dst
+ }
+ return nil
+}
+
+func (bytesComparer) Successor(dst, b []byte) []byte {
+ for i, c := range b {
+ if c != 0xff {
+ dst = append(dst, b[:i+1]...)
+ dst[len(dst)-1]++
+ return dst
+ }
+ }
+ return nil
+}
+
+// DefaultComparer is the default implementation of the Comparer interface.
+// It uses the natural ordering, consistent with bytes.Compare.
+var DefaultComparer = bytesComparer{}
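
The Separator and Successor shortenings above are what let index blocks store short keys instead of full user keys. A quick demonstration of that behaviour:

```go
package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	cmp := comparer.DefaultComparer

	// Any x with "abcdef" <= x < "abqxyz" works as a separator; the
	// comparer shortens it to the three-byte key "abd".
	fmt.Printf("%q\n", cmp.Separator(nil, []byte("abcdef"), []byte("abqxyz"))) // "abd"

	// The successor of "abc" only needs to bump the first non-0xff byte.
	fmt.Printf("%q\n", cmp.Successor(nil, []byte("abc"))) // "b"
}
```
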
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 0000000..2c522db
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides interfaces and implementations for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+ // Compare returns -1, 0, or +1 depending on whether a is 'less than',
+ // 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+ // if their contents are exactly equal. Furthermore, the empty slice
+ // must be 'less than' any non-empty slice.
+ Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+ BasicComparer
+
+ // Name returns name of the comparer.
+ //
+ // The Level-DB on-disk format stores the comparer name, and opening a
+ // database with a different comparer from the one it was created with
+ // will result in an error.
+ //
+	// An implementation should switch to a new name whenever the comparer
+	// implementation changes in a way that would cause the relative ordering
+	// of any two keys to change.
+ //
+ // Names starting with "leveldb." are reserved and should not be used
+ // by any users of this package.
+ Name() string
+
+	// Below are advanced functions used to reduce the space requirements
+ // for internal data structures such as index blocks.
+
+ // Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+	// where 'less than' is consistent with Compare. An implementation should
+	// return nil if x is equal to a.
+	//
+	// The contents of a and b must not be modified in any way. Doing so
+	// may corrupt the internal state.
+ Separator(dst, a, b []byte) []byte
+
+ // Successor appends a sequence of bytes x to dst such that x >= b, where
+	// 'less than' is consistent with Compare. An implementation should return
+	// nil if x is equal to b.
+	//
+	// The contents of b must not be modified in any way. Doing so may
+	// corrupt the internal state.
+ Successor(dst, b []byte) []byte
+}
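
Since Separator and Successor may legally return nil (callers then fall back to the unshortened key), a custom ordering only has to get Compare and Name right. A hedged sketch of a reverse-bytewise comparer; the package, type, and name are illustrative:

```go
package revcmp

import (
	"bytes"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

type reverseComparer struct{}

// Compare inverts the natural bytewise order.
func (reverseComparer) Compare(a, b []byte) int { return -bytes.Compare(a, b) }

// Name must change whenever the ordering changes; a DB created with this
// comparer cannot be reopened with leveldb.BytewiseComparator.
func (reverseComparer) Name() string { return "example.ReverseBytewiseComparator" }

// Returning nil disables index-key shortening, which is always correct.
func (reverseComparer) Separator(dst, a, b []byte) []byte { return nil }
func (reverseComparer) Successor(dst, b []byte) []byte    { return nil }

var _ comparer.Comparer = reverseComparer{}
```
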
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 0000000..90fedf7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1179 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// DB is a LevelDB database.
+type DB struct {
+ // Need 64-bit alignment.
+ seq uint64
+
+ // Stats. Need 64-bit alignment.
+ cWriteDelay int64 // The cumulative duration of write delays
+ cWriteDelayN int32 // The cumulative number of write delays
+	inWritePaused int32 // Indicates whether write operations are paused by compaction
+ aliveSnaps, aliveIters int32
+
+ // Session.
+ s *session
+
+ // MemDB.
+ memMu sync.RWMutex
+ memPool chan *memdb.DB
+ mem, frozenMem *memDB
+ journal *journal.Writer
+ journalWriter storage.Writer
+ journalFd storage.FileDesc
+ frozenJournalFd storage.FileDesc
+ frozenSeq uint64
+
+ // Snapshot.
+ snapsMu sync.Mutex
+ snapsList *list.List
+
+ // Write.
+ batchPool sync.Pool
+ writeMergeC chan writeMerge
+ writeMergedC chan bool
+ writeLockC chan struct{}
+ writeAckC chan error
+ writeDelay time.Duration
+ writeDelayN int
+ tr *Transaction
+
+ // Compaction.
+ compCommitLk sync.Mutex
+ tcompCmdC chan cCmd
+ tcompPauseC chan chan<- struct{}
+ mcompCmdC chan cCmd
+ compErrC chan error
+ compPerErrC chan error
+ compErrSetC chan error
+ compWriteLocking bool
+ compStats cStats
+ memdbMaxLevel int // For testing.
+
+ // Close.
+ closeW sync.WaitGroup
+ closeC chan struct{}
+ closed uint32
+ closer io.Closer
+}
+
+func openDB(s *session) (*DB, error) {
+ s.log("db@open opening")
+ start := time.Now()
+ db := &DB{
+ s: s,
+ // Initial sequence
+ seq: s.stSeqNum,
+ // MemDB
+ memPool: make(chan *memdb.DB, 1),
+ // Snapshot
+ snapsList: list.New(),
+ // Write
+ batchPool: sync.Pool{New: newBatch},
+ writeMergeC: make(chan writeMerge),
+ writeMergedC: make(chan bool),
+ writeLockC: make(chan struct{}, 1),
+ writeAckC: make(chan error),
+ // Compaction
+ tcompCmdC: make(chan cCmd),
+ tcompPauseC: make(chan chan<- struct{}),
+ mcompCmdC: make(chan cCmd),
+ compErrC: make(chan error),
+ compPerErrC: make(chan error),
+ compErrSetC: make(chan error),
+ // Close
+ closeC: make(chan struct{}),
+ }
+
+ // Read-only mode.
+ readOnly := s.o.GetReadOnly()
+
+ if readOnly {
+ // Recover journals (read-only mode).
+ if err := db.recoverJournalRO(); err != nil {
+ return nil, err
+ }
+ } else {
+ // Recover journals.
+ if err := db.recoverJournal(); err != nil {
+ return nil, err
+ }
+
+ // Remove any obsolete files.
+ if err := db.checkAndCleanFiles(); err != nil {
+ // Close journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return nil, err
+ }
+
+ }
+
+ // Doesn't need to be included in the wait group.
+ go db.compactionError()
+ go db.mpoolDrain()
+
+ if readOnly {
+ db.SetReadOnly()
+ } else {
+ db.closeW.Add(2)
+ go db.tCompaction()
+ go db.mCompaction()
+ // go db.jWriter()
+ }
+
+ s.logf("db@open done T·%v", time.Since(start))
+
+ runtime.SetFinalizer(db, (*DB).Close)
+ return db, nil
+}
+
+// Open opens or creates a DB for the given storage.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, Open will return an
+// os.ErrExist error.
+//
+// Open will return an error of type ErrCorrupted if corruption is
+// detected in the DB. Use errors.IsCorrupted to test whether an error is
+// due to corruption. A corrupted DB can be recovered with the Recover function.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = s.recover()
+ if err != nil {
+ if !os.IsNotExist(err) || s.o.GetErrorIfMissing() || s.o.GetReadOnly() {
+ return
+ }
+ err = s.create()
+ if err != nil {
+ return
+ }
+ } else if s.o.GetErrorIfExist() {
+ err = os.ErrExist
+ return
+ }
+
+ return openDB(s)
+}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, OpenFile will return an
+// os.ErrExist error.
+//
+// OpenFile uses standard file-system backed storage implementation as
+// described in the leveldb/storage package.
+//
+// OpenFile will return an error of type ErrCorrupted if corruption is
+// detected in the DB. Use errors.IsCorrupted to test whether an error is
+// due to corruption. A corrupted DB can be recovered with the Recover function.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, o.GetReadOnly())
+ if err != nil {
+ return
+ }
+ db, err = Open(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
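+
+// A minimal usage sketch for OpenFile (illustrative only, not part of the
+// upstream sources; "path/to/db" is a placeholder path, and nil selects
+// the default options):
+//
+//	db, err := leveldb.OpenFile("path/to/db", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer db.Close()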
+
+// Recover recovers and opens a DB with missing or corrupted manifest files
+// for the given storage. It will ignore any manifest files, valid or not.
+// The DB must already exist, or an error will be returned.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = recoverTable(s, o)
+ if err != nil {
+ return
+ }
+ return openDB(s)
+}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist, or an error will be returned.
+// Also, RecoverFile will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses standard file-system backed storage implementation as described
+// in the leveldb/storage package.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, false)
+ if err != nil {
+ return
+ }
+ db, err = Recover(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
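+
+// An illustrative sketch (not upstream code) of recovering a DB whose
+// manifest is missing or corrupted; "path/to/db" is a placeholder path:
+//
+//	db, err := leveldb.RecoverFile("path/to/db", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer db.Close()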
+
+func recoverTable(s *session, o *opt.Options) error {
+ o = dupOptions(o)
+ // Mask StrictReader, letting StrictRecovery do its job.
+ o.Strict &= ^opt.StrictReader
+
+ // Get all tables and sort them by file number.
+ fds, err := s.stor.List(storage.TypeTable)
+ if err != nil {
+ return err
+ }
+ sortFds(fds)
+
+ var (
+ maxSeq uint64
+ recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+ // We will drop corrupted tables.
+ strict = o.GetStrict(opt.StrictRecovery)
+ noSync = o.GetNoSync()
+
+ rec = &sessionRecord{}
+ bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+ )
+ buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
+ tmpFd = s.newTemp()
+ writer, err := s.stor.Create(tmpFd)
+ if err != nil {
+ return
+ }
+ defer func() {
+ writer.Close()
+ if err != nil {
+ s.stor.Remove(tmpFd)
+ tmpFd = storage.FileDesc{}
+ }
+ }()
+
+ // Copy entries.
+ tw := table.NewWriter(writer, o)
+ for iter.Next() {
+ key := iter.Key()
+ if validInternalKey(key) {
+ err = tw.Append(key, iter.Value())
+ if err != nil {
+ return
+ }
+ }
+ }
+ err = iter.Error()
+ if err != nil && !errors.IsCorrupted(err) {
+ return
+ }
+ err = tw.Close()
+ if err != nil {
+ return
+ }
+ if !noSync {
+ err = writer.Sync()
+ if err != nil {
+ return
+ }
+ }
+ size = int64(tw.BytesLen())
+ return
+ }
+ recoverTable := func(fd storage.FileDesc) error {
+ s.logf("table@recovery recovering @%d", fd.Num)
+ reader, err := s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+ var closed bool
+ defer func() {
+ if !closed {
+ reader.Close()
+ }
+ }()
+
+ // Get file size.
+ size, err := reader.Seek(0, 2)
+ if err != nil {
+ return err
+ }
+
+ var (
+ tSeq uint64
+ tgoodKey, tcorruptedKey, tcorruptedBlock int
+ imin, imax []byte
+ )
+ tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
+ if err != nil {
+ return err
+ }
+ iter := tr.NewIterator(nil, nil)
+ if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+ itererr.SetErrorCallback(func(err error) {
+ if errors.IsCorrupted(err) {
+ s.logf("table@recovery block corruption @%d %q", fd.Num, err)
+ tcorruptedBlock++
+ }
+ })
+ }
+
+ // Scan the table.
+ for iter.Next() {
+ key := iter.Key()
+ _, seq, _, kerr := parseInternalKey(key)
+ if kerr != nil {
+ tcorruptedKey++
+ continue
+ }
+ tgoodKey++
+ if seq > tSeq {
+ tSeq = seq
+ }
+ if imin == nil {
+ imin = append([]byte{}, key...)
+ }
+ imax = append(imax[:0], key...)
+ }
+ if err := iter.Error(); err != nil && !errors.IsCorrupted(err) {
+ iter.Release()
+ return err
+ }
+ iter.Release()
+
+ goodKey += tgoodKey
+ corruptedKey += tcorruptedKey
+ corruptedBlock += tcorruptedBlock
+
+ if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+ droppedTable++
+ s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ return nil
+ }
+
+ if tgoodKey > 0 {
+ if tcorruptedKey > 0 || tcorruptedBlock > 0 {
+ // Rebuild the table.
+ s.logf("table@recovery rebuilding @%d", fd.Num)
+ iter := tr.NewIterator(nil, nil)
+ tmpFd, newSize, err := buildTable(iter)
+ iter.Release()
+ if err != nil {
+ return err
+ }
+ closed = true
+ reader.Close()
+ if err := s.stor.Rename(tmpFd, fd); err != nil {
+ return err
+ }
+ size = newSize
+ }
+ if tSeq > maxSeq {
+ maxSeq = tSeq
+ }
+ recoveredKey += tgoodKey
+ // Add table to level 0.
+ rec.addTable(0, fd.Num, size, imin, imax)
+ s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ } else {
+ droppedTable++
+ s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
+ }
+
+ return nil
+ }
+
+ // Recover all tables.
+ if len(fds) > 0 {
+ s.logf("table@recovery F·%d", len(fds))
+
+ // Mark file number as used.
+ s.markFileNum(fds[len(fds)-1].Num)
+
+ for _, fd := range fds {
+ if err := recoverTable(fd); err != nil {
+ return err
+ }
+ }
+
+ s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
+ }
+
+ // Set sequence number.
+ rec.setSeqNum(maxSeq)
+
+ // Create new manifest.
+ if err := s.create(); err != nil {
+ return err
+ }
+
+ // Commit.
+ return s.commit(rec)
+}
+
+func (db *DB) recoverJournal() error {
+ // Get all journals and sort them by file number.
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ sortFds(rawFds)
+
+ // Journals that will be recovered.
+ var fds []storage.FileDesc
+ for _, fd := range rawFds {
+ if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
+ fds = append(fds, fd)
+ }
+ }
+
+ var (
+ ofd storage.FileDesc // Obsolete file.
+ rec = &sessionRecord{}
+ )
+
+ // Recover journals.
+ if len(fds) > 0 {
+ db.logf("journal@recovery F·%d", len(fds))
+
+ // Mark file number as used.
+ db.s.markFileNum(fds[len(fds)-1].Num)
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ jr *journal.Reader
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ buf = &util.Buffer{}
+ batchSeq uint64
+ batchLen int
+ )
+
+ for _, fd := range fds {
+ db.logf("journal@recovery recovering @%d", fd.Num)
+
+ fr, err := db.s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
+ }
+
+ // Flush memdb and remove obsolete journal file.
+ if !ofd.Zero() {
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ fr.Close()
+ return err
+ }
+ }
+
+ rec.setJournalNum(fd.Num)
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ fr.Close()
+ return err
+ }
+ rec.resetAddedTables()
+
+ db.s.stor.Remove(ofd)
+ ofd = storage.FileDesc{}
+ }
+
+ // Replay journal to memdb.
+ mdb.Reset()
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is the error returned due to corruption, with strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+ batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
+ if err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ // Save sequence number.
+ db.seq = batchSeq + uint64(batchLen)
+
+ // Flush it if large enough.
+ if mdb.Size() >= writeBuffer {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ fr.Close()
+ return err
+ }
+
+ mdb.Reset()
+ }
+ }
+
+ fr.Close()
+ ofd = fd
+ }
+
+ // Flush the last memdb.
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Create a new journal.
+ if _, err := db.newMem(0); err != nil {
+ return err
+ }
+
+ // Commit.
+ rec.setJournalNum(db.journalFd.Num)
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ // Close journal on error.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return err
+ }
+
+ // Remove the last obsolete journal file.
+ if !ofd.Zero() {
+ db.s.stor.Remove(ofd)
+ }
+
+ return nil
+}
+
+func (db *DB) recoverJournalRO() error {
+ // Get all journals and sort them by file number.
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ sortFds(rawFds)
+
+ // Journals that will be recovered.
+ var fds []storage.FileDesc
+ for _, fd := range rawFds {
+ if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
+ fds = append(fds, fd)
+ }
+ }
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ )
+
+ // Recover journals.
+ if len(fds) > 0 {
+ db.logf("journal@recovery RO·Mode F·%d", len(fds))
+
+ var (
+ jr *journal.Reader
+ buf = &util.Buffer{}
+ batchSeq uint64
+ batchLen int
+ )
+
+ for _, fd := range fds {
+ db.logf("journal@recovery recovering @%d", fd.Num)
+
+ fr, err := db.s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
+ }
+
+ // Replay journal to memdb.
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is the error returned due to corruption, with strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+ batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
+ if err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ // Save sequence number.
+ db.seq = batchSeq + uint64(batchLen)
+ }
+
+ fr.Close()
+ }
+ }
+
+ // Set memDB.
+ db.mem = &memDB{db: db, DB: mdb, ref: 1}
+
+ return nil
+}
+
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+ mk, mv, err := mdb.Find(ikey)
+ if err == nil {
+ ukey, _, kt, kerr := parseInternalKey(mk)
+ if kerr != nil {
+ // Shouldn't have happened.
+ panic(kerr)
+ }
+ if icmp.uCompare(ukey, ikey.ukey()) == 0 {
+ if kt == keyTypeDel {
+ return true, nil, ErrNotFound
+ }
+ return true, mv, nil
+
+ }
+ } else if err != ErrNotFound {
+ return true, nil, err
+ }
+ return
+}
+
+func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+ if auxm != nil {
+ if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
+ return append([]byte{}, mv...), me
+ }
+ }
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
+ return append([]byte{}, mv...), me
+ }
+ }
+
+ v := db.s.version()
+ value, cSched, err := v.get(auxt, ikey, ro, false)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ return
+}
+
+func nilIfNotFound(err error) error {
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+}
+
+func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+ if auxm != nil {
+ if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ v := db.s.version()
+ _, cSched, err := v.get(auxt, ikey, ro, true)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ if err == nil {
+ ret = true
+ } else if err == ErrNotFound {
+ err = nil
+ }
+ return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(nil, nil, key, se.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(nil, nil, key, se.seq, ro)
+}
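+
+// An illustrative sketch (not upstream code) of Get and Has; the key
+// "answer" is a placeholder:
+//
+//	value, err := db.Get([]byte("answer"), nil)
+//	if err == leveldb.ErrNotFound {
+//		// The key is absent.
+//	}
+//	ok, err := db.Has([]byte("answer"), nil)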
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The content of any slice returned by the iterator (e.g. a slice
+// returned by calling the Iterator.Key() or Iterator.Value() methods) should
+// not be modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ // The iterator holds the 'version' lock; 'version' is immutable, so the
+ // snapshot can be released after the iterator is created.
+ return db.newIterator(nil, nil, se.seq, slice, ro)
+}
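+
+// An illustrative sketch (not upstream code) of iterating over all keys
+// sharing a prefix, using util.BytesPrefix to build the range:
+//
+//	iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+//	for iter.Next() {
+//		// iter.Key() and iter.Value() must not be modified or retained.
+//		fmt.Printf("%s = %s\n", iter.Key(), iter.Value())
+//	}
+//	iter.Release()
+//	if err := iter.Error(); err != nil {
+//		log.Fatal(err)
+//	}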
+
+// GetSnapshot returns a latest snapshot of the underlying DB. A snapshot
+// is a frozen snapshot of a DB state at a particular point in time. The
+// content of snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ return db.newSnapshot(), nil
+}
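+
+// An illustrative sketch (not upstream code) of reading from a consistent
+// point-in-time view while writes continue; "key" is a placeholder:
+//
+//	snap, err := db.GetSnapshot()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer snap.Release()
+//	value, err := snap.Get([]byte("key"), nil)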
+
+// GetProperty returns value of the given property name.
+//
+// Property names:
+// leveldb.num-files-at-level{n}
+// Returns the number of files at level 'n'.
+// leveldb.stats
+// Returns statistics of the underlying DB.
+// leveldb.iostats
+// Returns statistics of effective disk read and write.
+// leveldb.writedelay
+// Returns cumulative write delay caused by compaction.
+// leveldb.sstables
+// Returns sstables list for each level.
+// leveldb.blockpool
+// Returns block pool stats.
+// leveldb.cachedblock
+// Returns size of cached block.
+// leveldb.openedtables
+// Returns number of opened tables.
+// leveldb.alivesnaps
+// Returns number of alive snapshots.
+// leveldb.aliveiters
+// Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ const prefix = "leveldb."
+ if !strings.HasPrefix(name, prefix) {
+ return "", ErrNotFound
+ }
+ p := name[len(prefix):]
+
+ v := db.s.version()
+ defer v.release()
+
+ numFilesPrefix := "num-files-at-level"
+ switch {
+ case strings.HasPrefix(p, numFilesPrefix):
+ var level uint
+ var rest string
+ n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+ if n != 1 {
+ err = ErrNotFound
+ } else {
+ value = fmt.Sprint(v.tLen(int(level)))
+ }
+ case p == "stats":
+ value = "Compactions\n" +
+ " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
+ "-------+------------+---------------+---------------+---------------+---------------\n"
+ for level, tables := range v.levels {
+ duration, read, write := db.compStats.getStat(level)
+ if len(tables) == 0 && duration == 0 {
+ continue
+ }
+ value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
+ level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
+ float64(read)/1048576.0, float64(write)/1048576.0)
+ }
+ case p == "iostats":
+ value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
+ float64(db.s.stor.reads())/1048576.0,
+ float64(db.s.stor.writes())/1048576.0)
+ case p == "writedelay":
+ writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
+ paused := atomic.LoadInt32(&db.inWritePaused) == 1
+ value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused)
+ case p == "sstables":
+ for level, tables := range v.levels {
+ value += fmt.Sprintf("--- level %d ---\n", level)
+ for _, t := range tables {
+ value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
+ }
+ }
+ case p == "blockpool":
+ value = fmt.Sprintf("%v", db.s.tops.bpool)
+ case p == "cachedblock":
+ if db.s.tops.bcache != nil {
+ value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+ } else {
+ value = ""
+ }
+ case p == "openedtables":
+ value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+ case p == "alivesnaps":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+ case p == "aliveiters":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
+ default:
+ err = ErrNotFound
+ }
+
+ return
+}
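+
+// An illustrative sketch (not upstream code) of querying two of the
+// properties listed above:
+//
+//	stats, _ := db.GetProperty("leveldb.stats")
+//	files, _ := db.GetProperty("leveldb.num-files-at-level0")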
+
+// DBStats is database statistics.
+type DBStats struct {
+ WriteDelayCount int32
+ WriteDelayDuration time.Duration
+ WritePaused bool
+
+ AliveSnapshots int32
+ AliveIterators int32
+
+ IOWrite uint64
+ IORead uint64
+
+ BlockCacheSize int
+ OpenedTablesCount int
+
+ LevelSizes []int64
+ LevelTablesCounts []int
+ LevelRead []int64
+ LevelWrite []int64
+ LevelDurations []time.Duration
+}
+
+// Stats populates s with database statistics.
+func (db *DB) Stats(s *DBStats) error {
+ err := db.ok()
+ if err != nil {
+ return err
+ }
+
+ s.IORead = db.s.stor.reads()
+ s.IOWrite = db.s.stor.writes()
+ s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN)
+ s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay))
+ s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1
+
+ s.OpenedTablesCount = db.s.tops.cache.Size()
+ if db.s.tops.bcache != nil {
+ s.BlockCacheSize = db.s.tops.bcache.Size()
+ } else {
+ s.BlockCacheSize = 0
+ }
+
+ s.AliveIterators = atomic.LoadInt32(&db.aliveIters)
+ s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps)
+
+ s.LevelDurations = s.LevelDurations[:0]
+ s.LevelRead = s.LevelRead[:0]
+ s.LevelWrite = s.LevelWrite[:0]
+ s.LevelSizes = s.LevelSizes[:0]
+ s.LevelTablesCounts = s.LevelTablesCounts[:0]
+
+ v := db.s.version()
+ defer v.release()
+
+ for level, tables := range v.levels {
+ duration, read, write := db.compStats.getStat(level)
+ if len(tables) == 0 && duration == 0 {
+ continue
+ }
+ s.LevelDurations = append(s.LevelDurations, duration)
+ s.LevelRead = append(s.LevelRead, read)
+ s.LevelWrite = append(s.LevelWrite, write)
+ s.LevelSizes = append(s.LevelSizes, tables.size())
+ s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables))
+ }
+
+ return nil
+}
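+
+// An illustrative sketch (not upstream code) of sampling statistics into a
+// reusable DBStats value:
+//
+//	var st leveldb.DBStats
+//	if err := db.Stats(&st); err == nil {
+//		fmt.Printf("write delays: %d (%v)\n", st.WriteDelayCount, st.WriteDelayDuration)
+//	}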
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes equals the length of the given
+// ranges. The returned sizes measure storage space usage, so if the user
+// data compresses by a factor of ten, the returned sizes will be one-tenth
+// the size of the corresponding user data size.
+// The results may not include the sizes of recently written data.
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ v := db.s.version()
+ defer v.release()
+
+ sizes := make(Sizes, 0, len(ranges))
+ for _, r := range ranges {
+ imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
+ imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
+ start, err := v.offsetOf(imin)
+ if err != nil {
+ return nil, err
+ }
+ limit, err := v.offsetOf(imax)
+ if err != nil {
+ return nil, err
+ }
+ var size int64
+ if limit >= start {
+ size = limit - start
+ }
+ sizes = append(sizes, size)
+ }
+
+ return sizes, nil
+}
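+
+// An illustrative sketch (not upstream code) of approximating the on-disk
+// size of a key range; the range bounds are placeholders:
+//
+//	sizes, err := db.SizeOf([]util.Range{
+//		{Start: []byte("a"), Limit: []byte("z")},
+//	})
+//	if err == nil {
+//		fmt.Println("approximate bytes:", sizes.Sum())
+//	}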
+
+// Close closes the DB. This will also release any outstanding snapshots,
+// abort any in-flight compactions and discard any open transaction.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
+func (db *DB) Close() error {
+ if !db.setClosed() {
+ return ErrClosed
+ }
+
+ start := time.Now()
+ db.log("db@close closing")
+
+ // Clear the finalizer.
+ runtime.SetFinalizer(db, nil)
+
+ // Get compaction error.
+ var err error
+ select {
+ case err = <-db.compErrC:
+ if err == ErrReadOnly {
+ err = nil
+ }
+ default:
+ }
+
+ // Signal all goroutines.
+ close(db.closeC)
+
+ // Discard open transaction.
+ if db.tr != nil {
+ db.tr.Discard()
+ }
+
+ // Acquire writer lock.
+ db.writeLockC <- struct{}{}
+
+ // Wait for all goroutines to exit.
+ db.closeW.Wait()
+
+ // Close the journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ db.journal = nil
+ db.journalWriter = nil
+ }
+
+ if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ }
+
+ // Close session.
+ db.s.close()
+ db.logf("db@close done T·%v", time.Since(start))
+ db.s.release()
+
+ if db.closer != nil {
+ if err1 := db.closer.Close(); err == nil {
+ err = err1
+ }
+ db.closer = nil
+ }
+
+ // Clear memdbs.
+ db.clearMems()
+
+ return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
new file mode 100644
index 0000000..0c1b9a5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -0,0 +1,854 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+var (
+ errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
+)
+
+type cStat struct {
+ duration time.Duration
+ read int64
+ write int64
+}
+
+func (p *cStat) add(n *cStatStaging) {
+ p.duration += n.duration
+ p.read += n.read
+ p.write += n.write
+}
+
+func (p *cStat) get() (duration time.Duration, read, write int64) {
+ return p.duration, p.read, p.write
+}
+
+type cStatStaging struct {
+ start time.Time
+ duration time.Duration
+ on bool
+ read int64
+ write int64
+}
+
+func (p *cStatStaging) startTimer() {
+ if !p.on {
+ p.start = time.Now()
+ p.on = true
+ }
+}
+
+func (p *cStatStaging) stopTimer() {
+ if p.on {
+ p.duration += time.Since(p.start)
+ p.on = false
+ }
+}
+
+type cStats struct {
+ lk sync.Mutex
+ stats []cStat
+}
+
+func (p *cStats) addStat(level int, n *cStatStaging) {
+ p.lk.Lock()
+ if level >= len(p.stats) {
+ newStats := make([]cStat, level+1)
+ copy(newStats, p.stats)
+ p.stats = newStats
+ }
+ p.stats[level].add(n)
+ p.lk.Unlock()
+}
+
+func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
+ p.lk.Lock()
+ defer p.lk.Unlock()
+ if level < len(p.stats) {
+ return p.stats[level].get()
+ }
+ return
+}
+
+func (db *DB) compactionError() {
+ var err error
+noerr:
+ // No error.
+ for {
+ select {
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ goto haserr
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+haserr:
+ // Transient error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ goto noerr
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+hasperr:
+ // Persistent error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case db.compPerErrC <- err:
+ case db.writeLockC <- struct{}{}:
+ // Hold write lock, so that write won't pass-through.
+ db.compWriteLocking = true
+ case <-db.closeC:
+ if db.compWriteLocking {
+ // We should release the lock or Close will hang.
+ <-db.writeLockC
+ }
+ return
+ }
+ }
+}
+
+type compactionTransactCounter int
+
+func (cnt *compactionTransactCounter) incr() {
+ *cnt++
+}
+
+type compactionTransactInterface interface {
+ run(cnt *compactionTransactCounter) error
+ revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
+ defer func() {
+ if x := recover(); x != nil {
+ if x == errCompactionTransactExiting {
+ if err := t.revert(); err != nil {
+ db.logf("%s revert error %q", name, err)
+ }
+ }
+ panic(x)
+ }
+ }()
+
+ const (
+ backoffMin = 1 * time.Second
+ backoffMax = 8 * time.Second
+ backoffMul = 2 * time.Second
+ )
+ var (
+ backoff = backoffMin
+ backoffT = time.NewTimer(backoff)
+ lastCnt = compactionTransactCounter(0)
+
+ disableBackoff = db.s.o.GetDisableCompactionBackoff()
+ )
+ for n := 0; ; n++ {
+ // Check whether the DB is closed.
+ if db.isClosed() {
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ } else if n > 0 {
+ db.logf("%s retrying N·%d", name, n)
+ }
+
+ // Execute.
+ cnt := compactionTransactCounter(0)
+ err := t.run(&cnt)
+ if err != nil {
+ db.logf("%s error I·%d %q", name, cnt, err)
+ }
+
+ // Set compaction error status.
+ select {
+ case db.compErrSetC <- err:
+ case perr := <-db.compPerErrC:
+ if err != nil {
+ db.logf("%s exiting (persistent error %q)", name, perr)
+ db.compactionExitTransact()
+ }
+ case <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ if err == nil {
+ return
+ }
+ if errors.IsCorrupted(err) {
+ db.logf("%s exiting (corruption detected)", name)
+ db.compactionExitTransact()
+ }
+
+ if !disableBackoff {
+ // Reset backoff duration if counter is advancing.
+ if cnt > lastCnt {
+ backoff = backoffMin
+ lastCnt = cnt
+ }
+
+ // Backoff.
+ backoffT.Reset(backoff)
+ if backoff < backoffMax {
+ backoff *= backoffMul
+ if backoff > backoffMax {
+ backoff = backoffMax
+ }
+ }
+ select {
+ case <-backoffT.C:
+ case <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ }
+ }
+}
+
+type compactionTransactFunc struct {
+ runFunc func(cnt *compactionTransactCounter) error
+ revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+ return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+ if t.revertFunc != nil {
+ return t.revertFunc()
+ }
+ return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+ db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
+
+func (db *DB) compactionExitTransact() {
+ panic(errCompactionTransactExiting)
+}
+
+func (db *DB) compactionCommit(name string, rec *sessionRecord) {
+ db.compCommitLk.Lock()
+ defer db.compCommitLk.Unlock() // Defer is necessary.
+ db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
+ return db.s.commit(rec)
+ }, nil)
+}
+
+func (db *DB) memCompaction() {
+ mdb := db.getFrozenMem()
+ if mdb == nil {
+ return
+ }
+ defer mdb.decref()
+
+ db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
+
+ // Don't compact empty memdb.
+ if mdb.Len() == 0 {
+ db.logf("memdb@flush skipping")
+ // drop frozen memdb
+ db.dropFrozenMem()
+ return
+ }
+
+ // Pause table compaction.
+ resumeC := make(chan struct{})
+ select {
+ case db.tcompPauseC <- (chan<- struct{})(resumeC):
+ case <-db.compPerErrC:
+ close(resumeC)
+ resumeC = nil
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+
+ var (
+ rec = &sessionRecord{}
+ stats = &cStatStaging{}
+ flushLevel int
+ )
+
+ // Generate tables.
+ db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
+ stats.startTimer()
+ flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
+ stats.stopTimer()
+ return
+ }, func() error {
+ for _, r := range rec.addedTables {
+ db.logf("memdb@flush revert @%d", r.num)
+ if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ rec.setJournalNum(db.journalFd.Num)
+ rec.setSeqNum(db.frozenSeq)
+
+ // Commit.
+ stats.startTimer()
+ db.compactionCommit("memdb", rec)
+ stats.stopTimer()
+
+ db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
+
+ for _, r := range rec.addedTables {
+ stats.write += r.size
+ }
+ db.compStats.addStat(flushLevel, stats)
+
+ // Drop frozen memdb.
+ db.dropFrozenMem()
+
+ // Resume table compaction.
+ if resumeC != nil {
+ select {
+ case <-resumeC:
+ close(resumeC)
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+ }
+
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+}
+
+type tableCompactionBuilder struct {
+ db *DB
+ s *session
+ c *compaction
+ rec *sessionRecord
+ stat0, stat1 *cStatStaging
+
+ snapHasLastUkey bool
+ snapLastUkey []byte
+ snapLastSeq uint64
+ snapIter int
+ snapKerrCnt int
+ snapDropCnt int
+
+ kerrCnt int
+ dropCnt int
+
+ minSeq uint64
+ strict bool
+ tableSize int
+
+ tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+ // Create a new table if one hasn't been created yet.
+ if b.tw == nil {
+ // Check for pause event.
+ if b.db != nil {
+ select {
+ case ch := <-b.db.tcompPauseC:
+ b.db.pauseCompaction(ch)
+ case <-b.db.closeC:
+ b.db.compactionExitTransact()
+ default:
+ }
+ }
+
+ // Create new table.
+ var err error
+ b.tw, err = b.s.tops.create()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write key/value into table.
+ return b.tw.append(key, value)
+}
+
+func (b *tableCompactionBuilder) needFlush() bool {
+ return b.tw.tw.BytesLen() >= b.tableSize
+}
+
+func (b *tableCompactionBuilder) flush() error {
+ t, err := b.tw.finish()
+ if err != nil {
+ return err
+ }
+ b.rec.addTableFile(b.c.sourceLevel+1, t)
+ b.stat1.write += t.size
+ b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+ b.tw = nil
+ return nil
+}
+
+func (b *tableCompactionBuilder) cleanup() {
+ if b.tw != nil {
+ b.tw.drop()
+ b.tw = nil
+ }
+}
+
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+ snapResumed := b.snapIter > 0
+ hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+ lastUkey := append([]byte{}, b.snapLastUkey...)
+ lastSeq := b.snapLastSeq
+ b.kerrCnt = b.snapKerrCnt
+ b.dropCnt = b.snapDropCnt
+ // Restore compaction state.
+ b.c.restore()
+
+ defer b.cleanup()
+
+ b.stat1.startTimer()
+ defer b.stat1.stopTimer()
+
+ iter := b.c.newIterator()
+ defer iter.Release()
+ for i := 0; iter.Next(); i++ {
+ // Incr transact counter.
+ cnt.incr()
+
+ // Skip until last state.
+ if i < b.snapIter {
+ continue
+ }
+
+ resumed := false
+ if snapResumed {
+ resumed = true
+ snapResumed = false
+ }
+
+ ikey := iter.Key()
+ ukey, seq, kt, kerr := parseInternalKey(ikey)
+
+ if kerr == nil {
+ shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+ if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+ // First occurrence of this user key.
+
+ // Only rotate tables if ukey doesn't hop across.
+ if b.tw != nil && (shouldStop || b.needFlush()) {
+ if err := b.flush(); err != nil {
+ return err
+ }
+
+ // Creates snapshot of the state.
+ b.c.save()
+ b.snapHasLastUkey = hasLastUkey
+ b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+ b.snapLastSeq = lastSeq
+ b.snapIter = i
+ b.snapKerrCnt = b.kerrCnt
+ b.snapDropCnt = b.dropCnt
+ }
+
+ hasLastUkey = true
+ lastUkey = append(lastUkey[:0], ukey...)
+ lastSeq = keyMaxSeq
+ }
+
+ switch {
+ case lastSeq <= b.minSeq:
+ // Dropped because a newer entry for the same user key exists
+ fallthrough // (A)
+ case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+ // For this user key:
+ // (1) there is no data in higher levels
+ // (2) data in lower levels will have larger seq numbers
+ // (3) data in layers that are being compacted here and have
+ // smaller seq numbers will be dropped in the next
+ // few iterations of this loop (by rule (A) above).
+ // Therefore this deletion marker is obsolete and can be dropped.
+ lastSeq = seq
+ b.dropCnt++
+ continue
+ default:
+ lastSeq = seq
+ }
+ } else {
+ if b.strict {
+ return kerr
+ }
+
+ // Don't drop corrupted keys.
+ hasLastUkey = false
+ lastUkey = lastUkey[:0]
+ lastSeq = keyMaxSeq
+ b.kerrCnt++
+ }
+
+ if err := b.appendKV(ikey, iter.Value()); err != nil {
+ return err
+ }
+ }
+
+ if err := iter.Error(); err != nil {
+ return err
+ }
+
+ // Finish last table.
+ if b.tw != nil && !b.tw.empty() {
+ return b.flush()
+ }
+ return nil
+}
+
+func (b *tableCompactionBuilder) revert() error {
+ for _, at := range b.rec.addedTables {
+ b.s.logf("table@build revert @%d", at.num)
+ if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+ defer c.release()
+
+ rec := &sessionRecord{}
+ rec.addCompPtr(c.sourceLevel, c.imax)
+
+ if !noTrivial && c.trivial() {
+ t := c.levels[0][0]
+ db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
+ rec.delTable(c.sourceLevel, t.fd.Num)
+ rec.addTableFile(c.sourceLevel+1, t)
+ db.compactionCommit("table-move", rec)
+ return
+ }
+
+ var stats [2]cStatStaging
+ for i, tables := range c.levels {
+ for _, t := range tables {
+ stats[i].read += t.size
+ // Insert deleted tables into record
+ rec.delTable(c.sourceLevel+i, t.fd.Num)
+ }
+ }
+ sourceSize := int(stats[0].read + stats[1].read)
+ minSeq := db.minSeq()
+ db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
+
+ b := &tableCompactionBuilder{
+ db: db,
+ s: db.s,
+ c: c,
+ rec: rec,
+ stat1: &stats[1],
+ minSeq: minSeq,
+ strict: db.s.o.GetStrict(opt.StrictCompaction),
+ tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
+ }
+ db.compactionTransact("table@build", b)
+
+ // Commit.
+ stats[1].startTimer()
+ db.compactionCommit("table", rec)
+ stats[1].stopTimer()
+
+ resultSize := int(stats[1].write)
+ db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
+
+ // Save compaction stats
+ for i := range stats {
+ db.compStats.addStat(c.sourceLevel+1, &stats[i])
+ }
+}
+
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
+ db.logf("table@compaction range L%d %q:%q", level, umin, umax)
+ if level >= 0 {
+ if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
+ db.tableCompaction(c, true)
+ }
+ } else {
+ // Retry until nothing to compact.
+ for {
+ compacted := false
+
+ // Scan for maximum level with overlapped tables.
+ v := db.s.version()
+ m := 1
+ for i := m; i < len(v.levels); i++ {
+ tables := v.levels[i]
+ if tables.overlaps(db.s.icmp, umin, umax, false) {
+ m = i
+ }
+ }
+ v.release()
+
+ for level := 0; level < m; level++ {
+ if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
+ db.tableCompaction(c, true)
+ compacted = true
+ }
+ }
+
+ if !compacted {
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func (db *DB) tableAutoCompaction() {
+ if c := db.s.pickCompaction(); c != nil {
+ db.tableCompaction(c, false)
+ }
+}
+
+func (db *DB) tableNeedCompaction() bool {
+ v := db.s.version()
+ defer v.release()
+ return v.needCompaction()
+}
+
+// resumeWrite reports whether write operations should be resumed once enough level0 files have been compacted.
+func (db *DB) resumeWrite() bool {
+ v := db.s.version()
+ defer v.release()
+ if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+ return true
+ }
+ return false
+}
+
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
+ select {
+ case ch <- struct{}{}:
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+}
+
+type cCmd interface {
+ ack(err error)
+}
+
+type cAuto struct {
+ // Note: for table compaction, a non-empty ackC indicates a compaction-wait command.
+ ackC chan<- error
+}
+
+func (r cAuto) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+type cRange struct {
+ level int
+ min, max []byte
+ ackC chan<- error
+}
+
+func (r cRange) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compTrigger(compC chan<- cCmd) {
+ select {
+ case compC <- cAuto{}:
+ default:
+ }
+}
+
+// This will trigger auto compaction and/or wait for all compaction to be done.
+func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cAuto{ch}:
+ case err = <-db.compErrC:
+ return
+ case <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+// Send range compaction request.
+func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cRange{level, min, max, ch}:
+ case err := <-db.compErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+func (db *DB) mCompaction() {
+ var x cCmd
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ select {
+ case x = <-db.mcompCmdC:
+ switch x.(type) {
+ case cAuto:
+ db.memCompaction()
+ x.ack(nil)
+ x = nil
+ default:
+ panic("leveldb: unknown command")
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+}
+
+func (db *DB) tCompaction() {
+ var (
+ x cCmd
+ waitQ []cCmd
+ )
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ for i := range waitQ {
+ waitQ[i].ack(ErrClosed)
+ waitQ[i] = nil
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ if db.tableNeedCompaction() {
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case <-db.closeC:
+ return
+ default:
+ }
+ // Resume write operation as soon as possible.
+ if len(waitQ) > 0 && db.resumeWrite() {
+ for i := range waitQ {
+ waitQ[i].ack(nil)
+ waitQ[i] = nil
+ }
+ waitQ = waitQ[:0]
+ }
+ } else {
+ for i := range waitQ {
+ waitQ[i].ack(nil)
+ waitQ[i] = nil
+ }
+ waitQ = waitQ[:0]
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case <-db.closeC:
+ return
+ }
+ }
+ if x != nil {
+ switch cmd := x.(type) {
+ case cAuto:
+ if cmd.ackC != nil {
+ // Check the write pause state before queueing it.
+ if db.resumeWrite() {
+ x.ack(nil)
+ } else {
+ waitQ = append(waitQ, x)
+ }
+ }
+ case cRange:
+ x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
+ default:
+ panic("leveldb: unknown command")
+ }
+ x = nil
+ }
+ db.tableAutoCompaction()
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
new file mode 100644
index 0000000..03c24cd
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -0,0 +1,360 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+type memdbReleaser struct {
+ once sync.Once
+ m *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+ mr.once.Do(func() {
+ mr.m.decref()
+ })
+}
+
+func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+ em, fm := db.getMems()
+ v := db.s.version()
+
+ tableIts := v.getIterators(slice, ro)
+ n := len(tableIts) + len(auxt) + 3
+ its := make([]iterator.Iterator, 0, n)
+
+ if auxm != nil {
+ ami := auxm.NewIterator(slice)
+ ami.SetReleaser(&memdbReleaser{m: auxm})
+ its = append(its, ami)
+ }
+ for _, t := range auxt {
+ its = append(its, v.s.tops.newIterator(t, slice, ro))
+ }
+
+ emi := em.NewIterator(slice)
+ emi.SetReleaser(&memdbReleaser{m: em})
+ its = append(its, emi)
+ if fm != nil {
+ fmi := fm.NewIterator(slice)
+ fmi.SetReleaser(&memdbReleaser{m: fm})
+ its = append(its, fmi)
+ }
+ its = append(its, tableIts...)
+ mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
+ mi.SetReleaser(&versionReleaser{v: v})
+ return mi
+}
+
+func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+ var islice *util.Range
+ if slice != nil {
+ islice = &util.Range{}
+ if slice.Start != nil {
+ islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
+ }
+ if slice.Limit != nil {
+ islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
+ }
+ }
+ rawIter := db.newRawIterator(auxm, auxt, islice, ro)
+ iter := &dbIter{
+ db: db,
+ icmp: db.s.icmp,
+ iter: rawIter,
+ seq: seq,
+ strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+ key: make([]byte, 0),
+ value: make([]byte, 0),
+ }
+ atomic.AddInt32(&db.aliveIters, 1)
+ runtime.SetFinalizer(iter, (*dbIter).Release)
+ return iter
+}
+
+func (db *DB) iterSamplingRate() int {
+ return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+// dbIter represents an iterator over the state of a database session.
+type dbIter struct {
+ db *DB
+ icmp *iComparer
+ iter iterator.Iterator
+ seq uint64
+ strict bool
+
+ samplingGap int
+ dir dir
+ key []byte
+ value []byte
+ err error
+ releaser util.Releaser
+}
+
+func (i *dbIter) sampleSeek() {
+ ikey := i.iter.Key()
+ i.samplingGap -= len(ikey) + len(i.iter.Value())
+ for i.samplingGap < 0 {
+ i.samplingGap += i.db.iterSamplingRate()
+ i.db.sampleSeek(ikey)
+ }
+}
+
+func (i *dbIter) setErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+}
+
+func (i *dbIter) iterErr() {
+ if err := i.iter.Error(); err != nil {
+ i.setErr(err)
+ }
+}
+
+func (i *dbIter) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *dbIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.First() {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.Last() {
+ return i.prev()
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
+ if i.iter.Seek(ikey) {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) next() bool {
+ for {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ switch kt {
+ case keyTypeDel:
+ // Skip deleted key.
+ i.key = append(i.key[:0], ukey...)
+ i.dir = dirForward
+ case keyTypeVal:
+ if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ i.dir = dirForward
+ return true
+ }
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ break
+ }
+ if !i.iter.Next() {
+ i.dir = dirEOI
+ i.iterErr()
+ break
+ }
+ }
+ return false
+}
+
+func (i *dbIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+ }
+ return i.next()
+}
+
+func (i *dbIter) prev() bool {
+ i.dir = dirBackward
+ del := true
+ if i.iter.Valid() {
+ for {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ if !del && i.icmp.uCompare(ukey, i.key) < 0 {
+ return true
+ }
+ del = (kt == keyTypeDel)
+ if !del {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ if !i.iter.Prev() {
+ break
+ }
+ }
+ }
+ if del {
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+ return true
+}
+
+func (i *dbIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ for i.iter.Prev() {
+ if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if i.icmp.uCompare(ukey, i.key) < 0 {
+ goto cont
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+
+cont:
+ return i.prev()
+}
+
+func (i *dbIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *dbIter) Release() {
+ if i.dir != dirReleased {
+ // Clear the finalizer.
+ runtime.SetFinalizer(i, nil)
+
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+
+ i.dir = dirReleased
+ i.key = nil
+ i.value = nil
+ i.iter.Release()
+ i.iter = nil
+ atomic.AddInt32(&i.db.aliveIters, -1)
+ i.db = nil
+ }
+}
+
+func (i *dbIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *dbIter) Error() error {
+ return i.err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
new file mode 100644
index 0000000..c2ad70c
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type snapshotElement struct {
+ seq uint64
+ ref int
+ e *list.Element
+}
+
+// Acquires a snapshot, based on latest sequence.
+func (db *DB) acquireSnapshot() *snapshotElement {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ seq := db.getSeq()
+
+ if e := db.snapsList.Back(); e != nil {
+ se := e.Value.(*snapshotElement)
+ if se.seq == seq {
+ se.ref++
+ return se
+ } else if seq < se.seq {
+ panic("leveldb: sequence number is not increasing")
+ }
+ }
+ se := &snapshotElement{seq: seq, ref: 1}
+ se.e = db.snapsList.PushBack(se)
+ return se
+}
+
+// Releases given snapshot element.
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ se.ref--
+ if se.ref == 0 {
+ db.snapsList.Remove(se.e)
+ se.e = nil
+ } else if se.ref < 0 {
+ panic("leveldb: Snapshot: negative element reference")
+ }
+}
+
+// Gets the minimum sequence number that is not covered by any snapshot.
+func (db *DB) minSeq() uint64 {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ if e := db.snapsList.Front(); e != nil {
+ return e.Value.(*snapshotElement).seq
+ }
+
+ return db.getSeq()
+}
+
+// Snapshot is a DB snapshot.
+type Snapshot struct {
+ db *DB
+ elem *snapshotElement
+ mu sync.RWMutex
+ released bool
+}
+
+// Creates new snapshot object.
+func (db *DB) newSnapshot() *Snapshot {
+ snap := &Snapshot{
+ db: db,
+ elem: db.acquireSnapshot(),
+ }
+ atomic.AddInt32(&db.aliveSnaps, 1)
+ runtime.SetFinalizer(snap, (*Snapshot).Release)
+ return snap
+}
+
+func (snap *Snapshot) String() string {
+ return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.get(nil, nil, key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.has(nil, nil, key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: The content of any slice returned by the iterator (e.g. a slice
+// returned by calling the Iterator.Key() or Iterator.Value() methods) should
+// not be modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+// Releasing the snapshot does not release the iterator; the iterator
+// remains valid until it is released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := snap.db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+ if snap.released {
+ return iterator.NewEmptyIterator(ErrSnapshotReleased)
+ }
+ // Since the iterator already holds a version ref, it doesn't need to
+ // hold a snapshot ref.
+ return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; they remain valid until released or the underlying DB is
+// closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+
+ if !snap.released {
+ // Clear the finalizer.
+ runtime.SetFinalizer(snap, nil)
+
+ snap.released = true
+ snap.db.releaseSnapshot(snap.elem)
+ atomic.AddInt32(&snap.db.aliveSnaps, -1)
+ snap.db = nil
+ snap.elem = nil
+ }
+}
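+
+// An illustrative sketch (not upstream code) of a typical snapshot
+// lifecycle: iterate a consistent view, then release the iterator and the
+// snapshot independently:
+//
+//	snap, _ := db.GetSnapshot()
+//	iter := snap.NewIterator(nil, nil)
+//	for iter.Next() {
+//		// Process iter.Key() / iter.Value().
+//	}
+//	iter.Release()
+//	snap.Release()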
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 0000000..65e1c54
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+var (
+ errHasFrozenMem = errors.New("has frozen mem")
+)
+
+type memDB struct {
+ db *DB
+ *memdb.DB
+ ref int32
+}
+
+func (m *memDB) getref() int32 {
+ return atomic.LoadInt32(&m.ref)
+}
+
+func (m *memDB) incref() {
+ atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+ if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+ // Only put back memdb with std capacity.
+ if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+ m.Reset()
+ m.db.mpoolPut(m.DB)
+ }
+ m.db = nil
+ m.DB = nil
+ } else if ref < 0 {
+ panic("negative memdb ref")
+ }
+}
+
+// Get latest sequence number.
+func (db *DB) getSeq() uint64 {
+ return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+ atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) setSeq(seq uint64) {
+ atomic.StoreUint64(&db.seq, seq)
+}
+
+func (db *DB) sampleSeek(ikey internalKey) {
+ v := db.s.version()
+ if v.sampleSeek(ikey) {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+ if !db.isClosed() {
+ select {
+ case db.memPool <- mem:
+ default:
+ }
+ }
+}
+
+func (db *DB) mpoolGet(n int) *memDB {
+ var mdb *memdb.DB
+ select {
+ case mdb = <-db.memPool:
+ default:
+ }
+ if mdb == nil || mdb.Capacity() < n {
+ mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+ }
+ return &memDB{
+ db: db,
+ DB: mdb,
+ }
+}
+
+func (db *DB) mpoolDrain() {
+ ticker := time.NewTicker(30 * time.Second)
+ for {
+ select {
+ case <-ticker.C:
+ select {
+ case <-db.memPool:
+ default:
+ }
+ case <-db.closeC:
+ ticker.Stop()
+ // Make sure the pool is drained.
+ select {
+ case <-db.memPool:
+ case <-time.After(time.Second):
+ }
+ close(db.memPool)
+ return
+ }
+ }
+}
+
+// Create a new memdb and freeze the old one; needs external synchronization.
+// newMem is only called synchronously by the writer.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+ fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
+ w, err := db.s.stor.Create(fd)
+ if err != nil {
+ db.s.reuseFileNum(fd.Num)
+ return
+ }
+
+ db.memMu.Lock()
+ defer db.memMu.Unlock()
+
+ if db.frozenMem != nil {
+ return nil, errHasFrozenMem
+ }
+
+ if db.journal == nil {
+ db.journal = journal.NewWriter(w)
+ } else {
+ db.journal.Reset(w)
+ db.journalWriter.Close()
+ db.frozenJournalFd = db.journalFd
+ }
+ db.journalWriter = w
+ db.journalFd = fd
+ db.frozenMem = db.mem
+ mem = db.mpoolGet(n)
+ mem.incref() // for self
+ mem.incref() // for caller
+ db.mem = mem
+ // The seq is only incremented by the writer, and whoever called newMem
+ // should hold the write lock, so no additional synchronization is needed here.
+ db.frozenSeq = db.seq
+ return
+}
+
+// Get all memdbs.
+func (db *DB) getMems() (e, f *memDB) {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem != nil {
+ db.mem.incref()
+ } else if !db.isClosed() {
+ panic("nil effective mem")
+ }
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.mem, db.frozenMem
+}
+
+// Get effective memdb.
+func (db *DB) getEffectiveMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem != nil {
+ db.mem.incref()
+ } else if !db.isClosed() {
+ panic("nil effective mem")
+ }
+ return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ return db.frozenMem != nil
+}
+
+// Get frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.frozenMem
+}
+
+// Drop the frozen memdb; assumes the frozen memdb isn't nil.
+func (db *DB) dropFrozenMem() {
+ db.memMu.Lock()
+ if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
+ db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
+ } else {
+ db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
+ }
+ db.frozenJournalFd = storage.FileDesc{}
+ db.frozenMem.decref()
+ db.frozenMem = nil
+ db.memMu.Unlock()
+}
+
+// Clear mems ptr; used by DB.Close().
+func (db *DB) clearMems() {
+ db.memMu.Lock()
+ db.mem = nil
+ db.frozenMem = nil
+ db.memMu.Unlock()
+}
+
+// Set closed flag; return true if not already closed.
+func (db *DB) setClosed() bool {
+ return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
+}
+
+// Check whether DB was closed.
+func (db *DB) isClosed() bool {
+ return atomic.LoadUint32(&db.closed) != 0
+}
+
+// Check read ok status.
+func (db *DB) ok() error {
+ if db.isClosed() {
+ return ErrClosed
+ }
+ return nil
+}
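
setClosed and isClosed above implement a once-only shutdown flag with a compare-and-swap; a standalone sketch of the same pattern (not part of this package's API):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type store struct {
        closed uint32
    }

    // setClosed flips the flag exactly once; only the first caller sees true.
    func (s *store) setClosed() bool {
        return atomic.CompareAndSwapUint32(&s.closed, 0, 1)
    }

    // isClosed can be checked cheaply from any goroutine.
    func (s *store) isClosed() bool {
        return atomic.LoadUint32(&s.closed) != 0
    }

    func main() {
        s := &store{}
        fmt.Println(s.setClosed(), s.setClosed(), s.isClosed()) // true false true
    }
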
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
new file mode 100644
index 0000000..1a00001
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
@@ -0,0 +1,329 @@
+// Copyright (c) 2016, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var errTransactionDone = errors.New("leveldb: transaction already closed")
+
+// Transaction is the transaction handle.
+type Transaction struct {
+ db *DB
+ lk sync.RWMutex
+ seq uint64
+ mem *memDB
+ tables tFiles
+ ikScratch []byte
+ rec sessionRecord
+ stats cStatStaging
+ closed bool
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return nil, errTransactionDone
+ }
+ return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// Has returns true if the DB contains the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return false, errTransactionDone
+ }
+ return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently while writing to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB, and a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: Any slice returned by the iterator (e.g. a slice returned by
+// calling the Iterator.Key() or Iterator.Value() methods) should not be
+// modified unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return iterator.NewEmptyIterator(errTransactionDone)
+ }
+ tr.mem.incref()
+ return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+func (tr *Transaction) flush() error {
+ // Flush memdb.
+ if tr.mem.Len() != 0 {
+ tr.stats.startTimer()
+ iter := tr.mem.NewIterator(nil)
+ t, n, err := tr.db.s.tops.createFrom(iter)
+ iter.Release()
+ tr.stats.stopTimer()
+ if err != nil {
+ return err
+ }
+ if tr.mem.getref() == 1 {
+ tr.mem.Reset()
+ } else {
+ tr.mem.decref()
+ tr.mem = tr.db.mpoolGet(0)
+ tr.mem.incref()
+ }
+ tr.tables = append(tr.tables, t)
+ tr.rec.addTableFile(0, t)
+ tr.stats.write += t.size
+ tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+ }
+ return nil
+}
+
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+ tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+ if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+ if err := tr.flush(); err != nil {
+ return err
+ }
+ }
+ if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+ return err
+ }
+ tr.seq++
+ return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction. The batch records will
+// be applied sequentially.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Write returns.
+func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
+ if b == nil || b.Len() == 0 {
+ return nil
+ }
+
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return b.replayInternal(func(i int, kt keyType, k, v []byte) error {
+ return tr.put(kt, k, v)
+ })
+}
+
+func (tr *Transaction) setDone() {
+ tr.closed = true
+ tr.db.tr = nil
+ tr.mem.decref()
+ <-tr.db.writeLockC
+}
+
+// Commit commits the transaction. If the returned error is not nil, the
+// transaction is not committed; it can then either be retried or discarded.
+//
+// Other methods should not be called after transaction has been committed.
+func (tr *Transaction) Commit() error {
+ if err := tr.db.ok(); err != nil {
+ return err
+ }
+
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ if err := tr.flush(); err != nil {
+ // Return the error; let the user decide whether to retry or discard
+ // the transaction.
+ return err
+ }
+ if len(tr.tables) != 0 {
+ // Committing transaction.
+ tr.rec.setSeqNum(tr.seq)
+ tr.db.compCommitLk.Lock()
+ tr.stats.startTimer()
+ var cerr error
+ for retry := 0; retry < 3; retry++ {
+ cerr = tr.db.s.commit(&tr.rec)
+ if cerr != nil {
+ tr.db.logf("transaction@commit error R·%d %q", retry, cerr)
+ select {
+ case <-time.After(time.Second):
+ case <-tr.db.closeC:
+ tr.db.logf("transaction@commit exiting")
+ tr.db.compCommitLk.Unlock()
+ return cerr
+ }
+ } else {
+ // Success. Set db.seq.
+ tr.db.setSeq(tr.seq)
+ break
+ }
+ }
+ tr.stats.stopTimer()
+ if cerr != nil {
+ // Return the error; let the user decide whether to retry or discard
+ // the transaction.
+ return cerr
+ }
+
+ // Update compaction stats. This is safe as long as we hold compCommitLk.
+ tr.db.compStats.addStat(0, &tr.stats)
+
+ // Trigger table auto-compaction.
+ tr.db.compTrigger(tr.db.tcompCmdC)
+ tr.db.compCommitLk.Unlock()
+
+ // Additionally, wait for compaction when a certain threshold is reached.
+ // Ignore the error; Commit returns an error only if the transaction
+ // can't be committed.
+ tr.db.waitCompaction()
+ }
+ // Only mark as done if transaction committed successfully.
+ tr.setDone()
+ return nil
+}
+
+func (tr *Transaction) discard() {
+ // Discard transaction.
+ for _, t := range tr.tables {
+ tr.db.logf("transaction@discard @%d", t.fd.Num)
+ if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
+ tr.db.s.reuseFileNum(t.fd.Num)
+ }
+ }
+}
+
+// Discard discards the transaction.
+//
+// Other methods should not be called after transaction has been discarded.
+func (tr *Transaction) Discard() {
+ tr.lk.Lock()
+ if !tr.closed {
+ tr.discard()
+ tr.setDone()
+ }
+ tr.lk.Unlock()
+}
+
+func (db *DB) waitCompaction() error {
+ if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() {
+ return db.compTriggerWait(db.tcompCmdC)
+ }
+ return nil
+}
+
+// OpenTransaction opens an atomic DB transaction. Only one transaction can be
+// opened at a time. Subsequent calls to Write and OpenTransaction will block
+// until the in-flight transaction is committed or discarded.
+// The returned transaction handle is safe for concurrent use.
+//
+// Transactions are expensive and can overwhelm compaction, especially if
+// the transaction size is small. Use with caution.
+//
+// The transaction must be closed once done, either by committing or discarding
+// the transaction.
+// Closing the DB will discard any open transaction.
+func (db *DB) OpenTransaction() (*Transaction, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ // The write happens synchronously.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return nil, err
+ case <-db.closeC:
+ return nil, ErrClosed
+ }
+
+ if db.tr != nil {
+ panic("leveldb: has open transaction")
+ }
+
+ // Flush current memdb.
+ if db.mem != nil && db.mem.Len() != 0 {
+ if _, err := db.rotateMem(0, true); err != nil {
+ return nil, err
+ }
+ }
+
+ // Wait compaction when certain threshold reached.
+ if err := db.waitCompaction(); err != nil {
+ return nil, err
+ }
+
+ tr := &Transaction{
+ db: db,
+ seq: db.seq,
+ mem: db.mpoolGet(0),
+ }
+ tr.mem.incref()
+ db.tr = tr
+ return tr, nil
+}
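
A minimal sketch of driving the transaction API defined above (path and keys are illustrative; a failed Commit leaves the transaction open, so it is discarded explicitly):

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        tr, err := db.OpenTransaction() // blocks other writers until closed
        if err != nil {
            log.Fatal(err)
        }
        if err := tr.Put([]byte("k1"), []byte("v1"), nil); err != nil {
            tr.Discard()
            log.Fatal(err)
        }
        if err := tr.Commit(); err != nil {
            tr.Discard() // Commit failed: retry or, as here, discard
            log.Fatal(err)
        }
    }
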
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 0000000..3f06548
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+ Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+ NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
+
+// Sizes is a list of sizes.
+type Sizes []int64
+
+// Sum returns the sum of the sizes.
+func (sizes Sizes) Sum() int64 {
+ var sum int64
+ for _, size := range sizes {
+ sum += size
+ }
+ return sum
+}
+
+// Logging.
+func (db *DB) log(v ...interface{}) { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files.
+func (db *DB) checkAndCleanFiles() error {
+ v := db.s.version()
+ defer v.release()
+
+ tmap := make(map[int64]bool)
+ for _, tables := range v.levels {
+ for _, t := range tables {
+ tmap[t.fd.Num] = false
+ }
+ }
+
+ fds, err := db.s.stor.List(storage.TypeAll)
+ if err != nil {
+ return err
+ }
+
+ var nt int
+ var rem []storage.FileDesc
+ for _, fd := range fds {
+ keep := true
+ switch fd.Type {
+ case storage.TypeManifest:
+ keep = fd.Num >= db.s.manifestFd.Num
+ case storage.TypeJournal:
+ if !db.frozenJournalFd.Zero() {
+ keep = fd.Num >= db.frozenJournalFd.Num
+ } else {
+ keep = fd.Num >= db.journalFd.Num
+ }
+ case storage.TypeTable:
+ _, keep = tmap[fd.Num]
+ if keep {
+ tmap[fd.Num] = true
+ nt++
+ }
+ }
+
+ if !keep {
+ rem = append(rem, fd)
+ }
+ }
+
+ if nt != len(tmap) {
+ var mfds []storage.FileDesc
+ for num, present := range tmap {
+ if !present {
+ mfds = append(mfds, storage.FileDesc{Type: storage.TypeTable, Num: num})
+ db.logf("db@janitor table missing @%d", num)
+ }
+ }
+ return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
+ }
+
+ db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
+ for _, fd := range rem {
+ db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
+ if err := db.s.stor.Remove(fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
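
The Sizes type above is what DB.SizeOf (defined elsewhere in this package) returns; a sketch of summing approximate on-disk sizes over two key ranges (path and ranges are illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        sizes, err := db.SizeOf([]util.Range{
            {Start: []byte("a"), Limit: []byte("m")},
            {Start: []byte("m"), Limit: []byte("z")},
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("approximate bytes:", sizes.Sum())
    }
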
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
new file mode 100644
index 0000000..db0c1be
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error {
+ wr, err := db.journal.Next()
+ if err != nil {
+ return err
+ }
+ if err := writeBatchesWithHeader(wr, batches, seq); err != nil {
+ return err
+ }
+ if err := db.journal.Flush(); err != nil {
+ return err
+ }
+ if sync {
+ return db.journalWriter.Sync()
+ }
+ return nil
+}
+
+func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
+ retryLimit := 3
+retry:
+ // Wait for pending memdb compaction.
+ err = db.compTriggerWait(db.mcompCmdC)
+ if err != nil {
+ return
+ }
+ retryLimit--
+
+ // Create new memdb and journal.
+ mem, err = db.newMem(n)
+ if err != nil {
+ if err == errHasFrozenMem {
+ if retryLimit <= 0 {
+ panic("BUG: still has frozen memdb")
+ }
+ goto retry
+ }
+ return
+ }
+
+ // Schedule memdb compaction.
+ if wait {
+ err = db.compTriggerWait(db.mcompCmdC)
+ } else {
+ db.compTrigger(db.mcompCmdC)
+ }
+ return
+}
+
+func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
+ delayed := false
+ slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger()
+ pauseTrigger := db.s.o.GetWriteL0PauseTrigger()
+ flush := func() (retry bool) {
+ mdb = db.getEffectiveMem()
+ if mdb == nil {
+ err = ErrClosed
+ return false
+ }
+ defer func() {
+ if retry {
+ mdb.decref()
+ mdb = nil
+ }
+ }()
+ tLen := db.s.tLen(0)
+ mdbFree = mdb.Free()
+ switch {
+ case tLen >= slowdownTrigger && !delayed:
+ delayed = true
+ time.Sleep(time.Millisecond)
+ case mdbFree >= n:
+ return false
+ case tLen >= pauseTrigger:
+ delayed = true
+ // Set the write paused flag explicitly.
+ atomic.StoreInt32(&db.inWritePaused, 1)
+ err = db.compTriggerWait(db.tcompCmdC)
+ // Unset the write paused flag.
+ atomic.StoreInt32(&db.inWritePaused, 0)
+ if err != nil {
+ return false
+ }
+ default:
+ // Allow memdb to grow if it has no entry.
+ if mdb.Len() == 0 {
+ mdbFree = n
+ } else {
+ mdb.decref()
+ mdb, err = db.rotateMem(n, false)
+ if err == nil {
+ mdbFree = mdb.Free()
+ } else {
+ mdbFree = 0
+ }
+ }
+ return false
+ }
+ return true
+ }
+ start := time.Now()
+ for flush() {
+ }
+ if delayed {
+ db.writeDelay += time.Since(start)
+ db.writeDelayN++
+ } else if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN))
+ atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay))
+ db.writeDelay = 0
+ db.writeDelayN = 0
+ }
+ return
+}
+
+type writeMerge struct {
+ sync bool
+ batch *Batch
+ keyType keyType
+ key, value []byte
+}
+
+func (db *DB) unlockWrite(overflow bool, merged int, err error) {
+ for i := 0; i < merged; i++ {
+ db.writeAckC <- err
+ }
+ if overflow {
+ // Pass lock to the next write (that failed to merge).
+ db.writeMergedC <- false
+ } else {
+ // Release lock.
+ <-db.writeLockC
+ }
+}
+
+// ourBatch is a batch that we can modify.
+func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
+ // Try to flush the memdb. This method also tries to throttle writes
+ // if they are too fast and compaction cannot catch up.
+ mdb, mdbFree, err := db.flush(batch.internalLen)
+ if err != nil {
+ db.unlockWrite(false, 0, err)
+ return err
+ }
+ defer mdb.decref()
+
+ var (
+ overflow bool
+ merged int
+ batches = []*Batch{batch}
+ )
+
+ if merge {
+ // Merge limit.
+ var mergeLimit int
+ if batch.internalLen > 128<<10 {
+ mergeLimit = (1 << 20) - batch.internalLen
+ } else {
+ mergeLimit = 128 << 10
+ }
+ mergeCap := mdbFree - batch.internalLen
+ if mergeLimit > mergeCap {
+ mergeLimit = mergeCap
+ }
+
+ merge:
+ for mergeLimit > 0 {
+ select {
+ case incoming := <-db.writeMergeC:
+ if incoming.batch != nil {
+ // Merge batch.
+ if incoming.batch.internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ batches = append(batches, incoming.batch)
+ mergeLimit -= incoming.batch.internalLen
+ } else {
+ // Merge put.
+ internalLen := len(incoming.key) + len(incoming.value) + 8
+ if internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ if ourBatch == nil {
+ ourBatch = db.batchPool.Get().(*Batch)
+ ourBatch.Reset()
+ batches = append(batches, ourBatch)
+ }
+ // We can use the same batch since concurrent writes don't
+ // guarantee write order.
+ ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value)
+ mergeLimit -= internalLen
+ }
+ sync = sync || incoming.sync
+ merged++
+ db.writeMergedC <- true
+
+ default:
+ break merge
+ }
+ }
+ }
+
+ // Release ourBatch if any.
+ if ourBatch != nil {
+ defer db.batchPool.Put(ourBatch)
+ }
+
+ // Seq number.
+ seq := db.seq + 1
+
+ // Write journal.
+ if err := db.writeJournal(batches, seq, sync); err != nil {
+ db.unlockWrite(overflow, merged, err)
+ return err
+ }
+
+ // Put batches.
+ for _, batch := range batches {
+ if err := batch.putMem(seq, mdb.DB); err != nil {
+ panic(err)
+ }
+ seq += uint64(batch.Len())
+ }
+
+ // Incr seq number.
+ db.addSeq(uint64(batchesLen(batches)))
+
+ // Rotate the memdb if it has reached the threshold.
+ if batch.internalLen >= mdbFree {
+ db.rotateMem(0, false)
+ }
+
+ db.unlockWrite(overflow, merged, nil)
+ return nil
+}
+
+// Write applies the given batch to the DB. The batch records will be applied
+// sequentially. Write may be used concurrently; when used concurrently and the
+// batch is small enough, Write will try to merge the batches. Set the
+// NoWriteMerge option to true to disable write merging.
+//
+// It is safe to modify the contents of the arguments after Write returns but
+// not before. Write will not modify content of the batch.
+func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
+ return err
+ }
+
+ // If the batch size is larger than the write buffer, it may be justified
+ // to write using a transaction instead. Using a transaction, the batch will
+ // be written into tables directly, skipping the journaling.
+ if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
+ tr, err := db.OpenTransaction()
+ if err != nil {
+ return err
+ }
+ if err := tr.Write(batch, wo); err != nil {
+ tr.Discard()
+ return err
+ }
+ return tr.Commit()
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ return db.writeLocked(batch, nil, merge, sync)
+}
+
+func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ batch := db.batchPool.Get().(*Batch)
+ batch.Reset()
+ batch.appendRec(kt, key, value)
+ return db.writeLocked(batch, batch, merge, sync)
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map. Write merge also applies for Put, see
+// Write.
+//
+// It is safe to modify the contents of the arguments after Put returns but not
+// before.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeVal, key, value, wo)
+}
+
+// Delete deletes the value for the given key. Delete will not return an error
+// if the key doesn't exist. Write merge also applies for Delete, see Write.
+//
+// It is safe to modify the contents of the arguments after Delete returns but
+// not before.
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeDel, key, nil, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+ iter := mem.NewIterator(nil)
+ defer iter.Release()
+ return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+ (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB,
+// and a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore, if both are nil, the entire DB will be compacted.
+func (db *DB) CompactRange(r util.Range) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Check for overlaps in memdb.
+ mdb := db.getEffectiveMem()
+ if mdb == nil {
+ return ErrClosed
+ }
+ defer mdb.decref()
+ if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+ // Memdb compaction.
+ if _, err := db.rotateMem(0, false); err != nil {
+ <-db.writeLockC
+ return err
+ }
+ <-db.writeLockC
+ if err := db.compTriggerWait(db.mcompCmdC); err != nil {
+ return err
+ }
+ } else {
+ <-db.writeLockC
+ }
+
+ // Table compaction.
+ return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ db.compWriteLocking = true
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Set compaction read-only.
+ select {
+ case db.compErrSetC <- ErrReadOnly:
+ case perr := <-db.compPerErrC:
+ return perr
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ return nil
+}
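
A sketch tying the write path above together: a batch written with Sync (forcing a journal fsync), followed by a whole-DB CompactRange (nil Start and Limit). Path and keys are illustrative:

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/opt"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        batch := new(leveldb.Batch)
        batch.Put([]byte("foo"), []byte("bar"))
        batch.Delete([]byte("baz"))

        // Sync forces an fsync of the journal; setting NoWriteMerge would
        // opt this write out of the merging described above.
        if err := db.Write(batch, &opt.WriteOptions{Sync: true}); err != nil {
            log.Fatal(err)
        }

        // Nil Start and Limit compact the entire DB.
        if err := db.CompactRange(util.Range{}); err != nil {
            log.Fatal(err)
        }
    }
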
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 0000000..be768e5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides implementation of LevelDB key/value database.
+//
+// Create or open a database:
+//
+// // The returned DB instance is safe for concurrent use, which means that
+// // all of the DB's methods may be called concurrently from multiple goroutines.
+// db, err := leveldb.OpenFile("path/to/db", nil)
+// ...
+// defer db.Close()
+// ...
+//
+// Read or modify the database content:
+//
+// // Remember that the contents of the returned slice should not be modified.
+// data, err := db.Get([]byte("key"), nil)
+// ...
+// err = db.Put([]byte("key"), []byte("value"), nil)
+// ...
+// err = db.Delete([]byte("key"), nil)
+// ...
+//
+// Iterate over database content:
+//
+// iter := db.NewIterator(nil, nil)
+// for iter.Next() {
+// // Remember that the contents of the returned slice should not be modified, and
+// // only valid until the next call to Next.
+// key := iter.Key()
+// value := iter.Value()
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over a subset of database content with a particular prefix:
+//
+// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Seek-then-Iterate:
+//
+// iter := db.NewIterator(nil, nil)
+// for ok := iter.Seek(key); ok; ok = iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over a subset of database content:
+//
+// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Batch writes:
+//
+// batch := new(leveldb.Batch)
+// batch.Put([]byte("foo"), []byte("value"))
+// batch.Put([]byte("bar"), []byte("another value"))
+// batch.Delete([]byte("baz"))
+// err = db.Write(batch, nil)
+// ...
+//
+// Use bloom filter:
+//
+// o := &opt.Options{
+// Filter: filter.NewBloomFilter(10),
+// }
+// db, err := leveldb.OpenFile("path/to/db", o)
+// ...
+// defer db.Close()
+// ...
+package leveldb
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
new file mode 100644
index 0000000..de26498
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReadOnly = errors.New("leveldb: read-only mode")
+ ErrSnapshotReleased = errors.New("leveldb: snapshot released")
+ ErrIterReleased = errors.New("leveldb: iterator released")
+ ErrClosed = errors.New("leveldb: closed")
+)
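
These exported errors are sentinel values, checked by identity; a sketch (path and key are illustrative):

    package main

    import (
        "fmt"
        "log"

        "github.com/syndtr/goleveldb/leveldb"
    )

    func main() {
        db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        data, err := db.Get([]byte("missing-key"), nil)
        switch {
        case err == leveldb.ErrNotFound:
            fmt.Println("key absent") // an expected condition, not a failure
        case err != nil:
            log.Fatal(err) // e.g. ErrClosed once the DB has been closed
        default:
            fmt.Printf("value: %s\n", data)
        }
    }
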
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 0000000..8d6146b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = New("leveldb: not found")
+ ErrReleased = util.ErrReleased
+ ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+ return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+ Fd storage.FileDesc
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// NewErrCorrupted creates new ErrCorrupted error.
+func NewErrCorrupted(fd storage.FileDesc, err error) error {
+ return &ErrCorrupted{fd, err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error indicates
+// a corruption.
+func IsCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ case *storage.ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+// ErrMissingFiles is the type indicating a corruption due to missing
+// files. ErrMissingFiles is always wrapped with ErrCorrupted.
+type ErrMissingFiles struct {
+ Fds []storage.FileDesc
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFd sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; otherwise it does nothing.
+func SetFd(err error, fd storage.FileDesc) error {
+ switch x := err.(type) {
+ case *ErrCorrupted:
+ x.Fd = fd
+ return x
+ }
+ return err
+}
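
IsCorrupted is how callers distinguish corruption from other failures; a sketch pairing it with goleveldb's RecoverFile (the path is illustrative):

    package main

    import (
        "log"

        "github.com/syndtr/goleveldb/leveldb"
        "github.com/syndtr/goleveldb/leveldb/errors"
    )

    func main() {
        path := "/tmp/example-db" // illustrative
        db, err := leveldb.OpenFile(path, nil)
        if errors.IsCorrupted(err) {
            // Corruption (including missing files) can often be repaired,
            // at the cost of dropping unrecoverable data.
            db, err = leveldb.RecoverFile(path, nil)
        }
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }
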
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 0000000..e961e42
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+ filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+ return f.Filter.Contains(filter, internalKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+ return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+ filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+ g.FilterGenerator.Add(internalKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 0000000..bab0e99
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+ return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+ return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+ nBytes := len(filter) - 1
+ if nBytes < 1 {
+ return false
+ }
+ nBits := uint32(nBytes * 8)
+
+ // Use the encoded k so that we can read filters generated by
+ // bloom filters created using different parameters.
+ k := filter[nBytes]
+ if k > 30 {
+ // Reserved for potentially new encodings for short bloom filters.
+ // Consider it a match.
+ return true
+ }
+
+ kh := bloomHash(key)
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < k; j++ {
+ bitpos := kh % nBits
+ if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
+ return false
+ }
+ kh += delta
+ }
+ return true
+}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+ // Round down to reduce probing cost a little bit.
+ k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+ if k < 1 {
+ k = 1
+ } else if k > 30 {
+ k = 30
+ }
+ return &bloomFilterGenerator{
+ n: int(f),
+ k: k,
+ }
+}
+
+type bloomFilterGenerator struct {
+ n int
+ k uint8
+
+ keyHashes []uint32
+}
+
+func (g *bloomFilterGenerator) Add(key []byte) {
+ // Use double-hashing to generate a sequence of hash values.
+ // See analysis in [Kirsch,Mitzenmacher 2006].
+ g.keyHashes = append(g.keyHashes, bloomHash(key))
+}
+
+func (g *bloomFilterGenerator) Generate(b Buffer) {
+ // Compute bloom filter size (in both bits and bytes)
+ nBits := uint32(len(g.keyHashes) * g.n)
+ // For small n, we can see a very high false positive rate. Fix it
+ // by enforcing a minimum bloom filter length.
+ if nBits < 64 {
+ nBits = 64
+ }
+ nBytes := (nBits + 7) / 8
+ nBits = nBytes * 8
+
+ dest := b.Alloc(int(nBytes) + 1)
+ dest[nBytes] = g.k
+ for _, kh := range g.keyHashes {
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < g.k; j++ {
+ bitpos := kh % nBits
+ dest[bitpos/8] |= (1 << (bitpos % 8))
+ kh += delta
+ }
+ }
+
+ g.keyHashes = g.keyHashes[:0]
+}
+
+// NewBloomFilter creates a new initialized bloom filter for given
+// bitsPerKey.
+//
+// Since bitsPerKey is persisted individually for each bloom filter
+// serialization, bloom filters are backwards compatible with respect to
+// changing bitsPerKey. This means that no big performance penalty will
+// be experienced when changing the parameter. See documentation for
+// opt.Options.Filter for more information.
+func NewBloomFilter(bitsPerKey int) Filter {
+ return bloomFilter(bitsPerKey)
+}
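
A standalone sketch exercising the generator and Contains directly (normally the table writer drives this internally); util.Buffer is goleveldb's buffer type satisfying the Buffer interface used by Generate:

    package main

    import (
        "fmt"

        "github.com/syndtr/goleveldb/leveldb/filter"
        "github.com/syndtr/goleveldb/leveldb/util"
    )

    func main() {
        f := filter.NewBloomFilter(10) // ~10 bits per key, so k = 6 probes

        g := f.NewGenerator()
        g.Add([]byte("foo"))
        g.Add([]byte("bar"))

        var buf util.Buffer // provides Alloc, Write and WriteByte
        g.Generate(&buf)
        data := buf.Bytes()

        fmt.Println(f.Contains(data, []byte("foo"))) // true
        fmt.Println(f.Contains(data, []byte("bar"))) // true
        // Absent keys return false with high probability; bloom filters
        // have no false negatives, only false positives.
        fmt.Println(f.Contains(data, []byte("qux")))
    }
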
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 0000000..7a925c5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides interface and implementation of probabilistic
+// data structure.
+//
+// The filter is responsible for creating a small filter from a set of keys.
+// These filters will then be used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+ // Alloc allocates an n-byte slice from the buffer. This also advances
+ // the write offset.
+ Alloc(n int) []byte
+
+ // Write appends the contents of p to the buffer.
+ Write(p []byte) (n int, err error)
+
+ // WriteByte appends the byte c to the buffer.
+ WriteByte(c byte) error
+}
+
+// Filter is the filter.
+type Filter interface {
+ // Name returns the name of this policy.
+ //
+ // Note that if the filter encoding changes in an incompatible way,
+ // the name returned by this method must be changed. Otherwise, old
+ // incompatible filters may be passed to methods of this type.
+ Name() string
+
+ // NewGenerator creates a new filter generator.
+ NewGenerator() FilterGenerator
+
+ // Contains returns true if the filter contains the given key.
+ //
+ // The filter argument is a filter generated by the filter generator.
+ Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+ // Add adds a key to the filter generator.
+ //
+ // The key may become invalid after the call to this method ends, therefore
+ // the key must be copied if the implementation requires keeping it for later
+ // use. The key should not be modified directly; doing so may cause
+ // undefined results.
+ Add(key []byte)
+
+ // Generate generates a filter based on the keys passed so far. After a call
+ // to Generate the filter generator may be reset, depending on the implementation.
+ Generate(b Buffer)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 0000000..a23ab05
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps basic Len and Search method.
+type BasicArray interface {
+ // Len returns length of the array.
+ Len() int
+
+ // Search finds the smallest index that points to a key greater
+ // than or equal to the given key.
+ Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and basic Index method.
+type Array interface {
+ BasicArray
+
+ // Index returns the key/value pair at index i.
+ Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and the basic Get method.
+type ArrayIndexer interface {
+ BasicArray
+
+ // Get returns a new data iterator for index i.
+ Get(i int) Iterator
+}
+
+type basicArrayIterator struct {
+ util.BasicReleaser
+ array BasicArray
+ pos int
+ err error
+}
+
+func (i *basicArrayIterator) Valid() bool {
+ return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
+}
+
+func (i *basicArrayIterator) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.array.Len() == 0 {
+ i.pos = -1
+ return false
+ }
+ i.pos = 0
+ return true
+}
+
+func (i *basicArrayIterator) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = n - 1
+ return true
+}
+
+func (i *basicArrayIterator) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = i.array.Search(key)
+ if i.pos >= n {
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos++
+ if n := i.array.Len(); i.pos >= n {
+ i.pos = n
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos--
+ if i.pos < 0 {
+ i.pos = -1
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Error() error { return i.err }
+
+type arrayIterator struct {
+ basicArrayIterator
+ array Array
+ pos int
+ key, value []byte
+}
+
+func (i *arrayIterator) updateKV() {
+ if i.pos == i.basicArrayIterator.pos {
+ return
+ }
+ i.pos = i.basicArrayIterator.pos
+ if i.Valid() {
+ i.key, i.value = i.array.Index(i.pos)
+ } else {
+ i.key = nil
+ i.value = nil
+ }
+}
+
+func (i *arrayIterator) Key() []byte {
+ i.updateKV()
+ return i.key
+}
+
+func (i *arrayIterator) Value() []byte {
+ i.updateKV()
+ return i.value
+}
+
+type arrayIteratorIndexer struct {
+ basicArrayIterator
+ array ArrayIndexer
+}
+
+func (i *arrayIteratorIndexer) Get() Iterator {
+ if i.Valid() {
+ return i.array.Get(i.basicArrayIterator.pos)
+ }
+ return nil
+}
+
+// NewArrayIterator returns an iterator from the given array.
+func NewArrayIterator(array Array) Iterator {
+ return &arrayIterator{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ pos: -1,
+ }
+}
+
+// NewArrayIndexer returns an index iterator from the given array.
+func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
+ return &arrayIteratorIndexer{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ }
+}
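
A sketch of satisfying the Array interface with a pair of key-sorted slices and walking them via NewArrayIterator (the sortedKV type and its data are illustrative):

    package main

    import (
        "fmt"
        "sort"

        "github.com/syndtr/goleveldb/leveldb/iterator"
    )

    // sortedKV exposes parallel, key-sorted slices through the Array interface.
    type sortedKV struct {
        keys, values []string
    }

    func (a sortedKV) Len() int { return len(a.keys) }

    // Search must return the smallest index whose key is >= the given key.
    func (a sortedKV) Search(key []byte) int {
        return sort.SearchStrings(a.keys, string(key))
    }

    func (a sortedKV) Index(i int) (key, value []byte) {
        return []byte(a.keys[i]), []byte(a.values[i])
    }

    func main() {
        arr := sortedKV{
            keys:   []string{"a", "b", "c"},
            values: []string{"1", "2", "3"},
        }
        iter := iterator.NewArrayIterator(arr)
        defer iter.Release()
        for ok := iter.Seek([]byte("b")); ok; ok = iter.Next() {
            fmt.Printf("%s=%s\n", iter.Key(), iter.Value()) // b=2, then c=3
        }
    }
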
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
new file mode 100644
index 0000000..939adbb
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorIndexer is the interface that wraps CommonIterator and the basic Get
+// method. IteratorIndexer provides the index for an indexed iterator.
+type IteratorIndexer interface {
+ CommonIterator
+
+ // Get returns a new data iterator for the current position, or nil if
+ // done.
+ Get() Iterator
+}
+
+type indexedIterator struct {
+ util.BasicReleaser
+ index IteratorIndexer
+ strict bool
+
+ data Iterator
+ err error
+ errf func(err error)
+ closed bool
+}
+
+func (i *indexedIterator) setData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = i.index.Get()
+}
+
+func (i *indexedIterator) clearData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = nil
+}
+
+func (i *indexedIterator) indexErr() {
+ if err := i.index.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ i.err = err
+ }
+}
+
+func (i *indexedIterator) dataErr() bool {
+ if err := i.data.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *indexedIterator) Valid() bool {
+ return i.data != nil && i.data.Valid()
+}
+
+func (i *indexedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.First() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ return i.Next()
+}
+
+func (i *indexedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Last() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ return true
+}
+
+func (i *indexedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Seek(key) {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Seek(key) {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Next() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Next():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Next() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Prev() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Prev():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Prev() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ }
+ return true
+}
+
+func (i *indexedIterator) Key() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Key()
+}
+
+func (i *indexedIterator) Value() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Value()
+}
+
+func (i *indexedIterator) Release() {
+ i.clearData()
+ i.index.Release()
+ i.BasicReleaser.Release()
+}
+
+func (i *indexedIterator) Error() error {
+ if i.err != nil {
+ return i.err
+ }
+ if err := i.index.Error(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (i *indexedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
+// iterator that contains the actual key/value pairs.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on the 'index iterator' will not be
+// ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+ return &indexedIterator{index: index, strict: strict}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 0000000..96fb0f6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interface and implementation to traverse over
+// contents of a database.
+package iterator
+
+import (
+ "errors"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seek methods'.
+type IteratorSeeker interface {
+ // First moves the iterator to the first key/value pair. If the iterator
+ // only contains one key/value pair then First and Last move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ First() bool
+
+ // Last moves the iterator to the last key/value pair. If the iterator
+ // only contains one key/value pair then First and Last move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ Last() bool
+
+ // Seek moves the iterator to the first key/value pair whose key is greater
+ // than or equal to the given key.
+ // It returns whether such a pair exists.
+ //
+ // It is safe to modify the contents of the argument after Seek returns.
+ Seek(key []byte) bool
+
+ // Next moves the iterator to the next key/value pair.
+ // It returns false if the iterator is exhausted.
+ Next() bool
+
+ // Prev moves the iterator to the previous key/value pair.
+ // It returns false if the iterator is exhausted.
+ Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+ IteratorSeeker
+
+ // util.Releaser is the interface that wraps the basic Release method.
+ // When called, Release releases any resources associated with the
+ // iterator.
+ util.Releaser
+
+ // util.ReleaseSetter is the interface that wraps the basic SetReleaser
+ // method.
+ util.ReleaseSetter
+
+ // TODO: Remove this when ready.
+ Valid() bool
+
+ // Error returns any accumulated error. Exhausting all the key/value pairs
+ // is not considered to be an error.
+ Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When an error is encountered, any 'seek method' will return false and will
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily safe for concurrent use, but it is
+// safe to use multiple iterators concurrently, with each in a dedicated
+// goroutine.
+type Iterator interface {
+ CommonIterator
+
+ // Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Key() []byte
+
+ // Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter is implemented by the indexed and merged iterators.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback allows setting an error callback for the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ util.BasicReleaser
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 0000000..1a7e29d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type mergedIterator struct {
+ cmp comparer.Comparer
+ iters []Iterator
+ strict bool
+
+ keys [][]byte
+ index int
+ dir dir
+ err error
+ errf func(err error)
+ releaser util.Releaser
+}
+
+func assertKey(key []byte) []byte {
+ if key == nil {
+ panic("leveldb/iterator: nil key")
+ }
+ return key
+}
+
+func (i *mergedIterator) iterErr(iter Iterator) bool {
+ if err := iter.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *mergedIterator) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *mergedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.First():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirEOI
+ return i.prev()
+}
+
+func (i *mergedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Seek(key):
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) next() bool {
+ var key []byte
+ if i.dir == dirForward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirEOI
+ return false
+ }
+ i.dir = dirForward
+ return true
+}
+
+func (i *mergedIterator) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirSOI:
+ return i.First()
+ case dirBackward:
+ key := append([]byte{}, i.keys[i.index]...)
+ if !i.Seek(key) {
+ return false
+ }
+ return i.Next()
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Next():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.next()
+}
+
+func (i *mergedIterator) prev() bool {
+ var key []byte
+ if i.dir == dirBackward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirSOI
+ return false
+ }
+ i.dir = dirBackward
+ return true
+}
+
+func (i *mergedIterator) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ key := append([]byte{}, i.keys[i.index]...)
+ for x, iter := range i.iters {
+ if x == i.index {
+ continue
+ }
+ seek := iter.Seek(key)
+ switch {
+ case seek && iter.Prev(), !seek && iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Prev():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.prev()
+}
+
+func (i *mergedIterator) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.keys[i.index]
+}
+
+func (i *mergedIterator) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.iters[i.index].Value()
+}
+
+func (i *mergedIterator) Release() {
+ if i.dir != dirReleased {
+ i.dir = dirReleased
+ for _, iter := range i.iters {
+ iter.Release()
+ }
+ i.iters = nil
+ i.keys = nil
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *mergedIterator) Error() error {
+ return i.err
+}
+
+func (i *mergedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewMergedIterator returns an iterator that merges its input. Walking the
+// resultant iterator will return all key/value pairs of all input iterators
+// in strictly increasing key order, as defined by cmp.
+// The input's key ranges may overlap, but there are assumed to be no duplicate
+// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
+// None of the iters may be nil.
+//
+// If strict is true, any 'corruption error' (i.e. errors.IsCorrupted(err) ==
+// true) will not be ignored and will halt the merged iterator; otherwise the
+// iterator will continue to the next 'input iterator'.
+func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
+ return &mergedIterator{
+ iters: iters,
+ cmp: cmp,
+ strict: strict,
+ keys: make([][]byte, len(iters)),
+ }
+}
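+
+// Example usage (an illustrative sketch, not part of the upstream source;
+// itA and itB stand for any two pre-built Iterators over disjoint keys):
+//
+//	merged := NewMergedIterator([]Iterator{itA, itB}, comparer.DefaultComparer, true)
+//	defer merged.Release()
+//	for merged.Next() { // calling Next from SOI behaves like First
+//		fmt.Printf("%s=%s\n", merged.Key(), merged.Value())
+//	}
+//	if err := merged.Error(); err != nil {
+//		// handle the error
+//	}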
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
new file mode 100644
index 0000000..d094c3d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -0,0 +1,524 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, authors and contributors information can be found at the URLs below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe for concurrent use.
+//
+// Example code:
+// func read(r io.Reader) ([]string, error) {
+// var ss []string
+// journals := journal.NewReader(r, nil, true, true)
+// for {
+// j, err := journals.Next()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// return nil, err
+// }
+// s, err := ioutil.ReadAll(j)
+// if err != nil {
+// return nil, err
+// }
+// ss = append(ss, string(s))
+// }
+// return ss, nil
+// }
+//
+// func write(w io.Writer, ss []string) error {
+// journals := journal.NewWriter(w)
+// for _, s := range ss {
+// j, err := journals.Next()
+// if err != nil {
+// return err
+// }
+// if _, err := j.Write([]byte(s)); err != nil {
+// return err
+// }
+// }
+// return journals.Close()
+// }
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
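+
+// Header layout sketch (derived from fillHeader below; offsets are relative
+// to the chunk start i, with the payload ending at j):
+//
+//	buf[i+0 : i+4] // little-endian CRC over buf[i+6 : j] (type + payload)
+//	buf[i+4 : i+6] // little-endian uint16 payload length, j - i - headerSize
+//	buf[i+6]       // chunk type: full, first, middle or last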
+
+type flusher interface {
+ Flush() error
+}
+
+// ErrCorrupted is the error type generated by a corrupted block or chunk.
+type ErrCorrupted struct {
+ Size int
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
+}
+
+// Dropper is the interface that wraps the basic Drop method. Drop is called
+// when the journal reader drops a block or chunk.
+type Dropper interface {
+ Drop(err error)
+}
+
+// Reader reads journals from an underlying io.Reader.
+type Reader struct {
+ // r is the underlying reader.
+ r io.Reader
+ // the dropper.
+ dropper Dropper
+ // strict flag.
+ strict bool
+ // checksum flag.
+ checksum bool
+ // seq is the sequence number of the current journal.
+ seq int
+ // buf[i:j] is the unread portion of the current chunk's payload.
+ // The low bound, i, excludes the chunk header.
+ i, j int
+ // n is the number of bytes of buf that are valid. Once reading has started,
+ // only the final block can have n < blockSize.
+ n int
+ // last is whether the current chunk is the last chunk of the journal.
+ last bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then a corrupted or invalid chunk will halt the journal
+// reader entirely.
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
+ return &Reader{
+ r: r,
+ dropper: dropper,
+ strict: strict,
+ checksum: checksum,
+ last: true,
+ }
+}
+
+var errSkip = errors.New("leveldb/journal: skipped")
+
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+ if r.dropper != nil {
+ r.dropper.Drop(&ErrCorrupted{n, reason})
+ }
+ if r.strict && !skip {
+ r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
+ return r.err
+ }
+ return errSkip
+}
+
+// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
+// next block into the buffer if necessary.
+func (r *Reader) nextChunk(first bool) error {
+ for {
+ if r.j+headerSize <= r.n {
+ checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
+ length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
+ chunkType := r.buf[r.j+6]
+ unprocBlock := r.n - r.j
+ if checksum == 0 && length == 0 && chunkType == 0 {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "zero header", false)
+ }
+ if chunkType < fullChunkType || chunkType > lastChunkType {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false)
+ }
+ r.i = r.j + headerSize
+ r.j = r.j + headerSize + int(length)
+ if r.j > r.n {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "chunk length overflows block", false)
+ } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "checksum mismatch", false)
+ }
+ if first && chunkType != fullChunkType && chunkType != firstChunkType {
+ chunkLength := (r.j - r.i) + headerSize
+ r.i = r.j
+ // Report the error, but skip it.
+ return r.corrupt(chunkLength, "orphan chunk", true)
+ }
+ r.last = chunkType == fullChunkType || chunkType == lastChunkType
+ return nil
+ }
+
+ // The last block.
+ if r.n < blockSize && r.n > 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+
+ // Read block.
+ n, err := io.ReadFull(r.r, r.buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ if n == 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+ r.i, r.j, r.n = 0, 0, n
+ }
+}
+
+// Next returns a reader for the next journal. It returns io.EOF if there are
+// no more journals. The reader returned becomes stale after the next Next
+// call, and should no longer be used. If strict is false, the reader will
+// return io.ErrUnexpectedEOF when it encounters a corrupted journal.
+func (r *Reader) Next() (io.Reader, error) {
+ r.seq++
+ if r.err != nil {
+ return nil, r.err
+ }
+ r.i = r.j
+ for {
+ if err := r.nextChunk(true); err == nil {
+ break
+ } else if err != errSkip {
+ return nil, err
+ }
+ }
+ return &singleReader{r, r.seq, nil}, nil
+}
+
+// Reset resets the journal reader, allowing it to be reused. Reset returns
+// the last accumulated error.
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
+ r.seq++
+ err := r.err
+ r.r = reader
+ r.dropper = dropper
+ r.strict = strict
+ r.checksum = checksum
+ r.i = 0
+ r.j = 0
+ r.n = 0
+ r.last = true
+ r.err = nil
+ return err
+}
+
+type singleReader struct {
+ r *Reader
+ seq int
+ err error
+}
+
+func (x *singleReader) Read(p []byte) (int, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ n := copy(p, r.buf[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+func (x *singleReader) ReadByte() (byte, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ c := r.buf[r.i]
+ r.i++
+ return c, nil
+}
+
+// Writer writes journals to an underlying io.Writer.
+type Writer struct {
+ // w is the underlying writer.
+ w io.Writer
+ // seq is the sequence number of the current journal.
+ seq int
+ // f is w as a flusher.
+ f flusher
+ // buf[i:j] is the bytes that will become the current chunk.
+ // The low bound, i, includes the chunk header.
+ i, j int
+ // buf[:written] has already been written to w.
+ // written is zero unless Flush has been called.
+ written int
+ // first is whether the current chunk is the first chunk of the journal.
+ first bool
+ // pending is whether a chunk is buffered but not yet written.
+ pending bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewWriter returns a new Writer.
+func NewWriter(w io.Writer) *Writer {
+ f, _ := w.(flusher)
+ return &Writer{
+ w: w,
+ f: f,
+ }
+}
+
+// fillHeader fills in the header for the pending chunk.
+func (w *Writer) fillHeader(last bool) {
+ if w.i+headerSize > w.j || w.j > blockSize {
+ panic("leveldb/journal: bad writer state")
+ }
+ if last {
+ if w.first {
+ w.buf[w.i+6] = fullChunkType
+ } else {
+ w.buf[w.i+6] = lastChunkType
+ }
+ } else {
+ if w.first {
+ w.buf[w.i+6] = firstChunkType
+ } else {
+ w.buf[w.i+6] = middleChunkType
+ }
+ }
+ binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
+ binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
+}
+
+// writeBlock writes the buffered block to the underlying writer, and reserves
+// space for the next chunk's header.
+func (w *Writer) writeBlock() {
+ _, w.err = w.w.Write(w.buf[w.written:])
+ w.i = 0
+ w.j = headerSize
+ w.written = 0
+}
+
+// writePending finishes the current journal and writes the buffer to the
+// underlying writer.
+func (w *Writer) writePending() {
+ if w.err != nil {
+ return
+ }
+ if w.pending {
+ w.fillHeader(true)
+ w.pending = false
+ }
+ _, w.err = w.w.Write(w.buf[w.written:w.j])
+ w.written = w.j
+}
+
+// Close finishes the current journal and closes the writer.
+func (w *Writer) Close() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ w.err = errors.New("leveldb/journal: closed Writer")
+ return nil
+}
+
+// Flush finishes the current journal, writes to the underlying writer, and
+// flushes it if that writer implements interface{ Flush() error }.
+func (w *Writer) Flush() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ if w.f != nil {
+ w.err = w.f.Flush()
+ return w.err
+ }
+ return nil
+}
+
+// Reset resets the journal writer, allowing it to be reused. It also
+// finishes the pending journal, if any, before switching to the new writer.
+func (w *Writer) Reset(writer io.Writer) (err error) {
+ w.seq++
+ if w.err == nil {
+ w.writePending()
+ err = w.err
+ }
+ w.w = writer
+ w.f, _ = writer.(flusher)
+ w.i = 0
+ w.j = 0
+ w.written = 0
+ w.first = false
+ w.pending = false
+ w.err = nil
+ return
+}
+
+// Next returns a writer for the next journal. The writer returned becomes stale
+// after the next Close, Flush or Next call, and should no longer be used.
+func (w *Writer) Next() (io.Writer, error) {
+ w.seq++
+ if w.err != nil {
+ return nil, w.err
+ }
+ if w.pending {
+ w.fillHeader(true)
+ }
+ w.i = w.j
+ w.j = w.j + headerSize
+ // Check if there is room in the block for the header.
+ if w.j > blockSize {
+ // Fill in the rest of the block with zeroes.
+ for k := w.i; k < blockSize; k++ {
+ w.buf[k] = 0
+ }
+ w.writeBlock()
+ if w.err != nil {
+ return nil, w.err
+ }
+ }
+ w.first = true
+ w.pending = true
+ return singleWriter{w, w.seq}, nil
+}
+
+type singleWriter struct {
+ w *Writer
+ seq int
+}
+
+func (x singleWriter) Write(p []byte) (int, error) {
+ w := x.w
+ if w.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale writer")
+ }
+ if w.err != nil {
+ return 0, w.err
+ }
+ n0 := len(p)
+ for len(p) > 0 {
+ // Write a block, if it is full.
+ if w.j == blockSize {
+ w.fillHeader(false)
+ w.writeBlock()
+ if w.err != nil {
+ return 0, w.err
+ }
+ w.first = false
+ }
+ // Copy bytes into the buffer.
+ n := copy(w.buf[w.j:], p)
+ w.j += n
+ p = p[n:]
+ }
+ return n0, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 0000000..ad8f51e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+ switch kt {
+ case keyTypeDel:
+ return "d"
+ case keyTypeVal:
+ return "v"
+ }
+ return fmt.Sprintf("", uint(kt))
+}
+
+// Value types encoded as the last component of internal keys.
+// Do not modify; these values are saved to disk.
+const (
+ keyTypeDel = keyType(0)
+ keyTypeVal = keyType(1)
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+ // Maximum value possible for a sequence number; the low 8 bits are
+ // used by the value type, so both can be packed together into a
+ // single 64-bit integer.
+ keyMaxSeq = (uint64(1) << 56) - 1
+ // Maximum value possible for packed sequence number and type.
+ keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
+
+// Maximum number encoded in bytes.
+var keyMaxNumBytes = make([]byte, 8)
+
+func init() {
+ binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
+}
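+
+// Packing sketch (derived from makeInternalKey below, not upstream prose):
+// an internal key is ukey followed by little-endian uint64((seq<<8)|kt), so
+// for seq = 7 and kt = keyTypeVal the 8-byte trailer encodes 0x701.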
+
+type internalKey []byte
+
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+ if seq > keyMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > keyTypeVal {
+ panic("leveldb: invalid type")
+ }
+
+ dst = ensureBuffer(dst, len(ukey)+8)
+ copy(dst, ukey)
+ binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
+ return internalKey(dst)
+}
+
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
+ }
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
+ }
+ ukey = ik[:len(ik)-8]
+ return
+}
+
+func validInternalKey(ik []byte) bool {
+ _, _, _, err := parseInternalKey(ik)
+ return err == nil
+}
+
+func (ik internalKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil internalKey")
+ }
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
+ }
+}
+
+func (ik internalKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
+}
+
+func (ik internalKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ }
+ return
+}
+
+func (ik internalKey) String() string {
+ if ik == nil {
+ return ""
+ }
+
+ if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ }
+ return fmt.Sprintf("", []byte(ik))
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 0000000..824e47f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,479 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides an in-memory key/value database implementation.
+package memdb
+
+import (
+ "math/rand"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrIterReleased = errors.New("leveldb/memdb: iterator released")
+)
+
+const tMaxHeight = 12
+
+type dbIter struct {
+ util.BasicReleaser
+ p *DB
+ slice *util.Range
+ node int
+ forward bool
+ key, value []byte
+ err error
+}
+
+func (i *dbIter) fill(checkStart, checkLimit bool) bool {
+ if i.node != 0 {
+ n := i.p.nodeData[i.node]
+ m := n + i.p.nodeData[i.node+nKey]
+ i.key = i.p.kvData[n:m]
+ if i.slice != nil {
+ switch {
+ case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0:
+ fallthrough
+ case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0:
+ i.node = 0
+ goto bail
+ }
+ }
+ i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]]
+ return true
+ }
+bail:
+ i.key = nil
+ i.value = nil
+ return false
+}
+
+func (i *dbIter) Valid() bool {
+ return i.node != 0
+}
+
+func (i *dbIter) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil {
+ i.node, _ = i.p.findGE(i.slice.Start, false)
+ } else {
+ i.node = i.p.nodeData[nNext]
+ }
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Limit != nil {
+ i.node = i.p.findLT(i.slice.Limit)
+ } else {
+ i.node = i.p.findLast()
+ }
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 {
+ key = i.slice.Start
+ }
+ i.node, _ = i.p.findGE(key, false)
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if !i.forward {
+ return i.First()
+ }
+ return false
+ }
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.nodeData[i.node+nNext]
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if i.forward {
+ return i.Last()
+ }
+ return false
+ }
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.findLT(i.key)
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Key() []byte {
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ return i.value
+}
+
+func (i *dbIter) Error() error { return i.err }
+
+func (i *dbIter) Release() {
+ if !i.Released() {
+ i.p = nil
+ i.node = 0
+ i.key = nil
+ i.value = nil
+ i.BasicReleaser.Release()
+ }
+}
+
+const (
+ nKV = iota
+ nKey
+ nVal
+ nHeight
+ nNext
+)
+
+// DB is an in-memory key/value database.
+type DB struct {
+ cmp comparer.BasicComparer
+ rnd *rand.Rand
+
+ mu sync.RWMutex
+ kvData []byte
+ // Node data:
+ // [0] : KV offset
+ // [1] : Key length
+ // [2] : Value length
+ // [3] : Height
+ // [3..height] : Next nodes
+ nodeData []int
+ prevNode [tMaxHeight]int
+ maxHeight int
+ n int
+ kvSize int
+}
+
+func (p *DB) randHeight() (h int) {
+ const branching = 4
+ h = 1
+ for h < tMaxHeight && p.rnd.Int()%branching == 0 {
+ h++
+ }
+ return
+}
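+
+// Note (illustrative reasoning, not upstream commentary): with branching = 4
+// each extra level is taken with probability 1/4, so node height follows a
+// geometric distribution with mean 4/3, and tMaxHeight = 12 comfortably
+// covers skip lists with on the order of 4^11 entries.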
+
+// Must hold RW-lock if prev == true, as it uses the shared prevNode slice.
+func (p *DB) findGE(key []byte, prev bool) (int, bool) {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ cmp := 1
+ if next != 0 {
+ o := p.nodeData[next]
+ cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key)
+ }
+ if cmp < 0 {
+ // Keep searching in this list
+ node = next
+ } else {
+ if prev {
+ p.prevNode[h] = node
+ } else if cmp == 0 {
+ return next, true
+ }
+ if h == 0 {
+ return next, cmp == 0
+ }
+ h--
+ }
+ }
+}
+
+func (p *DB) findLT(key []byte) int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ o := p.nodeData[next]
+ if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+func (p *DB) findLast() int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ if next == 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (p *DB) Put(key []byte, value []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if node, exact := p.findGE(key, true); exact {
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ p.nodeData[node] = kvOffset
+ m := p.nodeData[node+nVal]
+ p.nodeData[node+nVal] = len(value)
+ p.kvSize += len(value) - m
+ return nil
+ }
+
+ h := p.randHeight()
+ if h > p.maxHeight {
+ for i := p.maxHeight; i < h; i++ {
+ p.prevNode[i] = 0
+ }
+ p.maxHeight = h
+ }
+
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ // Node
+ node := len(p.nodeData)
+ p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
+ for i, n := range p.prevNode[:h] {
+ m := n + nNext + i
+ p.nodeData = append(p.nodeData, p.nodeData[m])
+ p.nodeData[m] = node
+ }
+
+ p.kvSize += len(key) + len(value)
+ p.n++
+ return nil
+}
+
+// Delete deletes the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (p *DB) Delete(key []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ node, exact := p.findGE(key, true)
+ if !exact {
+ return ErrNotFound
+ }
+
+ h := p.nodeData[node+nHeight]
+ for i, n := range p.prevNode[:h] {
+ m := n + nNext + i
+ p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
+ }
+
+ p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal]
+ p.n--
+ return nil
+}
+
+// Contains returns true if the given key is in the DB.
+//
+// It is safe to modify the contents of the arguments after Contains returns.
+func (p *DB) Contains(key []byte) bool {
+ p.mu.RLock()
+ _, exact := p.findGE(key, false)
+ p.mu.RUnlock()
+ return exact
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (p *DB) Get(key []byte) (value []byte, err error) {
+ p.mu.RLock()
+ if node, exact := p.findGE(key, false); exact {
+ o := p.nodeData[node] + p.nodeData[node+nKey]
+ value = p.kvData[o : o+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+ p.mu.RLock()
+ if node, _ := p.findGE(key, false); node != 0 {
+ n := p.nodeData[node]
+ m := n + p.nodeData[node+nKey]
+ rkey = p.kvData[n:m]
+ value = p.kvData[m : m+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to contain only keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// WARNING: Any slice returned by the iterator (e.g. the slices returned by
+// the Iterator.Key() or Iterator.Value() methods) should not be modified
+// unless noted otherwise.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+ return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns keys/values buffer capacity.
+func (p *DB) Capacity() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData)
+}
+
+// Size returns the total length of all keys and values. Note that deleted
+// key/value pairs are not accounted for, but they still consume the buffer,
+// since the buffer is append-only.
+func (p *DB) Size() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.kvSize
+}
+
+// Free returns the remaining keys/values buffer capacity before the
+// buffer needs to grow.
+func (p *DB) Free() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.n
+}
+
+// Reset resets the DB to its initial empty state, allowing the buffer to be reused.
+func (p *DB) Reset() {
+ p.mu.Lock()
+ p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+ p.maxHeight = 1
+ p.n = 0
+ p.kvSize = 0
+ p.kvData = p.kvData[:0]
+ p.nodeData = p.nodeData[:nNext+tMaxHeight]
+ p.nodeData[nKV] = 0
+ p.nodeData[nKey] = 0
+ p.nodeData[nVal] = 0
+ p.nodeData[nHeight] = tMaxHeight
+ for n := 0; n < tMaxHeight; n++ {
+ p.nodeData[nNext+n] = 0
+ p.prevNode[n] = 0
+ }
+ p.mu.Unlock()
+}
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// The DB is append-only: deleting an entry removes its node but does not
+// reclaim the KV buffer.
+//
+// The returned DB instance is safe for concurrent use.
+func New(cmp comparer.BasicComparer, capacity int) *DB {
+ p := &DB{
+ cmp: cmp,
+ rnd: rand.New(rand.NewSource(0xdeadbeef)),
+ maxHeight: 1,
+ kvData: make([]byte, 0, capacity),
+ nodeData: make([]int, 4+tMaxHeight),
+ }
+ p.nodeData[nHeight] = tMaxHeight
+ return p
+}
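+
+// Example usage (an illustrative sketch, not part of the upstream source):
+//
+//	db := New(comparer.DefaultComparer, 4096)
+//	_ = db.Put([]byte("alpha"), []byte("1"))
+//	if v, err := db.Get([]byte("alpha")); err == nil {
+//		fmt.Printf("alpha=%s\n", v)
+//	}
+//	iter := db.NewIterator(nil)
+//	defer iter.Release()
+//	for iter.Next() {
+//		fmt.Printf("%s=%s\n", iter.Key(), iter.Value())
+//	}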
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
new file mode 100644
index 0000000..528b164
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -0,0 +1,697 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package opt provides sets of options used by LevelDB.
+package opt
+
+import (
+ "math"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+const (
+ KiB = 1024
+ MiB = KiB * 1024
+ GiB = MiB * 1024
+)
+
+var (
+ DefaultBlockCacher = LRUCacher
+ DefaultBlockCacheCapacity = 8 * MiB
+ DefaultBlockRestartInterval = 16
+ DefaultBlockSize = 4 * KiB
+ DefaultCompactionExpandLimitFactor = 25
+ DefaultCompactionGPOverlapsFactor = 10
+ DefaultCompactionL0Trigger = 4
+ DefaultCompactionSourceLimitFactor = 1
+ DefaultCompactionTableSize = 2 * MiB
+ DefaultCompactionTableSizeMultiplier = 1.0
+ DefaultCompactionTotalSize = 10 * MiB
+ DefaultCompactionTotalSizeMultiplier = 10.0
+ DefaultCompressionType = SnappyCompression
+ DefaultIteratorSamplingRate = 1 * MiB
+ DefaultOpenFilesCacher = LRUCacher
+ DefaultOpenFilesCacheCapacity = 500
+ DefaultWriteBuffer = 4 * MiB
+ DefaultWriteL0PauseTrigger = 12
+ DefaultWriteL0SlowdownTrigger = 8
+)
+
+// Cacher is a caching algorithm.
+type Cacher interface {
+ New(capacity int) cache.Cacher
+}
+
+// CacherFunc is a Cacher implementation that wraps a function for creating
+// a cache.Cacher.
+type CacherFunc struct {
+ NewFunc func(capacity int) cache.Cacher
+}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+ if f.NewFunc != nil {
+ return f.NewFunc(capacity)
+ }
+ return nil
+}
+
+func noCacher(int) cache.Cacher { return nil }
+
+var (
+ // LRUCacher is the LRU-cache algorithm.
+ LRUCacher = &CacherFunc{cache.NewLRU}
+
+ // NoCacher is the value to disable caching algorithm.
+ NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
+type Compression uint
+
+func (c Compression) String() string {
+ switch c {
+ case DefaultCompression:
+ return "default"
+ case NoCompression:
+ return "none"
+ case SnappyCompression:
+ return "snappy"
+ }
+ return "invalid"
+}
+
+const (
+ DefaultCompression Compression = iota
+ NoCompression
+ SnappyCompression
+ nCompression
+)
+
+// Strict is the DB 'strict level'.
+type Strict uint
+
+const (
+ // If present then a corrupted or invalid chunk or block in the manifest
+ // journal will cause an error instead of being dropped.
+ // This prevents a database with a corrupted manifest from being opened.
+ StrictManifest Strict = 1 << iota
+
+ // If present then journal chunk checksum will be verified.
+ StrictJournalChecksum
+
+ // If present then a corrupted or invalid chunk or block in the journal
+ // will cause an error instead of being dropped.
+ // This prevents a database with a corrupted journal from being opened.
+ StrictJournal
+
+ // If present then 'sorted table' block checksum will be verified.
+ // This has effect on both 'read operation' and compaction.
+ StrictBlockChecksum
+
+ // If present then a corrupted 'sorted table' will fail compaction.
+ // The database will enter read-only mode.
+ StrictCompaction
+
+ // If present then a corrupted 'sorted table' will halt 'read operations'.
+ StrictReader
+
+ // If present then leveldb.Recover will drop corrupted 'sorted table'.
+ StrictRecovery
+
+ // This is only applicable to ReadOptions. If present, the ReadOptions
+ // 'strict level' will override the global one.
+ StrictOverride
+
+ // StrictAll enables all strict flags.
+ StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
+
+ // DefaultStrict is the default strict flags. Specifying any strict flag
+ // will override the default strict flags as a whole (i.e. not OR'ed).
+ DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
+
+ // NoStrict disables all strict flags, overriding the default strict flags.
+ NoStrict = ^StrictAll
+)
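+
+// Example (an illustrative sketch of client-side usage, assuming the package
+// is imported as opt): strict flags combine with bitwise OR, and setting any
+// flag replaces DefaultStrict entirely:
+//
+//	o := &opt.Options{Strict: opt.StrictJournal | opt.StrictBlockChecksum}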
+
+// Options holds the optional parameters for the DB at large.
+type Options struct {
+ // AltFilters defines one or more 'alternative filters'.
+ // 'alternative filters' will be used during reads if a filter block
+ // does not match with the 'effective filter'.
+ //
+ // The default value is nil
+ AltFilters []filter.Filter
+
+ // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ BlockCacher Cacher
+
+ // BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+ //
+ // The default value is 8MiB.
+ BlockCacheCapacity int
+
+ // BlockCacheEvictRemoved enables forced eviction of cached blocks that
+ // belong to removed 'sorted tables'.
+ //
+ // The default value is false.
+ BlockCacheEvictRemoved bool
+
+ // BlockRestartInterval is the number of keys between restart points for
+ // delta encoding of keys.
+ //
+ // The default value is 16.
+ BlockRestartInterval int
+
+ // BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
+ // block.
+ //
+ // The default value is 4KiB.
+ BlockSize int
+
+ // CompactionExpandLimitFactor limits compaction size after expanded.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 25.
+ CompactionExpandLimitFactor int
+
+ // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
+ // single 'sorted table' generates.
+ // This will be multiplied by table size limit at grandparent level.
+ //
+ // The default value is 10.
+ CompactionGPOverlapsFactor int
+
+ // CompactionL0Trigger defines number of 'sorted table' at level-0 that will
+ // trigger compaction.
+ //
+ // The default value is 4.
+ CompactionL0Trigger int
+
+ // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+ // level-0.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 1.
+ CompactionSourceLimitFactor int
+
+ // CompactionTableSize limits size of 'sorted table' that compaction generates.
+ // The limits for each level will be calculated as:
+ // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+ //
+ // The default value is 2MiB.
+ CompactionTableSize int
+
+ // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+ //
+ // The default value is 1.
+ CompactionTableSizeMultiplier float64
+
+ // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTableSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTableSizeMultiplierPerLevel []float64
+
+ // CompactionTotalSize limits total size of 'sorted table' for each level.
+ // The limits for each level will be calculated as:
+ // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using
+ // CompactionTotalSizeMultiplierPerLevel.
+ //
+ // The default value is 10MiB.
+ CompactionTotalSize int
+
+ // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+ //
+ // The default value is 10.
+ CompactionTotalSizeMultiplier float64
+
+ // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTotalSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTotalSizeMultiplierPerLevel []float64
+
+ // Comparer defines a total ordering over the space of []byte keys: a 'less
+ // than' relationship. The same comparison algorithm must be used for reads
+ // and writes over the lifetime of the DB.
+ //
+ // The default value uses the same ordering as bytes.Compare.
+ Comparer comparer.Comparer
+
+ // Compression defines the 'sorted table' block compression to use.
+ //
+ // The default value (DefaultCompression) uses snappy compression.
+ Compression Compression
+
+ // DisableBufferPool disables use of util.BufferPool functionality.
+ //
+ // The default value is false.
+ DisableBufferPool bool
+
+ // DisableBlockCache disables use of cache.Cache functionality on
+ // 'sorted table' blocks.
+ //
+ // The default value is false.
+ DisableBlockCache bool
+
+ // DisableCompactionBackoff disables compaction retry backoff.
+ //
+ // The default value is false.
+ DisableCompactionBackoff bool
+
+ // DisableLargeBatchTransaction disables the switch-to-transaction mode
+ // for large batch writes. When the mode is enabled (the default), batch
+ // writes larger than WriteBuffer will use a transaction.
+ //
+ // The default is false.
+ DisableLargeBatchTransaction bool
+
+ // ErrorIfExist defines whether an error should be returned if the DB
+ // already exists.
+ //
+ // The default value is false.
+ ErrorIfExist bool
+
+ // ErrorIfMissing defines whether an error should be returned if the DB is
+ // missing. If false then the database will be created if missing, otherwise
+ // an error will be returned.
+ //
+ // The default value is false.
+ ErrorIfMissing bool
+
+ // Filter defines an 'effective filter' to use. An 'effective filter'
+ // if defined will be used to generate per-table filter block.
+ // The filter name will be stored on disk.
+ // During reads LevelDB will try to find matching filter from
+ // 'effective filter' and 'alternative filters'.
+ //
+ // Filter can be changed after a DB has been created. It is recommended
+ // to put old filter to the 'alternative filters' to mitigate lack of
+ // filter during transition period.
+ //
+ // A filter is used to reduce disk reads when looking for a specific key.
+ //
+ // The default value is nil.
+ Filter filter.Filter
+
+ // IteratorSamplingRate defines approximate gap (in bytes) between read
+ // sampling of an iterator. The samples will be used to determine when
+ // compaction should be triggered.
+ //
+ // The default is 1MiB.
+ IteratorSamplingRate int
+
+ // NoSync completely disables fsync.
+ //
+ // The default is false.
+ NoSync bool
+
+ // NoWriteMerge allows disabling write merge.
+ //
+ // The default is false.
+ NoWriteMerge bool
+
+ // OpenFilesCacher provides cache algorithm for open files caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ OpenFilesCacher Cacher
+
+ // OpenFilesCacheCapacity defines the capacity of the open files caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
+ //
+ // The default value is 500.
+ OpenFilesCacheCapacity int
+
+ // If true then opens DB in read-only mode.
+ //
+ // The default value is false.
+ ReadOnly bool
+
+ // Strict defines the DB strict level.
+ Strict Strict
+
+ // WriteBuffer defines the maximum size of a 'memdb' before it is flushed
+ // to a 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
+ // unsorted journal.
+ //
+ // LevelDB may hold up to two 'memdb' at the same time.
+ //
+ // The default value is 4MiB.
+ WriteBuffer int
+
+ // WriteL0PauseTrigger defines the number of 'sorted tables' at level-0 that will
+ // pause write.
+ //
+ // The default value is 12.
+ WriteL0PauseTrigger int
+
+ // WriteL0SlowdownTrigger defines number of 'sorted table' at level-0 that
+ // will trigger write slowdown.
+ //
+ // The default value is 8.
+ WriteL0SlowdownTrigger int
+}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
+ return nil
+ }
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockCacheEvictRemoved() bool {
+ if o == nil {
+ return false
+ }
+ return o.BlockCacheEvictRemoved
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+ if o == nil || o.BlockRestartInterval <= 0 {
+ return DefaultBlockRestartInterval
+ }
+ return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+ if o == nil || o.BlockSize <= 0 {
+ return DefaultBlockSize
+ }
+ return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+ var (
+ base = DefaultCompactionTableSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTableSize > 0 {
+ base = o.CompactionTableSize
+ }
+ if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTableSizeMultiplierPerLevel[level]
+ } else if o.CompactionTableSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+ }
+ return int(float64(base) * mult)
+}
+
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+ var (
+ base = DefaultCompactionTotalSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTotalSize > 0 {
+ base = o.CompactionTotalSize
+ }
+ if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+ } else if o.CompactionTotalSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+ }
+ return int64(float64(base) * mult)
+}
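+
+// Worked example with the defaults above: CompactionTotalSize = 10 MiB and
+// CompactionTotalSizeMultiplier = 10 give a 10 MiB limit at level-0,
+// 100 MiB at level-1 and 1000 MiB at level-2.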
+
+func (o *Options) GetComparer() comparer.Comparer {
+ if o == nil || o.Comparer == nil {
+ return comparer.DefaultComparer
+ }
+ return o.Comparer
+}
+
+func (o *Options) GetCompression() Compression {
+ if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
+ return DefaultCompressionType
+ }
+ return o.Compression
+}
+
+func (o *Options) GetDisableBufferPool() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBufferPool
+}
+
+func (o *Options) GetDisableBlockCache() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBlockCache
+}
+
+func (o *Options) GetDisableCompactionBackoff() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableCompactionBackoff
+}
+
+func (o *Options) GetDisableLargeBatchTransaction() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableLargeBatchTransaction
+}
+
+func (o *Options) GetErrorIfExist() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfExist
+}
+
+func (o *Options) GetErrorIfMissing() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfMissing
+}
+
+func (o *Options) GetFilter() filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.Filter
+}
+
+func (o *Options) GetIteratorSamplingRate() int {
+ if o == nil || o.IteratorSamplingRate <= 0 {
+ return DefaultIteratorSamplingRate
+ }
+ return o.IteratorSamplingRate
+}
+
+func (o *Options) GetNoSync() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoSync
+}
+
+func (o *Options) GetNoWriteMerge() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoWriteMerge
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+ if o == nil || o.OpenFilesCacher == nil {
+ return DefaultOpenFilesCacher
+ }
+ if o.OpenFilesCacher == NoCacher {
+ return nil
+ }
+ return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+ if o == nil || o.OpenFilesCacheCapacity == 0 {
+ return DefaultOpenFilesCacheCapacity
+ } else if o.OpenFilesCacheCapacity < 0 {
+ return 0
+ }
+ return o.OpenFilesCacheCapacity
+}
+
+func (o *Options) GetReadOnly() bool {
+ if o == nil {
+ return false
+ }
+ return o.ReadOnly
+}
+
+func (o *Options) GetStrict(strict Strict) bool {
+ if o == nil || o.Strict == 0 {
+ return DefaultStrict&strict != 0
+ }
+ return o.Strict&strict != 0
+}
+
+func (o *Options) GetWriteBuffer() int {
+ if o == nil || o.WriteBuffer <= 0 {
+ return DefaultWriteBuffer
+ }
+ return o.WriteBuffer
+}
+
+func (o *Options) GetWriteL0PauseTrigger() int {
+ if o == nil || o.WriteL0PauseTrigger == 0 {
+ return DefaultWriteL0PauseTrigger
+ }
+ return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+ if o == nil || o.WriteL0SlowdownTrigger == 0 {
+ return DefaultWriteL0SlowdownTrigger
+ }
+ return o.WriteL0SlowdownTrigger
+}
+
+// ReadOptions holds the optional parameters for 'read operation'. The
+// 'read operation' includes Get, Find and NewIterator.
+type ReadOptions struct {
+ // DontFillCache defines whether block reads for this 'read operation'
+ // should be cached. If false then the block will be cached. This does
+ // not affect already-cached blocks.
+ //
+ // The default value is false.
+ DontFillCache bool
+
+ // Strict will be OR'ed with the global DB 'strict level' unless
+ // StrictOverride is present. Currently only StrictReader has an effect here.
+ Strict Strict
+}
+
+func (ro *ReadOptions) GetDontFillCache() bool {
+ if ro == nil {
+ return false
+ }
+ return ro.DontFillCache
+}
+
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+ if ro == nil {
+ return false
+ }
+ return ro.Strict&strict != 0
+}
+
+// WriteOptions holds the optional parameters for 'write operation'. The
+// 'write operation' includes Write, Put and Delete.
+type WriteOptions struct {
+ // NoWriteMerge allows disabling write merge.
+ //
+ // The default is false.
+ NoWriteMerge bool
+
+ // Sync is whether to sync underlying writes from the OS buffer cache
+ // through to actual disk, if applicable. Setting Sync can result in
+ // slower writes.
+ //
+ // If false, and the machine crashes, then some recent writes may be lost.
+ // Note that if it is just the process that crashes (and the machine does
+ // not) then no writes will be lost.
+ //
+ // In other words, Sync being false has the same semantics as a write
+ // system call. Sync being true means write followed by fsync.
+ //
+ // The default value is false.
+ Sync bool
+}
+
+func (wo *WriteOptions) GetNoWriteMerge() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.NoWriteMerge
+}
+
+func (wo *WriteOptions) GetSync() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.Sync
+}
+
+// GetStrict returns whether the given strict flag is in effect, letting a
+// ReadOptions carrying StrictOverride take precedence over the DB-wide
+// Options.
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+ if ro.GetStrict(StrictOverride) {
+ return ro.GetStrict(strict)
+ }
+ return o.GetStrict(strict) || ro.GetStrict(strict)
+}
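+
+// Example usage (an illustrative sketch, not part of the upstream source):
+// every getter tolerates a nil receiver and falls back to the defaults, so
+// callers never need to pre-populate Options:
+//
+//	var o *Options
+//	_ = o.GetWriteBuffer() // 4 MiB (DefaultWriteBuffer)
+//	o = &Options{WriteBuffer: 8 * MiB, Compression: NoCompression}
+//	_ = o.GetWriteBuffer() // 8 MiB
+//	_ = o.GetCompression() // NoCompression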
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 0000000..b072b1a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func dupOptions(o *opt.Options) *opt.Options {
+ newo := &opt.Options{}
+ if o != nil {
+ *newo = *o
+ }
+ if newo.Strict == 0 {
+ newo.Strict = opt.DefaultStrict
+ }
+ return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+ no := dupOptions(o)
+ // Alternative filters.
+ if filters := o.GetAltFilters(); len(filters) > 0 {
+ no.AltFilters = make([]filter.Filter, len(filters))
+ for i, filter := range filters {
+ no.AltFilters[i] = &iFilter{filter}
+ }
+ }
+ // Comparer.
+ s.icmp = &iComparer{o.GetComparer()}
+ no.Comparer = s.icmp
+ // Filter.
+ if filter := o.GetFilter(); filter != nil {
+ no.Filter = &iFilter{filter}
+ }
+
+ s.o = &cachedOptions{Options: no}
+ s.o.cache()
+}
+
+const optCachedLevel = 7
+
+type cachedOptions struct {
+ *opt.Options
+
+ compactionExpandLimit []int
+ compactionGPOverlaps []int
+ compactionSourceLimit []int
+ compactionTableSize []int
+ compactionTotalSize []int64
+}
+
+func (co *cachedOptions) cache() {
+ co.compactionExpandLimit = make([]int, optCachedLevel)
+ co.compactionGPOverlaps = make([]int, optCachedLevel)
+ co.compactionSourceLimit = make([]int, optCachedLevel)
+ co.compactionTableSize = make([]int, optCachedLevel)
+ co.compactionTotalSize = make([]int64, optCachedLevel)
+
+ for level := 0; level < optCachedLevel; level++ {
+ co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+ co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+ co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+ co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+ co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+ }
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionExpandLimit[level]
+ }
+ return co.Options.GetCompactionExpandLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+ if level < optCachedLevel {
+ return co.compactionGPOverlaps[level]
+ }
+ return co.Options.GetCompactionGPOverlaps(level)
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionSourceLimit[level]
+ }
+ return co.Options.GetCompactionSourceLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+ if level < optCachedLevel {
+ return co.compactionTableSize[level]
+ }
+ return co.Options.GetCompactionTableSize(level)
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+ if level < optCachedLevel {
+ return co.compactionTotalSize[level]
+ }
+ return co.Options.GetCompactionTotalSize(level)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
new file mode 100644
index 0000000..3f391f9
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrManifestCorrupted records manifest corruption. This error will be
+// wrapped with errors.ErrCorrupted.
+type ErrManifestCorrupted struct {
+ Field string
+ Reason string
+}
+
+func (e *ErrManifestCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error {
+ return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason})
+}
+
+// session represents a persistent database session.
+type session struct {
+ // Need 64-bit alignment.
+ stNextFileNum int64 // current unused file number
+ stJournalNum int64 // current journal file number; need external synchronization
+ stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb
+ stTempFileNum int64
+ stSeqNum uint64 // last mem compacted seq; need external synchronization
+
+ stor *iStorage
+ storLock storage.Locker
+ o *cachedOptions
+ icmp *iComparer
+ tops *tOps
+ fileRef map[int64]int
+
+ manifest *journal.Writer
+ manifestWriter storage.Writer
+ manifestFd storage.FileDesc
+
+ stCompPtrs []internalKey // compaction pointers; need external synchronization
+ stVersion *version // current version
+ vmu sync.Mutex
+}
+
+// newSession creates a new initialized session instance.
+func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
+ if stor == nil {
+ return nil, os.ErrInvalid
+ }
+ storLock, err := stor.Lock()
+ if err != nil {
+ return
+ }
+ s = &session{
+ stor: newIStorage(stor),
+ storLock: storLock,
+ fileRef: make(map[int64]int),
+ }
+ s.setOptions(o)
+ s.tops = newTableOps(s)
+ s.setVersion(newVersion(s))
+ s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
+ return
+}
+
+// Close session.
+func (s *session) close() {
+ s.tops.close()
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ s.manifest = nil
+ s.manifestWriter = nil
+ s.setVersion(&version{s: s, closing: true})
+}
+
+// Release session lock.
+func (s *session) release() {
+ s.storLock.Unlock()
+}
+
+// Create a new database session; need external synchronization.
+func (s *session) create() error {
+ // create manifest
+ return s.newManifest(nil, nil)
+}
+
+// Recover a database session; need external synchronization.
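+// The manifest journal is replayed record by record into a version
+// staging area; corrupted records are skipped unless StrictManifest is
+// set, and the required fields (comparer, next file number, journal
+// number and sequence number) are validated once replay completes.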
+func (s *session) recover() (err error) {
+ defer func() {
+ if os.IsNotExist(err) {
+ // Don't return os.ErrNotExist if the underlying storage contains
+ // other files that belong to LevelDB, so the DB won't get trashed.
+ if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 {
+ err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
+ }
+ }
+ }()
+
+ fd, err := s.stor.GetMeta()
+ if err != nil {
+ return
+ }
+
+ reader, err := s.stor.Open(fd)
+ if err != nil {
+ return
+ }
+ defer reader.Close()
+
+ var (
+ // Options.
+ strict = s.o.GetStrict(opt.StrictManifest)
+
+ jr = journal.NewReader(reader, dropper{s, fd}, strict, true)
+ rec = &sessionRecord{}
+ staging = s.stVersion.newStaging()
+ )
+ for {
+ var r io.Reader
+ r, err = jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ break
+ }
+ return errors.SetFd(err, fd)
+ }
+
+ err = rec.decode(r)
+ if err == nil {
+ // save compact pointers
+ for _, r := range rec.compPtrs {
+ s.setCompPtr(r.level, internalKey(r.ikey))
+ }
+ // commit record to version staging
+ staging.commit(rec)
+ } else {
+ err = errors.SetFd(err, fd)
+ if strict || !errors.IsCorrupted(err) {
+ return
+ }
+ s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
+ }
+ rec.resetCompPtrs()
+ rec.resetAddedTables()
+ rec.resetDeletedTables()
+ }
+
+ switch {
+ case !rec.has(recComparer):
+ return newErrManifestCorrupted(fd, "comparer", "missing")
+ case rec.comparer != s.icmp.uName():
+ return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+ case !rec.has(recNextFileNum):
+ return newErrManifestCorrupted(fd, "next-file-num", "missing")
+ case !rec.has(recJournalNum):
+ return newErrManifestCorrupted(fd, "journal-file-num", "missing")
+ case !rec.has(recSeqNum):
+ return newErrManifestCorrupted(fd, "seq-num", "missing")
+ }
+
+ s.manifestFd = fd
+ s.setVersion(staging.finish())
+ s.setNextFileNum(rec.nextFileNum)
+ s.recordCommited(rec)
+ return nil
+}
+
+// Commit session; need external synchronization.
+func (s *session) commit(r *sessionRecord) (err error) {
+ v := s.version()
+ defer v.release()
+
+ // spawn new version based on current version
+ nv := v.spawn(r)
+
+ if s.manifest == nil {
+ // manifest journal writer not yet created, create one
+ err = s.newManifest(r, nv)
+ } else {
+ err = s.flushManifest(r)
+ }
+
+ // finally, apply the new version if no error arises
+ if err == nil {
+ s.setVersion(nv)
+ }
+
+ return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
new file mode 100644
index 0000000..089cd00
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -0,0 +1,302 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int {
+ v := s.version()
+ defer v.release()
+ return v.pickMemdbLevel(umin, umax, maxLevel)
+}
+
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) {
+ // Create sorted table.
+ iter := mdb.NewIterator(nil)
+ defer iter.Release()
+ t, n, err := s.tops.createFrom(iter)
+ if err != nil {
+ return 0, err
+ }
+
+ // Picking a level other than zero can cause compaction issues with
+ // large bulk inserts and deletes on a strictly incrementing key-space.
+ // The problem is that small deletion markers get trapped at a lower
+ // level, while key/value entries keep growing at a higher level. Since
+ // the key-space is strictly incrementing it does not overlap with a
+ // higher level, so the maximum possible level is always picked, while
+ // the overlapping deletion markers are pushed into a lower level.
+ // See: https://github.com/syndtr/goleveldb/issues/127.
+ flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel)
+ rec.addTableFile(flushLevel, t)
+
+ s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+ return flushLevel, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
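+// Two triggers exist: a size-based compaction when the level's cScore
+// reaches 1, and a seek-based compaction when some table has exhausted
+// its seek allowance (v.cSeek). For size-based picks, the per-level
+// compaction pointer rotates the starting table so that successive
+// compactions walk the whole key-space of the level.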
+func (s *session) pickCompaction() *compaction {
+ v := s.version()
+
+ var sourceLevel int
+ var t0 tFiles
+ if v.cScore >= 1 {
+ sourceLevel = v.cLevel
+ cptr := s.getCompPtr(sourceLevel)
+ tables := v.levels[sourceLevel]
+ for _, t := range tables {
+ if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+ t0 = append(t0, t)
+ break
+ }
+ }
+ if len(t0) == 0 {
+ t0 = append(t0, tables[0])
+ }
+ } else {
+ if p := atomic.LoadPointer(&v.cSeek); p != nil {
+ ts := (*tSet)(p)
+ sourceLevel = ts.level
+ t0 = append(t0, ts.table)
+ } else {
+ v.release()
+ return nil
+ }
+ }
+
+ return newCompaction(s, v, sourceLevel, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction {
+ v := s.version()
+
+ if sourceLevel >= len(v.levels) {
+ v.release()
+ return nil
+ }
+
+ t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0)
+ if len(t0) == 0 {
+ v.release()
+ return nil
+ }
+
+ // Avoid compacting too much in one shot in case the range is large.
+ // But we cannot do this for level-0 since level-0 files can overlap
+ // and we must not pick one file and drop another older file if the
+ // two files overlap.
+ if !noLimit && sourceLevel > 0 {
+ limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel))
+ total := int64(0)
+ for i, t := range t0 {
+ total += t.size
+ if total >= limit {
+ s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+ t0 = t0[:i+1]
+ break
+ }
+ }
+ }
+
+ return newCompaction(s, v, sourceLevel, t0)
+}
+
+func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction {
+ c := &compaction{
+ s: s,
+ v: v,
+ sourceLevel: sourceLevel,
+ levels: [2]tFiles{t0, nil},
+ maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)),
+ tPtrs: make([]int, len(v.levels)),
+ }
+ c.expand()
+ c.save()
+ return c
+}
+
+// compaction represents a compaction state.
+type compaction struct {
+ s *session
+ v *version
+
+ sourceLevel int
+ levels [2]tFiles
+ maxGPOverlaps int64
+
+ gp tFiles
+ gpi int
+ seenKey bool
+ gpOverlappedBytes int64
+ imin, imax internalKey
+ tPtrs []int
+ released bool
+
+ snapGPI int
+ snapSeenKey bool
+ snapGPOverlappedBytes int64
+ snapTPtrs []int
+}
+
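+// save and restore snapshot the compaction cursors (grandparent index,
+// seen-key flag, overlapped byte count and per-level table pointers) so
+// that a caller can roll the compaction back to the last saved point,
+// e.g. when a compaction attempt has to be retried.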
+func (c *compaction) save() {
+ c.snapGPI = c.gpi
+ c.snapSeenKey = c.seenKey
+ c.snapGPOverlappedBytes = c.gpOverlappedBytes
+ c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+func (c *compaction) restore() {
+ c.gpi = c.snapGPI
+ c.seenKey = c.snapSeenKey
+ c.gpOverlappedBytes = c.snapGPOverlappedBytes
+ c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+func (c *compaction) release() {
+ if !c.released {
+ c.released = true
+ c.v.release()
+ }
+}
+
+// Expand compacted tables; need external synchronization.
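+// The inputs are widened in three steps: t0 is grown so that no user key
+// is split across table boundaries, the overlapping parent tables become
+// t1, and t0 may then be grown once more, provided the expand limit is
+// respected and no additional parent tables are dragged in.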
+func (c *compaction) expand() {
+ limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel))
+ vt0 := c.v.levels[c.sourceLevel]
+ vt1 := tFiles{}
+ if level := c.sourceLevel + 1; level < len(c.v.levels) {
+ vt1 = c.v.levels[level]
+ }
+
+ t0, t1 := c.levels[0], c.levels[1]
+ imin, imax := t0.getRange(c.s.icmp)
+ // We expand t0 here just in case a ukey hops across tables.
+ t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0)
+ if len(t0) != len(c.levels[0]) {
+ imin, imax = t0.getRange(c.s.icmp)
+ }
+ t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+ // Get entire range covered by compaction.
+ amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+ // See if we can grow the number of inputs in "sourceLevel" without
+ // changing the number of "sourceLevel+1" files we pick up.
+ if len(t1) > 0 {
+ exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0)
+ if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+ xmin, xmax := exp0.getRange(c.s.icmp)
+ exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+ if len(exp1) == len(t1) {
+ c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+ c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+ len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+ imin, imax = xmin, xmax
+ t0, t1 = exp0, exp1
+ amin, amax = append(t0, t1...).getRange(c.s.icmp)
+ }
+ }
+ }
+
+ // Compute the set of grandparent files that overlap this compaction
+ // (parent == sourceLevel+1; grandparent == sourceLevel+2)
+ if level := c.sourceLevel + 2; level < len(c.v.levels) {
+ c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+ }
+
+ c.levels[0], c.levels[1] = t0, t1
+ c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+ return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
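+// baseLevelForKey reports whether ukey cannot appear in any level below
+// the compaction output level. The per-level table pointers (tPtrs) only
+// move forward, which assumes callers probe keys in ascending order, as
+// the compaction iteration does.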
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+ for level := c.sourceLevel + 2; level < len(c.v.levels); level++ {
+ tables := c.v.levels[level]
+ for c.tPtrs[level] < len(tables) {
+ t := tables[c.tPtrs[level]]
+ if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+ // We've advanced far enough.
+ if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ // Key falls in this file's range, so definitely not base level.
+ return false
+ }
+ break
+ }
+ c.tPtrs[level]++
+ }
+ }
+ return true
+}
+
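+// shouldStopBefore tracks how many grandparent-level bytes the current
+// output table would overlap; once the overlap exceeds maxGPOverlaps the
+// output is cut short so that a future compaction of that table stays cheap.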
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
+ for ; c.gpi < len(c.gp); c.gpi++ {
+ gp := c.gp[c.gpi]
+ if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+ break
+ }
+ if c.seenKey {
+ c.gpOverlappedBytes += gp.size
+ }
+ }
+ c.seenKey = true
+
+ if c.gpOverlappedBytes > c.maxGPOverlaps {
+ // Too much overlap for current output; start new output.
+ c.gpOverlappedBytes = 0
+ return true
+ }
+ return false
+}
+
+// Creates an iterator.
+func (c *compaction) newIterator() iterator.Iterator {
+ // Creates iterator slice.
+ icap := len(c.levels)
+ if c.sourceLevel == 0 {
+ // Special case for level-0.
+ icap = len(c.levels[0]) + 1
+ }
+ its := make([]iterator.Iterator, 0, icap)
+
+ // Options.
+ ro := &opt.ReadOptions{
+ DontFillCache: true,
+ Strict: opt.StrictOverride,
+ }
+ strict := c.s.o.GetStrict(opt.StrictCompaction)
+ if strict {
+ ro.Strict |= opt.StrictReader
+ }
+
+ for i, tables := range c.levels {
+ if len(tables) == 0 {
+ continue
+ }
+
+ // Level-0 tables are not sorted and may overlap each other.
+ if c.sourceLevel+i == 0 {
+ for _, t := range tables {
+ its = append(its, c.s.tops.newIterator(t, nil, ro))
+ }
+ } else {
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+ its = append(its, it)
+ }
+ }
+
+ return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 0000000..854e1aa
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "bufio"
+ "encoding/binary"
+ "io"
+ "strings"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+ recComparer = 1
+ recJournalNum = 2
+ recNextFileNum = 3
+ recSeqNum = 4
+ recCompPtr = 5
+ recDelTable = 6
+ recAddTable = 7
+ // 8 was used for large value refs
+ recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+ level int
+ ikey internalKey
+}
+
+type atRecord struct {
+ level int
+ num int64
+ size int64
+ imin internalKey
+ imax internalKey
+}
+
+type dtRecord struct {
+ level int
+ num int64
+}
+
+type sessionRecord struct {
+ hasRec int
+ comparer string
+ journalNum int64
+ prevJournalNum int64
+ nextFileNum int64
+ seqNum uint64
+ compPtrs []cpRecord
+ addedTables []atRecord
+ deletedTables []dtRecord
+
+ scratch [binary.MaxVarintLen64]byte
+ err error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+ return p.hasRec&(1<<uint(rec)) != 0
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+ s *session
+ fd storage.FileDesc
+}
+
+func (d dropper) Drop(err error) {
+ if e, ok := err.(*journal.ErrCorrupted); ok {
+ d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
+ } else {
+ d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
+ }
+}
+
+func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) newTemp() storage.FileDesc {
+ num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
+ return storage.FileDesc{Type: storage.TypeTemp, Num: num}
+}
+
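+// addFileRef adjusts the reference count of a table file number by ref
+// (which may be negative); the entry is dropped once the count reaches
+// zero, and a negative count indicates a bookkeeping bug, hence the panic.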
+func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
+ ref += s.fileRef[fd.Num]
+ if ref > 0 {
+ s.fileRef[fd.Num] = ref
+ } else if ref == 0 {
+ delete(s.fileRef, fd.Num)
+ } else {
+ panic(fmt.Sprintf("negative ref: %v", fd))
+ }
+ return ref
+}
+
+// Session state.
+
+// Get the current version. This will increment the version ref; the caller
+// must call version.release (exactly once) after use.
+func (s *session) version() *version {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ s.stVersion.incref()
+ return s.stVersion
+}
+
+func (s *session) tLen(level int) int {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ return s.stVersion.tLen(level)
+}
+
+// Set current version to v.
+func (s *session) setVersion(v *version) {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ // Held by session. It is important to call this first, before releasing the
+ // current version, otherwise the still-used files might get released.
+ v.incref()
+ if s.stVersion != nil {
+ // Release current version.
+ s.stVersion.releaseNB()
+ }
+ s.stVersion = v
+}
+
+// Get current unused file number.
+func (s *session) nextFileNum() int64 {
+ return atomic.LoadInt64(&s.stNextFileNum)
+}
+
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num int64) {
+ atomic.StoreInt64(&s.stNextFileNum, num)
+}
+
+// Mark file number as used.
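+// The CAS loop sets stNextFileNum to max(current, num+1), so it only
+// ever moves the counter forward, even under concurrent callers.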
+func (s *session) markFileNum(num int64) {
+ nextFileNum := num + 1
+ for {
+ old, x := s.stNextFileNum, nextFileNum
+ if old > x {
+ x = old
+ }
+ if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Allocate a file number.
+func (s *session) allocFileNum() int64 {
+ return atomic.AddInt64(&s.stNextFileNum, 1) - 1
+}
+
+// Reuse given file number.
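+// The number is reclaimed only if it was the most recently allocated one
+// (i.e. stNextFileNum == num+1); otherwise the counter is left unchanged.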
+func (s *session) reuseFileNum(num int64) {
+ for {
+ old, x := s.stNextFileNum, num
+ if old != x+1 {
+ x = old
+ }
+ if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Set compaction ptr at given level; need external synchronization.
+func (s *session) setCompPtr(level int, ik internalKey) {
+ if level >= len(s.stCompPtrs) {
+ newCompPtrs := make([]internalKey, level+1)
+ copy(newCompPtrs, s.stCompPtrs)
+ s.stCompPtrs = newCompPtrs
+ }
+ s.stCompPtrs[level] = append(internalKey{}, ik...)
+}
+
+// Get compaction ptr at given level; need external synchronization.
+func (s *session) getCompPtr(level int) internalKey {
+ if level >= len(s.stCompPtrs) {
+ return nil
+ }
+ return s.stCompPtrs[level]
+}
+
+// Manifest related utils.
+
+// Fill given session record obj with current states; need external
+// synchronization.
+func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
+ r.setNextFileNum(s.nextFileNum())
+
+ if snapshot {
+ if !r.has(recJournalNum) {
+ r.setJournalNum(s.stJournalNum)
+ }
+
+ if !r.has(recSeqNum) {
+ r.setSeqNum(s.stSeqNum)
+ }
+
+ for level, ik := range s.stCompPtrs {
+ if ik != nil {
+ r.addCompPtr(level, ik)
+ }
+ }
+
+ r.setComparer(s.icmp.uName())
+ }
+}
+
+// Mark the record as committed; this will update the session state;
+// need external synchronization.
+func (s *session) recordCommited(rec *sessionRecord) {
+ if rec.has(recJournalNum) {
+ s.stJournalNum = rec.journalNum
+ }
+
+ if rec.has(recPrevJournalNum) {
+ s.stPrevJournalNum = rec.prevJournalNum
+ }
+
+ if rec.has(recSeqNum) {
+ s.stSeqNum = rec.seqNum
+ }
+
+ for _, r := range rec.compPtrs {
+ s.setCompPtr(r.level, internalKey(r.ikey))
+ }
+}
+
+// Create a new manifest file; need external synchronization.
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
+ fd := storage.FileDesc{Type: storage.TypeManifest, Num: s.allocFileNum()}
+ writer, err := s.stor.Create(fd)
+ if err != nil {
+ return
+ }
+ jw := journal.NewWriter(writer)
+
+ if v == nil {
+ v = s.version()
+ defer v.release()
+ }
+ if rec == nil {
+ rec = &sessionRecord{}
+ }
+ s.fillRecord(rec, true)
+ v.fillRecord(rec)
+
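+ // On success the deferred block swaps the new manifest in (closing and
+ // removing the previous one); on failure it rolls back by deleting the
+ // half-written file and returning its file number to the pool.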
+ defer func() {
+ if err == nil {
+ s.recordCommited(rec)
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ if !s.manifestFd.Zero() {
+ s.stor.Remove(s.manifestFd)
+ }
+ s.manifestFd = fd
+ s.manifestWriter = writer
+ s.manifest = jw
+ } else {
+ writer.Close()
+ s.stor.Remove(fd)
+ s.reuseFileNum(fd.Num)
+ }
+ }()
+
+ w, err := jw.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = jw.Flush()
+ if err != nil {
+ return
+ }
+ err = s.stor.SetMeta(fd)
+ return
+}
+
+// Flush record to disk.
+func (s *session) flushManifest(rec *sessionRecord) (err error) {
+ s.fillRecord(rec, false)
+ w, err := s.manifest.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = s.manifest.Flush()
+ if err != nil {
+ return
+ }
+ if !s.o.GetNoSync() {
+ err = s.manifestWriter.Sync()
+ if err != nil {
+ return
+ }
+ }
+ s.recordCommited(rec)
+ return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
new file mode 100644
index 0000000..d45fb5d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
@@ -0,0 +1,63 @@
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "sync/atomic"
+)
+
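+// iStorage wraps a storage.Storage and transparently counts the bytes
+// read and written through it; the counters are updated atomically and
+// exposed via reads/writes for DB statistics.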
+type iStorage struct {
+ storage.Storage
+ read uint64
+ write uint64
+}
+
+func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) {
+ r, err := c.Storage.Open(fd)
+ return &iStorageReader{r, c}, err
+}
+
+func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
+ w, err := c.Storage.Create(fd)
+ return &iStorageWriter{w, c}, err
+}
+
+func (c *iStorage) reads() uint64 {
+ return atomic.LoadUint64(&c.read)
+}
+
+func (c *iStorage) writes() uint64 {
+ return atomic.LoadUint64(&c.write)
+}
+
+// newIStorage returns the given storage wrapped by iStorage.
+func newIStorage(s storage.Storage) *iStorage {
+ return &iStorage{s, 0, 0}
+}
+
+type iStorageReader struct {
+ storage.Reader
+ c *iStorage
+}
+
+func (r *iStorageReader) Read(p []byte) (n int, err error) {
+ n, err = r.Reader.Read(p)
+ atomic.AddUint64(&r.c.read, uint64(n))
+ return n, err
+}
+
+func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) {
+ n, err = r.Reader.ReadAt(p, off)
+ atomic.AddUint64(&r.c.read, uint64(n))
+ return n, err
+}
+
+type iStorageWriter struct {
+ storage.Writer
+ c *iStorage
+}
+
+func (w *iStorageWriter) Write(p []byte) (n int, err error) {
+ n, err = w.Writer.Write(p)
+ atomic.AddUint64(&w.c.write, uint64(n))
+ return n, err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
new file mode 100644
index 0000000..9ba71fd
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -0,0 +1,671 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ errFileOpen = errors.New("leveldb/storage: file still open")
+ errReadOnly = errors.New("leveldb/storage: storage is read-only")
+)
+
+type fileLock interface {
+ release() error
+}
+
+type fileStorageLock struct {
+ fs *fileStorage
+}
+
+func (lock *fileStorageLock) Unlock() {
+ if lock.fs != nil {
+ lock.fs.mu.Lock()
+ defer lock.fs.mu.Unlock()
+ if lock.fs.slock == lock {
+ lock.fs.slock = nil
+ }
+ }
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func writeFileSynced(filename string, data []byte, perm os.FileMode) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Sync(); err == nil {
+ err = err1
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+const logSizeThreshold = 1024 * 1024 // 1 MiB
+
+// fileStorage is a file-system backed storage.
+type fileStorage struct {
+ path string
+ readOnly bool
+
+ mu sync.Mutex
+ flock fileLock
+ slock *fileStorageLock
+ logw *os.File
+ logSize int64
+ buf []byte
+ // Opened file counter; if open < 0 means closed.
+ open int
+ day int
+}
+
+// OpenFile returns a new filesystem-backed storage implementation with the given
+// path. This also acquires a file lock, so any subsequent attempt to open the
+// same path will fail.
+//
+// The storage must be closed after use, by calling Close method.
+func OpenFile(path string, readOnly bool) (Storage, error) {
+ if fi, err := os.Stat(path); err == nil {
+ if !fi.IsDir() {
+ return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path)
+ }
+ } else if os.IsNotExist(err) && !readOnly {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+
+ flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err != nil {
+ flock.release()
+ }
+ }()
+
+ var (
+ logw *os.File
+ logSize int64
+ )
+ if !readOnly {
+ logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ logSize, err = logw.Seek(0, os.SEEK_END)
+ if err != nil {
+ logw.Close()
+ return nil, err
+ }
+ }
+
+ fs := &fileStorage{
+ path: path,
+ readOnly: readOnly,
+ flock: flock,
+ logw: logw,
+ logSize: logSize,
+ }
+ runtime.SetFinalizer(fs, (*fileStorage).Close)
+ return fs, nil
+}
+
+func (fs *fileStorage) Lock() (Locker, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if fs.readOnly {
+ return &fileStorageLock{}, nil
+ }
+ if fs.slock != nil {
+ return nil, ErrLocked
+ }
+ fs.slock = &fileStorageLock{fs: fs}
+ return fs.slock, nil
+}
+
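+// itoa appends the decimal representation of i to buf, left-padded with
+// zeros to wid digits, without going through fmt; the logger calls this
+// for every line written.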
+func itoa(buf []byte, i int, wid int) []byte {
+ u := uint(i)
+ if u == 0 && wid <= 1 {
+ return append(buf, '0')
+ }
+
+ // Assemble decimal in reverse order.
+ var b [32]byte
+ bp := len(b)
+ for ; u > 0 || wid > 0; u /= 10 {
+ bp--
+ wid--
+ b[bp] = byte(u%10) + '0'
+ }
+ return append(buf, b[bp:]...)
+}
+
+func (fs *fileStorage) printDay(t time.Time) {
+ if fs.day == t.Day() {
+ return
+ }
+ fs.day = t.Day()
+ fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
+}
+
+func (fs *fileStorage) doLog(t time.Time, str string) {
+ if fs.logSize > logSizeThreshold {
+ // Rotate log file.
+ fs.logw.Close()
+ fs.logw = nil
+ fs.logSize = 0
+ rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old"))
+ }
+ if fs.logw == nil {
+ var err error
+ fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ // Force printDay on new log file.
+ fs.day = 0
+ }
+ fs.printDay(t)
+ hour, min, sec := t.Clock()
+ msec := t.Nanosecond() / 1e3
+ // time
+ fs.buf = itoa(fs.buf[:0], hour, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, min, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, sec, 2)
+ fs.buf = append(fs.buf, '.')
+ fs.buf = itoa(fs.buf, msec, 6)
+ fs.buf = append(fs.buf, ' ')
+ // write
+ fs.buf = append(fs.buf, []byte(str)...)
+ fs.buf = append(fs.buf, '\n')
+ n, _ := fs.logw.Write(fs.buf)
+ fs.logSize += int64(n)
+}
+
+func (fs *fileStorage) Log(str string) {
+ if !fs.readOnly {
+ t := time.Now()
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return
+ }
+ fs.doLog(t, str)
+ }
+}
+
+func (fs *fileStorage) log(str string) {
+ if !fs.readOnly {
+ fs.doLog(time.Now(), str)
+ }
+}
+
+func (fs *fileStorage) setMeta(fd FileDesc) error {
+ content := fsGenName(fd) + "\n"
+ // Check and backup old CURRENT file.
+ currentPath := filepath.Join(fs.path, "CURRENT")
+ if _, err := os.Stat(currentPath); err == nil {
+ b, err := ioutil.ReadFile(currentPath)
+ if err != nil {
+ fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+ return err
+ }
+ if string(b) == content {
+ // Content not changed, do nothing.
+ return nil
+ }
+ if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil {
+ fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+ return err
+ }
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+ if err := writeFileSynced(path, []byte(content), 0644); err != nil {
+ fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err))
+ return err
+ }
+ // Replace CURRENT file.
+ if err := rename(path, currentPath); err != nil {
+ fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
+ return err
+ }
+ // Sync root directory.
+ if err := syncDir(fs.path); err != nil {
+ fs.log(fmt.Sprintf("syncDir: %v", err))
+ return err
+ }
+ return nil
+}
+
+func (fs *fileStorage) SetMeta(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ return fs.setMeta(fd)
+}
+
+func (fs *fileStorage) GetMeta() (FileDesc, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return FileDesc{}, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return FileDesc{}, err
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if ce := dir.Close(); ce != nil {
+ fs.log(fmt.Sprintf("close dir: %v", ce))
+ }
+ if err != nil {
+ return FileDesc{}, err
+ }
+ // Try this in order:
+ // - CURRENT.[0-9]+ ('pending rename' file, descending order)
+ // - CURRENT
+ // - CURRENT.bak
+ //
+ // Skip corrupted files or files that point to a missing target file.
+ type currentFile struct {
+ name string
+ fd FileDesc
+ }
+ tryCurrent := func(name string) (*currentFile, error) {
+ b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = os.ErrNotExist
+ }
+ return nil, err
+ }
+ var fd FileDesc
+ if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
+ fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
+ err := &ErrCorrupted{
+ Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
+ }
+ return nil, err
+ }
+ if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
+ if os.IsNotExist(err) {
+ fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
+ err = os.ErrNotExist
+ }
+ return nil, err
+ }
+ return &currentFile{name: name, fd: fd}, nil
+ }
+ tryCurrents := func(names []string) (*currentFile, error) {
+ var (
+ cur *currentFile
+ // Last corruption error.
+ lastCerr error
+ )
+ for _, name := range names {
+ var err error
+ cur, err = tryCurrent(name)
+ if err == nil {
+ break
+ } else if err == os.ErrNotExist {
+ // Fallback to the next file.
+ } else if isCorrupted(err) {
+ lastCerr = err
+ // Fallback to the next file.
+ } else {
+ // In case the error is due to permission, etc.
+ return nil, err
+ }
+ }
+ if cur == nil {
+ err := os.ErrNotExist
+ if lastCerr != nil {
+ err = lastCerr
+ }
+ return nil, err
+ }
+ return cur, nil
+ }
+
+ // Try 'pending rename' files.
+ var nums []int64
+ for _, name := range names {
+ if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
+ i, err := strconv.ParseInt(name[8:], 10, 64)
+ if err == nil {
+ nums = append(nums, i)
+ }
+ }
+ }
+ var (
+ pendCur *currentFile
+ pendErr = os.ErrNotExist
+ pendNames []string
+ )
+ if len(nums) > 0 {
+ sort.Sort(sort.Reverse(int64Slice(nums)))
+ pendNames = make([]string, len(nums))
+ for i, num := range nums {
+ pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
+ }
+ pendCur, pendErr = tryCurrents(pendNames)
+ if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
+ return FileDesc{}, pendErr
+ }
+ }
+
+ // Try CURRENT and CURRENT.bak.
+ curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
+ if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
+ return FileDesc{}, curErr
+ }
+
+ // pendCur takes precedence, but guards against obsolete pendCur.
+ if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
+ curCur = pendCur
+ }
+
+ if curCur != nil {
+ // Restore CURRENT file to proper state.
+ if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
+ // Ignore setMeta errors; however, don't delete the obsolete files if we
+ // catch an error.
+ if err := fs.setMeta(curCur.fd); err == nil {
+ // Remove 'pending rename' files.
+ for _, name := range pendNames {
+ if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
+ fs.log(fmt.Sprintf("remove %s: %v", name, err))
+ }
+ }
+ }
+ }
+ return curCur.fd, nil
+ }
+
+ // Nothing found.
+ if isCorrupted(pendErr) {
+ return FileDesc{}, pendErr
+ }
+ return FileDesc{}, curErr
+}
+
+func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if cerr := dir.Close(); cerr != nil {
+ fs.log(fmt.Sprintf("close dir: %v", cerr))
+ }
+ if err == nil {
+ for _, name := range names {
+ if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0)
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0)
+ if err == nil {
+ goto ok
+ }
+ }
+ return nil, err
+ }
+ok:
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+ if fs.readOnly {
+ return nil, errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return nil, err
+ }
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ err := os.Remove(filepath.Join(fs.path, fsGenName(fd)))
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) {
+ fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err))
+ err = e1
+ }
+ } else {
+ fs.log(fmt.Sprintf("remove %s: %v", fd, err))
+ }
+ }
+ return err
+}
+
+func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd)))
+}
+
+func (fs *fileStorage) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ // Clear the finalizer.
+ runtime.SetFinalizer(fs, nil)
+
+ if fs.open > 0 {
+ fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
+ }
+ fs.open = -1
+ if fs.logw != nil {
+ fs.logw.Close()
+ }
+ return fs.flock.release()
+}
+
+type fileWrap struct {
+ *os.File
+ fs *fileStorage
+ fd FileDesc
+ closed bool
+}
+
+func (fw *fileWrap) Sync() error {
+ if err := fw.File.Sync(); err != nil {
+ return err
+ }
+ if fw.fd.Type == TypeManifest {
+ // Also sync parent directory if file type is manifest.
+ // See: https://code.google.com/p/leveldb/issues/detail?id=190.
+ if err := syncDir(fw.fs.path); err != nil {
+ fw.fs.log(fmt.Sprintf("syncDir: %v", err))
+ return err
+ }
+ }
+ return nil
+}
+
+func (fw *fileWrap) Close() error {
+ fw.fs.mu.Lock()
+ defer fw.fs.mu.Unlock()
+ if fw.closed {
+ return ErrClosed
+ }
+ fw.closed = true
+ fw.fs.open--
+ err := fw.File.Close()
+ if err != nil {
+ fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err))
+ }
+ return err
+}
+
+func fsGenName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ panic("invalid file type")
+ }
+}
+
+func fsHasOldName(fd FileDesc) bool {
+ return fd.Type == TypeTable
+}
+
+func fsGenOldName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeTable:
+ return fmt.Sprintf("%06d.sst", fd.Num)
+ }
+ return fsGenName(fd)
+}
+
+func fsParseName(name string) (fd FileDesc, ok bool) {
+ var tail string
+ _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail)
+ if err == nil {
+ switch tail {
+ case "log":
+ fd.Type = TypeJournal
+ case "ldb", "sst":
+ fd.Type = TypeTable
+ case "tmp":
+ fd.Type = TypeTemp
+ default:
+ return
+ }
+ return fd, true
+ }
+ n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail)
+ if n == 1 {
+ fd.Type = TypeManifest
+ return fd, true
+ }
+ return
+}
+
+func fsParseNamePtr(name string, fd *FileDesc) bool {
+ _fd, ok := fsParseName(name)
+ if fd != nil {
+ *fd = _fd
+ }
+ return ok
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
new file mode 100644
index 0000000..5545aee
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build nacl
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ return nil, syscall.ENOTSUP
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ return syscall.ENOTSUP
+}
+
+func rename(oldpath, newpath string) error {
+ return syscall.ENOTSUP
+}
+
+func isErrInvalid(err error) bool {
+ return false
+}
+
+func syncDir(name string) error {
+ return syscall.ENOTSUP
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 0000000..b829798
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "os"
+)
+
+type plan9FileLock struct {
+ f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var (
+ flag int
+ perm os.FileMode
+ )
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ perm = os.ModeExclusive
+ }
+ f, err := os.OpenFile(path, flag, perm)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644)
+ }
+ if err != nil {
+ return
+ }
+ fl = &plan9FileLock{f: f}
+ return
+}
+
+func rename(oldpath, newpath string) error {
+ if _, err := os.Stat(newpath); err == nil {
+ if err := os.Remove(newpath); err != nil {
+ return err
+ }
+ }
+
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 0000000..79901ee
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ flock := syscall.Flock_t{
+ Type: syscall.F_UNLCK,
+ Start: 0,
+ Len: 0,
+ Whence: 1,
+ }
+ if lock {
+ if readOnly {
+ flock.Type = syscall.F_RDLCK
+ } else {
+ flock.Type = syscall.F_WRLCK
+ }
+ }
+ return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
new file mode 100644
index 0000000..d75f66a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
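+// setFileLock acquires (or releases, when lock is false) an advisory
+// flock on the whole file; LOCK_NB makes the attempt non-blocking, so a
+// second process fails immediately instead of waiting for the lock.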
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ how := syscall.LOCK_UN
+ if lock {
+ if readOnly {
+ how = syscall.LOCK_SH
+ } else {
+ how = syscall.LOCK_EX
+ }
+ }
+ return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ // Go < 1.8
+ if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+ return true
+ }
+ // Go >= 1.8 returns *os.PathError instead
+ if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func syncDir(name string) error {
+ // As per the fsync manpage, Linux seems to expect fsync on a directory;
+ // however, some systems don't support this, so we will ignore syscall.EINVAL.
+ //
+ // From fsync(2):
+ // Calling fsync() does not necessarily ensure that the entry in the
+ // directory containing the file has also reached disk. For that an
+ // explicit fsync() on a file descriptor for the directory is also needed.
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil && !isErrInvalid(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
new file mode 100644
index 0000000..899335f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const (
+ _MOVEFILE_REPLACE_EXISTING = 1
+)
+
+type windowsFileLock struct {
+ fd syscall.Handle
+}
+
+func (fl *windowsFileLock) release() error {
+ return syscall.Close(fl.fd)
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ var access, shareMode uint32
+ if readOnly {
+ access = syscall.GENERIC_READ
+ shareMode = syscall.FILE_SHARE_READ
+ } else {
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ if err == syscall.ERROR_FILE_NOT_FOUND {
+ fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ }
+ if err != nil {
+ return
+ }
+ fl = &windowsFileLock{fd: fd}
+ return
+}
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ if e1 != 0 {
+ return error(e1)
+ }
+ return syscall.EINVAL
+ }
+ return nil
+}
+
+func rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
new file mode 100644
index 0000000..838f1be
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -0,0 +1,222 @@
+// Copyright (c) 2013, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "bytes"
+ "os"
+ "sync"
+)
+
+const typeShift = 4
+
+// Verify at compile-time that typeShift is large enough to cover all FileType
+// values by confirming that 0 == 0.
+var _ [0]struct{} = [TypeAll >> typeShift]struct{}{}
+
+type memStorageLock struct {
+ ms *memStorage
+}
+
+func (lock *memStorageLock) Unlock() {
+ ms := lock.ms
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock == lock {
+ ms.slock = nil
+ }
+ return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+ mu sync.Mutex
+ slock *memStorageLock
+ files map[uint64]*memFile
+ meta FileDesc
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+ return &memStorage{
+ files: make(map[uint64]*memFile),
+ }
+}
+
+func (ms *memStorage) Lock() (Locker, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock != nil {
+ return nil, ErrLocked
+ }
+ ms.slock = &memStorageLock{ms: ms}
+ return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) SetMeta(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ ms.meta = fd
+ ms.mu.Unlock()
+ return nil
+}
+
+func (ms *memStorage) GetMeta() (FileDesc, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.meta.Zero() {
+ return FileDesc{}, os.ErrNotExist
+ }
+ return ms.meta, nil
+}
+
+func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
+ ms.mu.Lock()
+ var fds []FileDesc
+ for x := range ms.files {
+ fd := unpackFile(x)
+ if fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ ms.mu.Unlock()
+ return fds, nil
+}
+
+func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if m, exist := ms.files[packFile(fd)]; exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.open = true
+ return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
+ }
+ return nil, os.ErrNotExist
+}
+
+func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ m, exist := ms.files[x]
+ if exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.Reset()
+ } else {
+ m = &memFile{}
+ ms.files[x] = m
+ }
+ m.open = true
+ return &memWriter{memFile: m, ms: ms}, nil
+}
+
+func (ms *memStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if _, exist := ms.files[x]; exist {
+ delete(ms.files, x)
+ return nil
+ }
+ return os.ErrNotExist
+}
+
+func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+
+ oldx := packFile(oldfd)
+ newx := packFile(newfd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ oldm, exist := ms.files[oldx]
+ if !exist {
+ return os.ErrNotExist
+ }
+ newm, exist := ms.files[newx]
+ if (exist && newm.open) || oldm.open {
+ return errFileOpen
+ }
+ delete(ms.files, oldx)
+ ms.files[newx] = oldm
+ return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memFile struct {
+ bytes.Buffer
+ open bool
+}
+
+type memReader struct {
+ *bytes.Reader
+ ms *memStorage
+ m *memFile
+ closed bool
+}
+
+func (mr *memReader) Close() error {
+ mr.ms.mu.Lock()
+ defer mr.ms.mu.Unlock()
+ if mr.closed {
+ return ErrClosed
+ }
+ mr.m.open = false
+ return nil
+}
+
+type memWriter struct {
+ *memFile
+ ms *memStorage
+ closed bool
+}
+
+func (*memWriter) Sync() error { return nil }
+
+func (mw *memWriter) Close() error {
+ mw.ms.mu.Lock()
+ defer mw.ms.mu.Unlock()
+ if mw.closed {
+ return ErrClosed
+ }
+ mw.memFile.open = false
+ return nil
+}
+
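+// packFile encodes a FileDesc into a single map key: the file number
+// occupies the high bits and the FileType the low typeShift bits;
+// unpackFile reverses the encoding.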
+func packFile(fd FileDesc) uint64 {
+ return uint64(fd.Num)<<typeShift | uint64(fd.Type)
+}
+
+func unpackFile(x uint64) FileDesc {
+ return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
+}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 0000000..4e4a724
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// FileType represents a file type.
+type FileType int
+
+// File types.
+const (
+ TypeManifest FileType = 1 << iota
+ TypeJournal
+ TypeTable
+ TypeTemp
+
+ TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+ switch t {
+ case TypeManifest:
+ return "manifest"
+ case TypeJournal:
+ return "journal"
+ case TypeTable:
+ return "table"
+ case TypeTemp:
+ return "temp"
+ }
+ return fmt.Sprintf("<unknown:%d>", t)
+}
+
+// Common error.
+var (
+ ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+ ErrLocked = errors.New("leveldb/storage: already locked")
+ ErrClosed = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+ Fd FileDesc
+ Err error
+}
+
+func isCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+ // Sync commits the current contents of the file to stable storage.
+ Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+ io.ReadSeeker
+ io.ReaderAt
+ io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+ io.WriteCloser
+ Syncer
+}
+
+// Locker is the interface that wraps Unlock method.
+type Locker interface {
+ Unlock()
+}
+
+// FileDesc is a 'file descriptor'.
+type FileDesc struct {
+ Type FileType
+ Num int64
+}
+
+func (fd FileDesc) String() string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
+ }
+}
+
+// Zero returns true if fd == (FileDesc{}).
+func (fd FileDesc) Zero() bool {
+ return fd == (FileDesc{})
+}
+
+// FileDescOk returns true if fd is a valid 'file descriptor'.
+func FileDescOk(fd FileDesc) bool {
+ switch fd.Type {
+ case TypeManifest:
+ case TypeJournal:
+ case TypeTable:
+ case TypeTemp:
+ default:
+ return false
+ }
+ return fd.Num >= 0
+}
+
+// Storage is the storage. A storage instance must be safe for concurrent use.
+type Storage interface {
+ // Lock locks the storage. Any subsequent attempt to call Lock will fail
+ // until the last lock is released.
+ // The caller should call the Unlock method after use.
+ Lock() (Locker, error)
+
+ // Log logs a string. This is used for logging.
+ // An implementation may write to a file, stdout or simply do nothing.
+ Log(str string)
+
+ // SetMeta stores a 'file descriptor' that can later be acquired using the
+ // GetMeta method. The 'file descriptor' should point to a valid file.
+ // SetMeta should be implemented in such a way that the change happens
+ // atomically.
+ SetMeta(fd FileDesc) error
+
+ // GetMeta returns the 'file descriptor' stored in meta. The 'file descriptor'
+ // can be updated using the SetMeta method.
+ // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or the
+ // 'file descriptor' points to a nonexistent file.
+ GetMeta() (FileDesc, error)
+
+ // List returns file descriptors that match the given file types.
+ // The file types may be OR'ed together.
+ List(ft FileType) ([]FileDesc, error)
+
+ // Open opens file with the given 'file descriptor' read-only.
+ // Returns os.ErrNotExist error if the file does not exist.
+ // Returns ErrClosed if the underlying storage is closed.
+ Open(fd FileDesc) (Reader, error)
+
+ // Create creates a file with the given 'file descriptor', truncates it if it
+ // already exists, and opens it write-only.
+ // Returns ErrClosed if the underlying storage is closed.
+ Create(fd FileDesc) (Writer, error)
+
+ // Remove removes file with the given 'file descriptor'.
+ // Returns ErrClosed if the underlying storage is closed.
+ Remove(fd FileDesc) error
+
+ // Rename renames file from oldfd to newfd.
+ // Returns ErrClosed if the underlying storage is closed.
+ Rename(oldfd, newfd FileDesc) error
+
+ // Close closes the storage.
+ // It is valid to call Close multiple times. Other methods should not be
+ // called after the storage has been closed.
+ Close() error
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
new file mode 100644
index 0000000..1fac60d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -0,0 +1,531 @@
+// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tFile holds basic information about a table.
+type tFile struct {
+ fd storage.FileDesc
+ seekLeft int32
+ size int64
+ imin, imax internalKey
+}
+
+// Returns true if given key is after largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
+}
+
+// Returns true if given key is before smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
+}
+
+// Returns true if given key range overlaps with this table key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+ return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the current seeks left.
+func (t *tFile) consumeSeek() int32 {
+ return atomic.AddInt32(&t.seekLeft, -1)
+}
+
+// Creates new tFile.
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
+ f := &tFile{
+ fd: fd,
+ size: size,
+ imin: imin,
+ imax: imax,
+ }
+
+ // We arrange to automatically compact this file after
+ // a certain number of seeks. Let's assume:
+ // (1) One seek costs 10ms
+ // (2) Writing or reading 1MB costs 10ms (100MB/s)
+ // (3) A compaction of 1MB does 25MB of IO:
+ // 1MB read from this level
+ // 10-12MB read from next level (boundaries may be misaligned)
+ // 10-12MB written to next level
+ // This implies that 25 seeks cost the same as the compaction
+ // of 1MB of data. I.e., one seek costs approximately the
+ // same as the compaction of 40KB of data. We are a little
+ // conservative and allow approximately one seek for every 16KB
+ // of data before triggering a compaction.
+ f.seekLeft = int32(size / 16384)
+ if f.seekLeft < 100 {
+ f.seekLeft = 100
+ }
+
+ return f
+}
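+
+// Illustrative arithmetic (not part of the original source): a 2 MiB table
+// gets 2*1024*1024/16384 = 128 seeks before it becomes a seek-compaction
+// candidate, while any table smaller than ~1.6 MiB is clamped to the
+// 100-seek floor.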
+
+func tableFileFromRecord(r atRecord) *tFile {
+ return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
+}
+
+// tFiles holds multiple tFile instances.
+type tFiles []*tFile
+
+func (tf tFiles) Len() int { return len(tf) }
+func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+
+func (tf tFiles) nums() string {
+ x := "[ "
+ for i, f := range tf {
+ if i != 0 {
+ x += ", "
+ }
+ x += fmt.Sprint(f.fd.Num)
+ }
+ x += " ]"
+ return x
+}
+
+// Returns true if the smallest key of table i is less than that of table j.
+// This is used to sort tables by key in ascending order.
+func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
+ a, b := tf[i], tf[j]
+ n := icmp.Compare(a.imin, b.imin)
+ if n == 0 {
+ return a.fd.Num < b.fd.Num
+ }
+ return n < 0
+}
+
+// Returns true if the file number of table i is greater than that of table j.
+// This is used to sort tables by file number in descending order.
+func (tf tFiles) lessByNum(i, j int) bool {
+ return tf[i].fd.Num > tf[j].fd.Num
+}
+
+// Sorts tables by key in ascending order.
+func (tf tFiles) sortByKey(icmp *iComparer) {
+ sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
+}
+
+// Sorts tables by file number in descending order.
+func (tf tFiles) sortByNum() {
+ sort.Sort(&tFilesSortByNum{tFiles: tf})
+}
+
+// Returns the total size of all tables.
+func (tf tFiles) size() (sum int64) {
+ for _, t := range tf {
+ sum += t.size
+ }
+ return sum
+}
+
+// Returns the smallest index of the tables whose smallest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imin, ikey) >= 0
+ })
+}
+
+// Returns the smallest index of the tables whose largest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imax, ikey) >= 0
+ })
+}
+
+// Returns true if the given key range overlaps with one or more of the
+// tables' key ranges. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+ if unsorted {
+ // Check against all files.
+ for _, t := range tf {
+ if t.overlaps(icmp, umin, umax) {
+ return true
+ }
+ }
+ return false
+ }
+
+ i := 0
+ if len(umin) > 0 {
+ // Find the earliest possible internal key for min.
+ i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
+ }
+ if i >= len(tf) {
+ // Beginning of range is after all files, so no overlap.
+ return false
+ }
+ return !tf[i].before(icmp, umax)
+}
+
+// Returns the tables whose key range overlaps with the given key range.
+// The range will be expanded if a user key spans multiple tables.
+// If overlapped is true then the search will be restarted whenever umax
+// is expanded.
+// The dst content will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+ dst = dst[:0]
+ for i := 0; i < len(tf); {
+ t := tf[i]
+ if t.overlaps(icmp, umin, umax) {
+ if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+ umin = t.imin.ukey()
+ dst = dst[:0]
+ i = 0
+ continue
+ } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+ umax = t.imax.ukey()
+ // Restart search if it is overlapped.
+ if overlapped {
+ dst = dst[:0]
+ i = 0
+ continue
+ }
+ }
+
+ dst = append(dst, t)
+ }
+ i++
+ }
+
+ return dst
+}
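+
+// For example (an illustrative reading of the loop above): if the initial
+// umin..umax range overlaps only table 2, but table 2's largest user key
+// extends past umax, then umax is widened to that key; with overlapped=true
+// (as on level-0), the scan also restarts from table 0 so the widened range
+// is re-checked against every table.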
+
+// Returns the key range covered by the tables.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
+ for i, t := range tf {
+ if i == 0 {
+ imin, imax = t.imin, t.imax
+ continue
+ }
+ if icmp.Compare(t.imin, imin) < 0 {
+ imin = t.imin
+ }
+ if icmp.Compare(t.imax, imax) > 0 {
+ imax = t.imax
+ }
+ }
+
+ return
+}
+
+// Creates iterator index from tables.
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
+ if slice != nil {
+ var start, limit int
+ if slice.Start != nil {
+ start = tf.searchMax(icmp, internalKey(slice.Start))
+ }
+ if slice.Limit != nil {
+ limit = tf.searchMin(icmp, internalKey(slice.Limit))
+ } else {
+ limit = tf.Len()
+ }
+ tf = tf[start:limit]
+ }
+ return iterator.NewArrayIndexer(&tFilesArrayIndexer{
+ tFiles: tf,
+ tops: tops,
+ icmp: icmp,
+ slice: slice,
+ ro: ro,
+ })
+}
+
+// Tables iterator index.
+type tFilesArrayIndexer struct {
+ tFiles
+ tops *tOps
+ icmp *iComparer
+ slice *util.Range
+ ro *opt.ReadOptions
+}
+
+func (a *tFilesArrayIndexer) Search(key []byte) int {
+ return a.searchMax(a.icmp, internalKey(key))
+}
+
+func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
+ if i == 0 || i == a.Len()-1 {
+ return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
+ }
+ return a.tops.newIterator(a.tFiles[i], nil, a.ro)
+}
+
+// Helper type for sortByKey.
+type tFilesSortByKey struct {
+ tFiles
+ icmp *iComparer
+}
+
+func (x *tFilesSortByKey) Less(i, j int) bool {
+ return x.lessByKey(x.icmp, i, j)
+}
+
+// Helper type for sortByNum.
+type tFilesSortByNum struct {
+ tFiles
+}
+
+func (x *tFilesSortByNum) Less(i, j int) bool {
+ return x.lessByNum(i, j)
+}
+
+// Table operations.
+type tOps struct {
+ s *session
+ noSync bool
+ evictRemoved bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
+}
+
+// Creates an empty table and returns a table writer.
+func (t *tOps) create() (*tWriter, error) {
+ fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
+ fw, err := t.s.stor.Create(fd)
+ if err != nil {
+ return nil, err
+ }
+ return &tWriter{
+ t: t,
+ fd: fd,
+ w: fw,
+ tw: table.NewWriter(fw, t.s.o.Options),
+ }, nil
+}
+
+// Builds table from src iterator.
+func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
+ w, err := t.create()
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ w.drop()
+ }
+ }()
+
+ for src.Next() {
+ err = w.append(src.Key(), src.Value())
+ if err != nil {
+ return
+ }
+ }
+ err = src.Error()
+ if err != nil {
+ return
+ }
+
+ n = w.tw.EntriesLen()
+ f, err = w.finish()
+ return
+}
+
+// Opens table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
+ ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
+ var r storage.Reader
+ r, err = t.s.stor.Open(f.fd)
+ if err != nil {
+ return 0, nil
+ }
+
+ var bcache *cache.NamespaceGetter
+ if t.bcache != nil {
+ bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+ }
+
+ var tr *table.Reader
+ tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
+ if err != nil {
+ r.Close()
+ return 0, nil
+ }
+ return 1, tr
+
+ })
+ if ch == nil && err == nil {
+ err = ErrClosed
+ }
+ return
+}
+
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).FindKey(key, true, ro)
+}
+
+// Returns approximate offset of the given key.
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).OffsetOf(key)
+}
+
+// Creates an iterator from the given table.
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ ch, err := t.open(f)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+ iter.SetReleaser(ch)
+ return iter
+}
+
+// Removes table from persistent storage. It waits until
+// no one uses the table.
+func (t *tOps) remove(f *tFile) {
+ t.cache.Delete(0, uint64(f.fd.Num), func() {
+ if err := t.s.stor.Remove(f.fd); err != nil {
+ t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
+ } else {
+ t.s.logf("table@remove removed @%d", f.fd.Num)
+ }
+ if t.evictRemoved && t.bcache != nil {
+ t.bcache.EvictNS(uint64(f.fd.Num))
+ }
+ })
+}
+
+// Closes the table ops instance. It will close all tables,
+// regardless of whether they are still in use.
+func (t *tOps) close() {
+ t.bpool.Close()
+ t.cache.Close()
+ if t.bcache != nil {
+ t.bcache.CloseWeak()
+ }
+}
+
+// Creates new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+ var (
+ cacher cache.Cacher
+ bcache *cache.Cache
+ bpool *util.BufferPool
+ )
+ if s.o.GetOpenFilesCacheCapacity() > 0 {
+ cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+ }
+ if !s.o.GetDisableBlockCache() {
+ var bcacher cache.Cacher
+ if s.o.GetBlockCacheCapacity() > 0 {
+ bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
+ }
+ bcache = cache.NewCache(bcacher)
+ }
+ if !s.o.GetDisableBufferPool() {
+ bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+ }
+ return &tOps{
+ s: s,
+ noSync: s.o.GetNoSync(),
+ evictRemoved: s.o.GetBlockCacheEvictRemoved(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
+ }
+}
+
+// tWriter wraps the table writer. It keeps track of the file descriptor
+// and the added key range.
+type tWriter struct {
+ t *tOps
+
+ fd storage.FileDesc
+ w storage.Writer
+ tw *table.Writer
+
+ first, last []byte
+}
+
+// Appends a key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
+ if w.first == nil {
+ w.first = append([]byte{}, key...)
+ }
+ w.last = append(w.last[:0], key...)
+ return w.tw.Append(key, value)
+}
+
+// Returns true if the table is empty.
+func (w *tWriter) empty() bool {
+ return w.first == nil
+}
+
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+ if w.w != nil {
+ w.w.Close()
+ w.w = nil
+ }
+}
+
+// Finalizes the table and returns table file.
+func (w *tWriter) finish() (f *tFile, err error) {
+ defer w.close()
+ err = w.tw.Close()
+ if err != nil {
+ return
+ }
+ if !w.t.noSync {
+ err = w.w.Sync()
+ if err != nil {
+ return
+ }
+ }
+ f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
+ return
+}
+
+// Drops the table.
+func (w *tWriter) drop() {
+ w.close()
+ w.t.s.stor.Remove(w.fd)
+ w.t.s.reuseFileNum(w.fd.Num)
+ w.tw = nil
+ w.first = nil
+ w.last = nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
new file mode 100644
index 0000000..496feb6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -0,0 +1,1139 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReaderReleased = errors.New("leveldb/table: reader released")
+ ErrIterReleased = errors.New("leveldb/table: iterator released")
+)
+
+// ErrCorrupted describes error due to corruption. This error will be wrapped
+// with errors.ErrCorrupted.
+type ErrCorrupted struct {
+ Pos int64
+ Size int64
+ Kind string
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+type block struct {
+ bpool *util.BufferPool
+ bh blockHandle
+ data []byte
+ restartsLen int
+ restartsOffset int
+}
+
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
+ index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+ offset++ // shared always zero, since this is a restart point
+ v1, n1 := binary.Uvarint(b.data[offset:]) // key length
+ _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
+ m := offset + n1 + n2
+ return cmp.Compare(b.data[m:m+int(v1)], key) > 0
+ }) + rstart - 1
+ if index < rstart {
+ // The smallest key is greater-than key sought.
+ index = rstart
+ }
+ offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+ return
+}
+
+func (b *block) restartIndex(rstart, rlimit, offset int) int {
+ return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
+ }) + rstart - 1
+}
+
+func (b *block) restartOffset(index int) int {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+}
+
+func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
+ if offset >= b.restartsOffset {
+ if offset != b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries offset not aligned"}
+ }
+ return
+ }
+ v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length
+ v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length
+ v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length
+ m := n0 + n1 + n2
+ n = m + int(v1) + int(v2)
+ if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries corrupted"}
+ return
+ }
+ key = b.data[offset+m : offset+m+int(v1)]
+ value = b.data[offset+m+int(v1) : offset+n]
+ nShared = int(v0)
+ return
+}
+
+func (b *block) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type blockIter struct {
+ tr *Reader
+ block *block
+ blockReleaser util.Releaser
+ releaser util.Releaser
+ key, value []byte
+ offset int
+ // Previous offset, only filled by Next.
+ prevOffset int
+ prevNode []int
+ prevKeys []byte
+ restartIndex int
+ // Iterator direction.
+ dir dir
+ // Restart index slice range.
+ riStart int
+ riLimit int
+ // Offset slice range.
+ offsetStart int
+ offsetRealStart int
+ offsetLimit int
+ // Error.
+ err error
+}
+
+func (i *blockIter) sErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+}
+
+func (i *blockIter) reset() {
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ i.dir = dirSOI
+ i.key = i.key[:0]
+ i.value = nil
+}
+
+func (i *blockIter) isFirst() bool {
+ switch i.dir {
+ case dirForward:
+ return i.prevOffset == i.offsetRealStart
+ case dirBackward:
+ return len(i.prevNode) == 1 && i.restartIndex == i.riStart
+ }
+ return false
+}
+
+func (i *blockIter) isLast() bool {
+ switch i.dir {
+ case dirForward, dirBackward:
+ return i.offset == i.offsetLimit
+ }
+ return false
+}
+
+func (i *blockIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirSOI
+ return i.Next()
+}
+
+func (i *blockIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirEOI
+ return i.Prev()
+}
+
+func (i *blockIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
+ if err != nil {
+ i.sErr(err)
+ return false
+ }
+ i.restartIndex = ri
+ i.offset = max(i.offsetStart, offset)
+ if i.dir == dirSOI || i.dir == dirEOI {
+ i.dir = dirForward
+ }
+ for i.Next() {
+ if i.tr.cmp.Compare(i.key, key) >= 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *blockIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirSOI {
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ } else if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ for i.offset < i.offsetRealStart {
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.offset += n
+ }
+ if i.offset >= i.offsetLimit {
+ i.dir = dirEOI
+ if i.offset != i.offsetLimit {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ }
+ return false
+ }
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.prevOffset = i.offset
+ i.offset += n
+ i.dir = dirForward
+ return true
+}
+
+func (i *blockIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ var ri int
+ if i.dir == dirForward {
+ // Change direction.
+ i.offset = i.prevOffset
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset)
+ i.dir = dirBackward
+ } else if i.dir == dirEOI {
+ // At the end of iterator.
+ i.restartIndex = i.riLimit
+ i.offset = i.offsetLimit
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.riLimit - 1
+ i.dir = dirBackward
+ } else if len(i.prevNode) == 1 {
+ // This is the end of a restart range.
+ i.offset = i.prevNode[0]
+ i.prevNode = i.prevNode[:0]
+ if i.restartIndex == i.riStart {
+ i.dir = dirSOI
+ return false
+ }
+ i.restartIndex--
+ ri = i.restartIndex
+ } else {
+ // In the middle of restart range, get from cache.
+ n := len(i.prevNode) - 3
+ node := i.prevNode[n:]
+ i.prevNode = i.prevNode[:n]
+ // Get the key.
+ ko := node[0]
+ i.key = append(i.key[:0], i.prevKeys[ko:]...)
+ i.prevKeys = i.prevKeys[:ko]
+ // Get the value.
+ vo := node[1]
+ vl := vo + node[2]
+ i.value = i.block.data[vo:vl]
+ i.offset = vl
+ return true
+ }
+ // Build entries cache.
+ i.key = i.key[:0]
+ i.value = nil
+ offset := i.block.restartOffset(ri)
+ if offset == i.offset {
+ ri--
+ if ri < 0 {
+ i.dir = dirSOI
+ return false
+ }
+ offset = i.block.restartOffset(ri)
+ }
+ i.prevNode = append(i.prevNode, offset)
+ for {
+ key, value, nShared, n, err := i.block.entry(offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if offset >= i.offsetRealStart {
+ if i.value != nil {
+ // Appends 3 variables:
+ // 1. Previous keys offset
+ // 2. Value offset in the data block
+ // 3. Value length
+ i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value))
+ i.prevKeys = append(i.prevKeys, i.key...)
+ }
+ i.value = value
+ }
+ i.key = append(i.key[:nShared], key...)
+ offset += n
+ // Stop if target offset reached.
+ if offset >= i.offset {
+ if offset != i.offset {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ return false
+ }
+
+ break
+ }
+ }
+ i.restartIndex = ri
+ i.offset = offset
+ return true
+}
+
+func (i *blockIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *blockIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *blockIter) Release() {
+ if i.dir != dirReleased {
+ i.tr = nil
+ i.block = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+ i.key = nil
+ i.value = nil
+ i.dir = dirReleased
+ if i.blockReleaser != nil {
+ i.blockReleaser.Release()
+ i.blockReleaser = nil
+ }
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *blockIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *blockIter) Valid() bool {
+ return i.err == nil && (i.dir == dirBackward || i.dir == dirForward)
+}
+
+func (i *blockIter) Error() error {
+ return i.err
+}
+
+type filterBlock struct {
+ bpool *util.BufferPool
+ data []byte
+ oOffset int
+ baseLg uint
+ filtersNum int
+}
+
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
+ i := int(offset >> b.baseLg)
+ if i < b.filtersNum {
+ o := b.data[b.oOffset+i*4:]
+ n := int(binary.LittleEndian.Uint32(o))
+ m := int(binary.LittleEndian.Uint32(o[4:]))
+ if n < m && m <= b.oOffset {
+ return filter.Contains(b.data[n:m], key)
+ } else if n == m {
+ return false
+ }
+ }
+ return true
+}
+
+func (b *filterBlock) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type indexIter struct {
+ *blockIter
+ tr *Reader
+ slice *util.Range
+ // Options
+ fillCache bool
+}
+
+func (i *indexIter) Get() iterator.Iterator {
+ value := i.Value()
+ if value == nil {
+ return nil
+ }
+ dataBH, n := decodeBlockHandle(value)
+ if n == 0 {
+ return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
+ }
+
+ var slice *util.Range
+ if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
+ slice = i.slice
+ }
+ return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
+}
+
+// Reader is a table reader.
+type Reader struct {
+ mu sync.RWMutex
+ fd storage.FileDesc
+ reader io.ReaderAt
+ cache *cache.NamespaceGetter
+ err error
+ bpool *util.BufferPool
+ // Options
+ o *opt.Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ verifyChecksum bool
+
+ dataEnd int64
+ metaBH, indexBH, filterBH blockHandle
+ indexBlock *block
+ filterBlock *filterBlock
+}
+
+func (r *Reader) blockKind(bh blockHandle) string {
+ switch bh.offset {
+ case r.metaBH.offset:
+ return "meta-block"
+ case r.indexBH.offset:
+ return "index-block"
+ case r.filterBH.offset:
+ if r.filterBH.length > 0 {
+ return "filter-block"
+ }
+ }
+ return "data-block"
+}
+
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+ return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+ return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+ if cerr, ok := err.(*ErrCorrupted); ok {
+ cerr.Pos = int64(bh.offset)
+ cerr.Size = int64(bh.length)
+ cerr.Kind = r.blockKind(bh)
+ return &errors.ErrCorrupted{Fd: r.fd, Err: cerr}
+ }
+ return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+ data := r.bpool.Get(int(bh.length + blockTrailerLen))
+ if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ if verifyChecksum {
+ n := bh.length + 1
+ checksum0 := binary.LittleEndian.Uint32(data[n:])
+ checksum1 := util.NewCRC(data[:n]).Value()
+ if checksum0 != checksum1 {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+ }
+ }
+
+ switch data[bh.length] {
+ case blockTypeNoCompression:
+ data = data[:bh.length]
+ case blockTypeSnappyCompression:
+ decLen, err := snappy.DecodedLen(data[:bh.length])
+ if err != nil {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ decData := r.bpool.Get(decLen)
+ decData, err = snappy.Decode(decData, data[:bh.length])
+ r.bpool.Put(data)
+ if err != nil {
+ r.bpool.Put(decData)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ data = decData
+ default:
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
+ }
+ return data, nil
+}
+
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+ data, err := r.readRawBlock(bh, verifyChecksum)
+ if err != nil {
+ return nil, err
+ }
+ restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+ b := &block{
+ bpool: r.bpool,
+ bh: bh,
+ data: data,
+ restartsLen: restartsLen,
+ restartsOffset: len(data) - (restartsLen+1)*4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *block
+ b, err = r.readBlock(bh, verifyChecksum)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*block)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readBlock(bh, verifyChecksum)
+ return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
+ data, err := r.readRawBlock(bh, true)
+ if err != nil {
+ return nil, err
+ }
+ n := len(data)
+ if n < 5 {
+ return nil, r.newErrCorruptedBH(bh, "too short")
+ }
+ m := n - 5
+ oOffset := int(binary.LittleEndian.Uint32(data[m:]))
+ if oOffset > m {
+ return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
+ }
+ b := &filterBlock{
+ bpool: r.bpool,
+ data: data,
+ oOffset: oOffset,
+ baseLg: uint(data[n-1]),
+ filtersNum: (m - oOffset) / 4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *filterBlock
+ b, err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+ if r.indexBlock == nil {
+ return r.readBlockCached(r.indexBH, true, fillCache)
+ }
+ return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.filterBlock == nil {
+ return r.readFilterBlockCached(r.filterBH, fillCache)
+ }
+ return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+ bi := &blockIter{
+ tr: r,
+ block: b,
+ blockReleaser: bReleaser,
+ // Valid key should never be nil.
+ key: make([]byte, 0),
+ dir: dirSOI,
+ riStart: 0,
+ riLimit: b.restartsLen,
+ offsetStart: 0,
+ offsetRealStart: 0,
+ offsetLimit: b.restartsOffset,
+ }
+ if slice != nil {
+ if slice.Start != nil {
+ if bi.Seek(slice.Start) {
+ bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+ bi.offsetStart = b.restartOffset(bi.riStart)
+ bi.offsetRealStart = bi.prevOffset
+ } else {
+ bi.riStart = b.restartsLen
+ bi.offsetStart = b.restartsOffset
+ bi.offsetRealStart = b.restartsOffset
+ }
+ }
+ if slice.Limit != nil {
+ if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+ bi.offsetLimit = bi.prevOffset
+ bi.riLimit = bi.restartIndex + 1
+ }
+ }
+ bi.reset()
+ if bi.offsetStart > bi.offsetLimit {
+ bi.sErr(errors.New("leveldb/table: invalid slice range"))
+ }
+ }
+ return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// table, and a nil Range.Limit is treated as a key after all keys in
+// the table.
+//
+// WARNING: The content of any slice returned by the iterator (e.g. a slice
+// returned by the Iterator.Key() or Iterator.Value() methods) should not be
+// modified unless noted otherwise.
+//
+// The returned iterator is not safe for concurrent use and should be released
+// after use.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ fillCache := !ro.GetDontFillCache()
+ indexBlock, rel, err := r.getIndexBlock(fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ index := &indexIter{
+ blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+ tr: r,
+ slice: slice,
+ fillCache: !ro.GetDontFillCache(),
+ }
+ return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
+}
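+
+// A minimal usage sketch (illustrative; r is a *Reader and process is a
+// hypothetical callback):
+//
+//	iter := r.NewIterator(nil, nil)
+//	for iter.Next() {
+//		process(iter.Key(), iter.Value())
+//	}
+//	iter.Release()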
+
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.getIndexBlock(true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+
+ if !index.Seek(key) {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ // The filter should only be used for exact matches.
+ if filtered && r.filter != nil {
+ filterBlock, frel, ferr := r.getFilterBlock(true)
+ if ferr == nil {
+ if !filterBlock.contains(r.filter, dataBH.offset, key) {
+ frel.Release()
+ return nil, nil, ErrNotFound
+ }
+ frel.Release()
+ } else if !errors.IsCorrupted(ferr) {
+ return nil, nil, ferr
+ }
+ }
+
+ data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Seek(key) {
+ data.Release()
+ if err = data.Error(); err != nil {
+ return
+ }
+
+ // The nearest greater-than key is the first key of the next block.
+ if !index.Next() {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n = decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Next() {
+ data.Release()
+ if err = data.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ }
+
+ // Key doesn't use block buffer, no need to copy the buffer.
+ rkey = data.Key()
+ if !noValue {
+ if r.bpool == nil {
+ value = data.Value()
+ } else {
+ // Value does use block buffer, and since the buffer will be
+ // recycled, it needs to be copied.
+ value = append([]byte{}, data.Value()...)
+ }
+ }
+ data.Release()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such a pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such a key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after FindKey returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+ rkey, _, err = r.find(key, filtered, ro, true)
+ return
+}
+
+// Get gets the value for the given key. It returns errors.ErrNotFound
+// if the table does not contain the key.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Get returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ rkey, value, err := r.find(key, false, ro, false)
+ if err == nil && r.cmp.Compare(rkey, key) != 0 {
+ value = nil
+ err = ErrNotFound
+ }
+ return
+}
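+
+// Example use of Get (an illustrative sketch, not from the original source):
+//
+//	value, err := r.Get([]byte("some-key"), nil)
+//	if err == ErrNotFound {
+//		// the table holds no entry with exactly this key
+//	}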
+
+// OffsetOf returns approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after OffsetOf returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+ if index.Seek(key) {
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return
+ }
+ offset = int64(dataBH.offset)
+ return
+ }
+ err = index.Error()
+ if err == nil {
+ offset = r.dataEnd
+ }
+ return
+}
+
+// Release implements util.Releaser.
+// It also closes the file if the underlying reader is an io.Closer.
+func (r *Reader) Release() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ if r.indexBlock != nil {
+ r.indexBlock.Release()
+ r.indexBlock = nil
+ }
+ if r.filterBlock != nil {
+ r.filterBlock.Release()
+ r.filterBlock = nil
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+ r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The cache and bpool arguments are optional and can be nil.
+//
+// The returned table reader instance is safe for concurrent use.
+func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+ if f == nil {
+ return nil, errors.New("leveldb/table: nil file")
+ }
+
+ r := &Reader{
+ fd: fd,
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ o: o,
+ cmp: o.GetComparer(),
+ verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+ }
+
+ if size < footerLen {
+ r.err = r.newErrCorrupted(0, size, "table", "too small")
+ return r, nil
+ }
+
+ footerPos := size - footerLen
+ var footer [footerLen]byte
+ if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+ return nil, err
+ }
+ if string(footer[footerLen-len(magic):footerLen]) != magic {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+ return r, nil
+ }
+
+ var n int
+ // Decode the metaindex block handle.
+ r.metaBH, n = decodeBlockHandle(footer[:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+ return r, nil
+ }
+
+ // Decode the index block handle.
+ r.indexBH, n = decodeBlockHandle(footer[n:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+ return r, nil
+ }
+
+ // Read metaindex block.
+ metaBlock, err := r.readBlock(r.metaBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+
+ // Set data end.
+ r.dataEnd = int64(r.metaBH.offset)
+
+ // Read metaindex.
+ metaIter := r.newBlockIter(metaBlock, nil, nil, true)
+ for metaIter.Next() {
+ key := string(metaIter.Key())
+ if !strings.HasPrefix(key, "filter.") {
+ continue
+ }
+ fn := key[7:]
+ if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+ r.filter = f0
+ } else {
+ for _, f0 := range o.GetAltFilters() {
+ if f0.Name() == fn {
+ r.filter = f0
+ break
+ }
+ }
+ }
+ if r.filter != nil {
+ filterBH, n := decodeBlockHandle(metaIter.Value())
+ if n == 0 {
+ continue
+ }
+ r.filterBH = filterBH
+ // Update data end.
+ r.dataEnd = int64(filterBH.offset)
+ break
+ }
+ }
+ metaIter.Release()
+ metaBlock.Release()
+
+ // Cache index and filter block locally, since we don't have global cache.
+ if cache == nil {
+ r.indexBlock, err = r.readBlock(r.indexBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+ if r.filter != nil {
+ r.filterBlock, err = r.readFilterBlock(r.filterBH)
+ if err != nil {
+ if !errors.IsCorrupted(err) {
+ return nil, err
+ }
+
+ // Don't use filter then.
+ r.filter = nil
+ }
+ }
+ }
+
+ return r, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 0000000..beacdc1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows reading and writing of sorted key/value pairs.
+package table
+
+import (
+ "encoding/binary"
+)
+
+/*
+Table:
+
+A table consists of one or more data blocks, an optional filter block,
+a metaindex block, an index block and a table footer. The metaindex block
+is a special block used to keep parameters of the table, such as the filter
+block name and its block handle. The index block is a special block used to
+keep records of the data blocks' offsets and lengths; it uses one as its
+restart interval. The key used by the index block is the last key of the
+preceding block, a shortened separator between adjacent blocks, or a
+shortened successor of the last key of the last block. The filter block is
+an optional block containing filter data generated by a filter generator.
+
+Table data structure:
+ + optional
+ /
+ +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
+ | data block 1 | ... | data block n | filter block | metaindex block | index block | footer |
+ +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+ Each block is followed by a 5-byte trailer containing the compression type and a checksum.
+
+Table block trailer:
+
+ +---------------------------+-------------------+
+ | compression type (1-byte) | checksum (4-byte) |
+ +---------------------------+-------------------+
+
+ The checksum is a CRC-32 computed using Castagnoli's polynomial. The
+ compression type is also included in the checksum.
+
+Table footer:
+
+ +------------------- 40-bytes -------------------+
+ / \
+ +------------------------+--------------------+------+-----------------+
+ | metaindex block handle / index block handle / ---- | magic (8-bytes) |
+ +------------------------+--------------------+------+-----------------+
+
+ The magic is the first 64 bits of the SHA-1 sum of "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Block:
+
+A block consists of one or more key/value entries and a block trailer.
+A block entry shares a key prefix with its preceding key until a restart
+point is reached. A block should contain at least one restart point.
+The first restart point is always zero.
+
+Block data structure:
+
+ + restart point + restart point (depends on restart interval)
+ / /
+ +---------------+---------------+---------------+---------------+---------+
+ | block entry 1 | block entry 2 | ... | block entry n | trailer |
+ +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+ +---- key len ----+
+ / \
+ +-------+---------+-----------+---------+--------------------+--------------+----------------+
+ | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+ +-----------------+---------------------+--------------------+--------------+----------------+
+
+ Each block entry shares a key prefix with its preceding key:
+ Conditions:
+ restart_interval=2
+ entry one : key=deck,value=v1
+ entry two : key=dock,value=v2
+ entry three: key=duck,value=v3
+ The entries will be encoded as follows:
+
+ + restart point (offset=0) + restart point (offset=16)
+ / /
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" |
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ \ / \ / \ /
+ +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+
+
+ The block trailer will contain two restart points:
+
+ +------------+-----------+--------+
+ | 0 | 16 | 2 |
+ +------------+-----------+---+----+
+ \ / \
+ +-- restart points --+ + restart points length
+
+Block trailer:
+
+ +-- 4-bytes --+
+ / \
+ +-----------------+-----------------+-----------------+------------------------------+
+ | restart point 1 | .... | restart point n | restart points len (4-bytes) |
+ +-----------------+-----------------+-----------------+------------------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
+
+/*
+Filter block:
+
+A filter block consists of one or more filter data sections and a filter block trailer.
+The trailer contains the filter data offsets, a trailer offset and a 1-byte base Lg.
+
+Filter block data structure:
+
+ + offset 1 + offset 2 + offset n + trailer offset
+ / / / /
+ +---------------+---------------+---------------+---------+
+ | filter data 1 | ... | filter data n | trailer |
+ +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+ +- 4-bytes -+
+ / \
+ +---------------+---------------+---------------+-------------------------------+------------------+
+ | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+ +---------------+---------------+---------------+-------------------------------+------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
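+
+// For example (illustrative arithmetic): with the default base Lg of 11
+// (one filter per 2KiB of data), a data block starting at file offset 5000
+// is checked against filter data number 5000 >> 11 = 2.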
+
+const (
+ blockTrailerLen = 5
+ footerLen = 48
+
+ magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+ // The block type gives the per-block compression format.
+ // These constants are part of the file format and should not be changed.
+ blockTypeNoCompression = 0
+ blockTypeSnappyCompression = 1
+
+ // Generate new filter every 2KB of data
+ filterBaseLg = 11
+ filterBase = 1 << filterBaseLg
+)
+
+type blockHandle struct {
+ offset, length uint64
+}
+
+func decodeBlockHandle(src []byte) (blockHandle, int) {
+ offset, n := binary.Uvarint(src)
+ length, m := binary.Uvarint(src[n:])
+ if n == 0 || m == 0 {
+ return blockHandle{}, 0
+ }
+ return blockHandle{offset, length}, n + m
+}
+
+func encodeBlockHandle(dst []byte, b blockHandle) int {
+ n := binary.PutUvarint(dst, b.offset)
+ m := binary.PutUvarint(dst[n:], b.length)
+ return n + m
+}
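+
+// Illustrative round trip (a sketch, not part of the original source):
+//
+//	var buf [20]byte
+//	n := encodeBlockHandle(buf[:], blockHandle{offset: 512, length: 4096})
+//	bh, m := decodeBlockHandle(buf[:n])
+//	// bh == blockHandle{512, 4096} and m == n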
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
new file mode 100644
index 0000000..b96b271
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -0,0 +1,375 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
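+
+// For example: sharedPrefixLen([]byte("deck"), []byte("dock")) == 1, since
+// only the leading 'd' matches.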
+
+type blockWriter struct {
+ restartInterval int
+ buf util.Buffer
+ nEntries int
+ prevKey []byte
+ restarts []uint32
+ scratch []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+ nShared := 0
+ if w.nEntries%w.restartInterval == 0 {
+ w.restarts = append(w.restarts, uint32(w.buf.Len()))
+ } else {
+ nShared = sharedPrefixLen(w.prevKey, key)
+ }
+ n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+ w.buf.Write(w.scratch[:n])
+ w.buf.Write(key[nShared:])
+ w.buf.Write(value)
+ w.prevKey = append(w.prevKey[:0], key...)
+ w.nEntries++
+}
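+
+// For instance (matching the worked example in table.go): with
+// restartInterval=2, appending "deck" and then "dock" stores the second
+// entry as shared=1 plus the key suffix "ock".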
+
+func (w *blockWriter) finish() {
+ // Write restarts entry.
+ if w.nEntries == 0 {
+ // Must have at least one restart entry.
+ w.restarts = append(w.restarts, 0)
+ }
+ w.restarts = append(w.restarts, uint32(len(w.restarts)))
+ for _, x := range w.restarts {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+}
+
+func (w *blockWriter) reset() {
+ w.buf.Reset()
+ w.nEntries = 0
+ w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+ restartsLen := len(w.restarts)
+ if restartsLen == 0 {
+ restartsLen = 1
+ }
+ return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+ generator filter.FilterGenerator
+ buf util.Buffer
+ nKeys int
+ offsets []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+ if w.generator == nil {
+ return
+ }
+ w.generator.Add(key)
+ w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+ if w.generator == nil {
+ return
+ }
+ for x := int(offset / filterBase); x > len(w.offsets); {
+ w.generate()
+ }
+}
+
+func (w *filterWriter) finish() {
+ if w.generator == nil {
+ return
+ }
+ // Generate last keys.
+
+ if w.nKeys > 0 {
+ w.generate()
+ }
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ for _, x := range w.offsets {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+ w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+ // Record offset.
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ // Generate filters.
+ if w.nKeys > 0 {
+ w.generator.Generate(&w.buf)
+ w.nKeys = 0
+ }
+}
+
+// Writer is a table writer.
+type Writer struct {
+ writer io.Writer
+ err error
+ // Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ compression opt.Compression
+ blockSize int
+
+ dataBlock blockWriter
+ indexBlock blockWriter
+ filterBlock filterWriter
+ pendingBH blockHandle
+ offset uint64
+ nEntries int
+ // Scratch is allocated large enough for 5 uvarints. The block writers
+ // should not use the first 20 bytes, since those are used to encode the
+ // block handle, which is then passed to the block writer itself.
+ scratch [50]byte
+ comparerScratch []byte
+ compressionScratch []byte
+}
+
+func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
+ // Compress the buffer if necessary.
+ var b []byte
+ if compression == opt.SnappyCompression {
+ // Allocate scratch enough for compression and block trailer.
+ if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
+ w.compressionScratch = make([]byte, n)
+ }
+ compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
+ n := len(compressed)
+ b = compressed[:n+blockTrailerLen]
+ b[n] = blockTypeSnappyCompression
+ } else {
+ tmp := buf.Alloc(blockTrailerLen)
+ tmp[0] = blockTypeNoCompression
+ b = buf.Bytes()
+ }
+
+ // Calculate the checksum.
+ n := len(b) - 4
+ checksum := util.NewCRC(b[:n]).Value()
+ binary.LittleEndian.PutUint32(b[n:], checksum)
+
+ // Write the buffer to the file.
+ _, err = w.writer.Write(b)
+ if err != nil {
+ return
+ }
+ bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
+ w.offset += uint64(len(b))
+ return
+}
+
+func (w *Writer) flushPendingBH(key []byte) {
+ if w.pendingBH.length == 0 {
+ return
+ }
+ var separator []byte
+ if len(key) == 0 {
+ separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
+ } else {
+ separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
+ }
+ if separator == nil {
+ separator = w.dataBlock.prevKey
+ } else {
+ w.comparerScratch = separator
+ }
+ n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
+ // Append the block handle to the index block.
+ w.indexBlock.append(separator, w.scratch[:n])
+ // Reset prev key of the data block.
+ w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
+ // Clear pending block handle.
+ w.pendingBH = blockHandle{}
+}
+
+func (w *Writer) finishBlock() error {
+ w.dataBlock.finish()
+ bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ return err
+ }
+ w.pendingBH = bh
+ // Reset the data block.
+ w.dataBlock.reset()
+ // Flush the filter block.
+ w.filterBlock.flush(w.offset)
+ return nil
+}
+
+// Append appends a key/value pair to the table. The keys passed must
+// be in increasing order.
+//
+// It is safe to modify the contents of the arguments after Append returns.
+func (w *Writer) Append(key, value []byte) error {
+ if w.err != nil {
+ return w.err
+ }
+ if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
+ w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
+ return w.err
+ }
+
+ w.flushPendingBH(key)
+ // Append key/value pair to the data block.
+ w.dataBlock.append(key, value)
+ // Add key to the filter block.
+ w.filterBlock.add(key)
+
+ // Finish the data block if block size target reached.
+ if w.dataBlock.bytesLen() >= w.blockSize {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.nEntries++
+ return nil
+}
+
+// BlocksLen returns number of blocks written so far.
+func (w *Writer) BlocksLen() int {
+ n := w.indexBlock.nEntries
+ if w.pendingBH.length > 0 {
+ // Includes the pending block.
+ n++
+ }
+ return n
+}
+
+// EntriesLen returns number of entries added so far.
+func (w *Writer) EntriesLen() int {
+ return w.nEntries
+}
+
+// BytesLen returns number of bytes written so far.
+func (w *Writer) BytesLen() int {
+ return int(w.offset)
+}
+
+// Close will finalize the table. Calling Append is not possible
+// after Close, but calling BlocksLen, EntriesLen and BytesLen
+// is still possible.
+func (w *Writer) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+
+ // Write the last data block, or an empty data block if there
+ // aren't any data blocks at all.
+ if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.flushPendingBH(nil)
+
+ // Write the filter block.
+ var filterBH blockHandle
+ w.filterBlock.finish()
+ if buf := &w.filterBlock.buf; buf.Len() > 0 {
+ filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
+ if w.err != nil {
+ return w.err
+ }
+ }
+
+ // Write the metaindex block.
+ if filterBH.length > 0 {
+ key := []byte("filter." + w.filter.Name())
+ n := encodeBlockHandle(w.scratch[:20], filterBH)
+ w.dataBlock.append(key, w.scratch[:n])
+ }
+ w.dataBlock.finish()
+ metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the index block.
+ w.indexBlock.finish()
+ indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the table footer.
+ footer := w.scratch[:footerLen]
+ for i := range footer {
+ footer[i] = 0
+ }
+ n := encodeBlockHandle(footer, metaindexBH)
+ encodeBlockHandle(footer[n:], indexBH)
+ copy(footer[footerLen-len(magic):], magic)
+ if _, err := w.writer.Write(footer); err != nil {
+ w.err = err
+ return w.err
+ }
+ w.offset += footerLen
+
+ w.err = errors.New("leveldb/table: writer is closed")
+ return nil
+}
+
+// NewWriter creates a new initialized table writer for the file.
+//
+// Table writer is not safe for concurrent use.
+func NewWriter(f io.Writer, o *opt.Options) *Writer {
+ w := &Writer{
+ writer: f,
+ cmp: o.GetComparer(),
+ filter: o.GetFilter(),
+ compression: o.GetCompression(),
+ blockSize: o.GetBlockSize(),
+ comparerScratch: make([]byte, 0),
+ }
+ // data block
+ w.dataBlock.restartInterval = o.GetBlockRestartInterval()
+ // The first 20-bytes are used for encoding block handle.
+ w.dataBlock.scratch = w.scratch[20:]
+ // index block
+ w.indexBlock.restartInterval = 1
+ w.indexBlock.scratch = w.scratch[20:]
+ // filter block
+ if w.filter != nil {
+ w.filterBlock.generator = w.filter.NewGenerator()
+ w.filterBlock.flush(0)
+ }
+ return w
+}
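+
+// Typical write path (an illustrative sketch; f and o come from the caller):
+//
+//	tw := NewWriter(f, o)
+//	_ = tw.Append([]byte("a"), []byte("1"))
+//	_ = tw.Append([]byte("b"), []byte("2")) // keys must be increasing
+//	_ = tw.Close()                          // writes filter, index and footer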
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
new file mode 100644
index 0000000..0e2b519
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+func shorten(str string) string {
+ if len(str) <= 8 {
+ return str
+ }
+ return str[:3] + ".." + str[len(str)-3:]
+}
+
+var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"}
+
+func shortenb(bytes int) string {
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%d%sB", bytes, bunits[i])
+}
+
+func sshortenb(bytes int) string {
+ if bytes == 0 {
+ return "~"
+ }
+ sign := "+"
+ if bytes < 0 {
+ sign = "-"
+ bytes *= -1
+ }
+ i := 0
+ for ; bytes > 1024 && i < 4; i++ {
+ bytes /= 1024
+ }
+ return fmt.Sprintf("%s%d%sB", sign, bytes, bunits[i])
+}
+
+func sint(x int) string {
+ if x == 0 {
+ return "~"
+ }
+ sign := "+"
+ if x < 0 {
+ sign = "-"
+ x *= -1
+ }
+ return fmt.Sprintf("%s%d", sign, x)
+}
+
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+type fdSorter []storage.FileDesc
+
+func (p fdSorter) Len() int {
+ return len(p)
+}
+
+func (p fdSorter) Less(i, j int) bool {
+ return p[i].Num < p[j].Num
+}
+
+func (p fdSorter) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+func sortFds(fds []storage.FileDesc) {
+ sort.Sort(fdSorter(fds))
+}
+
+func ensureBuffer(b []byte, n int) []byte {
+ if cap(b) < n {
+ return make([]byte, n)
+ }
+ return b[:n]
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
new file mode 100644
index 0000000..21de242
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer.go
@@ -0,0 +1,293 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package util
+
+// This is a copy of the Go standard library's bytes.Buffer with some
+// modifications and some features stripped.
+
+import (
+ "bytes"
+ "io"
+)
+
+// A Buffer is a variable-sized buffer of bytes with Read and Write methods.
+// The zero value for Buffer is an empty buffer ready to use.
+type Buffer struct {
+ buf []byte // contents are the bytes buf[off : len(buf)]
+ off int // read at &buf[off], write at &buf[len(buf)]
+ bootstrap [64]byte // memory to hold first slice; helps small buffers (Printf) avoid allocation.
+}
+
+// Bytes returns a slice of the contents of the unread portion of the buffer;
+// len(b.Bytes()) == b.Len(). If the caller changes the contents of the
+// returned slice, the contents of the buffer will change provided there
+// are no intervening method calls on the Buffer.
+func (b *Buffer) Bytes() []byte { return b.buf[b.off:] }
+
+// String returns the contents of the unread portion of the buffer
+// as a string. If the Buffer is a nil pointer, it returns "".
+func (b *Buffer) String() string {
+ if b == nil {
+ // Special case, useful in debugging.
+ return ""
+ }
+ return string(b.buf[b.off:])
+}
+
+// Len returns the number of bytes of the unread portion of the buffer;
+// b.Len() == len(b.Bytes()).
+func (b *Buffer) Len() int { return len(b.buf) - b.off }
+
+// Truncate discards all but the first n unread bytes from the buffer.
+// It panics if n is negative or greater than the length of the buffer.
+func (b *Buffer) Truncate(n int) {
+ switch {
+ case n < 0 || n > b.Len():
+ panic("leveldb/util.Buffer: truncation out of range")
+ case n == 0:
+ // Reuse buffer space.
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+n]
+}
+
+// Reset resets the buffer so it has no content.
+// b.Reset() is the same as b.Truncate(0).
+func (b *Buffer) Reset() { b.Truncate(0) }
+
+// grow grows the buffer to guarantee space for n more bytes.
+// It returns the index where bytes should be written.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) grow(n int) int {
+ m := b.Len()
+ // If buffer is empty, reset to recover space.
+ if m == 0 && b.off != 0 {
+ b.Truncate(0)
+ }
+ if len(b.buf)+n > cap(b.buf) {
+ var buf []byte
+ if b.buf == nil && n <= len(b.bootstrap) {
+ buf = b.bootstrap[0:]
+ } else if m+n <= cap(b.buf)/2 {
+ // We can slide things down instead of allocating a new
+ // slice. We only need m+n <= cap(b.buf) to slide, but
+ // we instead let capacity get twice as large so we
+ // don't spend all our time copying.
+ copy(b.buf[:], b.buf[b.off:])
+ buf = b.buf[:m]
+ } else {
+ // not enough space anywhere
+ buf = makeSlice(2*cap(b.buf) + n)
+ copy(buf, b.buf[b.off:])
+ }
+ b.buf = buf
+ b.off = 0
+ }
+ b.buf = b.buf[0 : b.off+m+n]
+ return b.off + m
+}
+
+// Alloc allocates an n-byte slice from the buffer, growing the buffer as
+// needed. If n is negative, Alloc will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Alloc(n int) []byte {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Alloc: negative count")
+ }
+ m := b.grow(n)
+ return b.buf[m:]
+}
+
+// Grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After Grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+// If n is negative, Grow will panic.
+// If the buffer can't grow it will panic with bytes.ErrTooLarge.
+func (b *Buffer) Grow(n int) {
+ if n < 0 {
+ panic("leveldb/util.Buffer.Grow: negative count")
+ }
+ m := b.grow(n)
+ b.buf = b.buf[0:m]
+}
+
+// Write appends the contents of p to the buffer, growing the buffer as
+// needed. The return value n is the length of p; err is always nil. If the
+// buffer becomes too large, Write will panic with bytes.ErrTooLarge.
+func (b *Buffer) Write(p []byte) (n int, err error) {
+ m := b.grow(len(p))
+ return copy(b.buf[m:], p), nil
+}
+
+// MinRead is the minimum slice size passed to a Read call by
+// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond
+// what is required to hold the contents of r, ReadFrom will not grow the
+// underlying buffer.
+const MinRead = 512
+
+// ReadFrom reads data from r until EOF and appends it to the buffer, growing
+// the buffer as needed. The return value n is the number of bytes read. Any
+// error except io.EOF encountered during the read is also returned. If the
+// buffer becomes too large, ReadFrom will panic with bytes.ErrTooLarge.
+func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) {
+ // If buffer is empty, reset to recover space.
+ if b.off >= len(b.buf) {
+ b.Truncate(0)
+ }
+ for {
+ if free := cap(b.buf) - len(b.buf); free < MinRead {
+ // not enough space at end
+ newBuf := b.buf
+ if b.off+free < MinRead {
+ // not enough space using beginning of buffer;
+ // double buffer capacity
+ newBuf = makeSlice(2*cap(b.buf) + MinRead)
+ }
+ copy(newBuf, b.buf[b.off:])
+ b.buf = newBuf[:len(b.buf)-b.off]
+ b.off = 0
+ }
+ m, e := r.Read(b.buf[len(b.buf):cap(b.buf)])
+ b.buf = b.buf[0 : len(b.buf)+m]
+ n += int64(m)
+ if e == io.EOF {
+ break
+ }
+ if e != nil {
+ return n, e
+ }
+ }
+ return n, nil // err is EOF, so return nil explicitly
+}
+
+// makeSlice allocates a slice of size n. If the allocation fails, it panics
+// with bytes.ErrTooLarge.
+func makeSlice(n int) []byte {
+ // If the make fails, give a known error.
+ defer func() {
+ if recover() != nil {
+ panic(bytes.ErrTooLarge)
+ }
+ }()
+ return make([]byte, n)
+}
+
+// WriteTo writes data to w until the buffer is drained or an error occurs.
+// The return value n is the number of bytes written; it always fits into an
+// int, but it is int64 to match the io.WriterTo interface. Any error
+// encountered during the write is also returned.
+func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) {
+ if b.off < len(b.buf) {
+ nBytes := b.Len()
+ m, e := w.Write(b.buf[b.off:])
+ if m > nBytes {
+ panic("leveldb/util.Buffer.WriteTo: invalid Write count")
+ }
+ b.off += m
+ n = int64(m)
+ if e != nil {
+ return n, e
+ }
+ // all bytes should have been written, by definition of
+ // Write method in io.Writer
+ if m != nBytes {
+ return n, io.ErrShortWrite
+ }
+ }
+ // Buffer is now empty; reset.
+ b.Truncate(0)
+ return
+}
+
+// WriteByte appends the byte c to the buffer, growing the buffer as needed.
+// The returned error is always nil, but is included to match bufio.Writer's
+// WriteByte. If the buffer becomes too large, WriteByte will panic with
+// bytes.ErrTooLarge.
+func (b *Buffer) WriteByte(c byte) error {
+ m := b.grow(1)
+ b.buf[m] = c
+ return nil
+}
+
+// Read reads the next len(p) bytes from the buffer or until the buffer
+// is drained. The return value n is the number of bytes read. If the
+// buffer has no data to return, err is io.EOF (unless len(p) is zero);
+// otherwise it is nil.
+func (b *Buffer) Read(p []byte) (n int, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ if len(p) == 0 {
+ return
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, b.buf[b.off:])
+ b.off += n
+ return
+}
+
+// Next returns a slice containing the next n bytes from the buffer,
+// advancing the buffer as if the bytes had been returned by Read.
+// If there are fewer than n bytes in the buffer, Next returns the entire buffer.
+// The slice is only valid until the next call to a read or write method.
+func (b *Buffer) Next(n int) []byte {
+ m := b.Len()
+ if n > m {
+ n = m
+ }
+ data := b.buf[b.off : b.off+n]
+ b.off += n
+ return data
+}
+
+// ReadByte reads and returns the next byte from the buffer.
+// If no byte is available, it returns error io.EOF.
+func (b *Buffer) ReadByte() (c byte, err error) {
+ if b.off >= len(b.buf) {
+ // Buffer is empty, reset to recover space.
+ b.Truncate(0)
+ return 0, io.EOF
+ }
+ c = b.buf[b.off]
+ b.off++
+ return c, nil
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to and including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns the data read before the error and the error itself (often io.EOF).
+// ReadBytes returns err != nil if and only if the returned data does not end in
+// delim.
+func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) {
+ slice, err := b.readSlice(delim)
+ // return a copy of slice. The buffer's backing array may
+ // be overwritten by later calls.
+ line = append(line, slice...)
+ return
+}
+
+// readSlice is like ReadBytes but returns a reference to internal buffer data.
+func (b *Buffer) readSlice(delim byte) (line []byte, err error) {
+ i := bytes.IndexByte(b.buf[b.off:], delim)
+ end := b.off + i + 1
+ if i < 0 {
+ end = len(b.buf)
+ err = io.EOF
+ }
+ line = b.buf[b.off:end]
+ b.off = end
+ return line, err
+}
+
+// NewBuffer creates and initializes a new Buffer using buf as its initial
+// contents. It is intended to prepare a Buffer to read existing data. It
+// can also be used to size the internal buffer for writing. To do that,
+// buf should have the desired capacity but a length of zero.
+//
+// In most cases, new(Buffer) (or just declaring a Buffer variable) is
+// sufficient to initialize a Buffer.
+func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} }
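
A minimal usage sketch of this Buffer, using only the API shown above:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	var b util.Buffer // the zero value is ready to use

	b.WriteByte('h')
	b.Write([]byte("ello, "))

	// Alloc hands out a writable slice backed by the buffer itself,
	// avoiding an intermediate copy.
	copy(b.Alloc(5), "world")

	line, _ := b.ReadBytes(',')
	fmt.Printf("%q %q\n", line, b.String()) // "hello," " world"
}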
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
new file mode 100644
index 0000000..2f3db97
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_pool.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type buffer struct {
+ b []byte
+ miss int
+}
+
+// BufferPool is a pool of byte buffers, bucketed by capacity around a baseline size.
+type BufferPool struct {
+ pool [6]chan []byte
+ size [5]uint32
+ sizeMiss [5]uint32
+ sizeHalf [5]uint32
+ baseline [4]int
+ baseline0 int
+
+ mu sync.RWMutex
+ closed bool
+ closeC chan struct{}
+
+ get uint32
+ put uint32
+ half uint32
+ less uint32
+ equal uint32
+ greater uint32
+ miss uint32
+}
+
+func (p *BufferPool) poolNum(n int) int {
+ if n <= p.baseline0 && n > p.baseline0/2 {
+ return 0
+ }
+ for i, x := range p.baseline {
+ if n <= x {
+ return i + 1
+ }
+ }
+ return len(p.baseline) + 1
+}
+
+// Get returns a buffer of length n, reusing a pooled buffer when possible.
+func (p *BufferPool) Get(n int) []byte {
+ if p == nil {
+ return make([]byte, n)
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return make([]byte, n)
+ }
+
+ atomic.AddUint32(&p.get, 1)
+
+ poolNum := p.poolNum(n)
+ pool := p.pool[poolNum]
+ if poolNum == 0 {
+ // Fast path.
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ select {
+ case pool <- b:
+ default:
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ return make([]byte, n, p.baseline0)
+ } else {
+ sizePtr := &p.size[poolNum-1]
+
+ select {
+ case b := <-pool:
+ switch {
+ case cap(b) > n:
+ if cap(b)-n >= n {
+ atomic.AddUint32(&p.half, 1)
+ sizeHalfPtr := &p.sizeHalf[poolNum-1]
+ if atomic.AddUint32(sizeHalfPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(cap(b)/2))
+ atomic.StoreUint32(sizeHalfPtr, 0)
+ } else {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ return make([]byte, n)
+ } else {
+ atomic.AddUint32(&p.less, 1)
+ return b[:n]
+ }
+ case cap(b) == n:
+ atomic.AddUint32(&p.equal, 1)
+ return b[:n]
+ default:
+ atomic.AddUint32(&p.greater, 1)
+ if uint32(cap(b)) >= atomic.LoadUint32(sizePtr) {
+ select {
+ case pool <- b:
+ default:
+ }
+ }
+ }
+ default:
+ atomic.AddUint32(&p.miss, 1)
+ }
+
+ if size := atomic.LoadUint32(sizePtr); uint32(n) > size {
+ if size == 0 {
+ atomic.CompareAndSwapUint32(sizePtr, 0, uint32(n))
+ } else {
+ sizeMissPtr := &p.sizeMiss[poolNum-1]
+ if atomic.AddUint32(sizeMissPtr, 1) == 20 {
+ atomic.StoreUint32(sizePtr, uint32(n))
+ atomic.StoreUint32(sizeMissPtr, 0)
+ }
+ }
+ return make([]byte, n)
+ } else {
+ return make([]byte, n, size)
+ }
+ }
+}
+
+// Put adds the given buffer to the pool.
+func (p *BufferPool) Put(b []byte) {
+ if p == nil {
+ return
+ }
+
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+
+ if p.closed {
+ return
+ }
+
+ atomic.AddUint32(&p.put, 1)
+
+ pool := p.pool[p.poolNum(cap(b))]
+ select {
+ case pool <- b:
+ default:
+ }
+
+}
+
+func (p *BufferPool) Close() {
+ if p == nil {
+ return
+ }
+
+ p.mu.Lock()
+ if !p.closed {
+ p.closed = true
+ p.closeC <- struct{}{}
+ }
+ p.mu.Unlock()
+}
+
+func (p *BufferPool) String() string {
+ if p == nil {
+ return ""
+ }
+
+ return fmt.Sprintf("BufferPool{B·%d Z·%v Zm·%v Zh·%v G·%d P·%d H·%d <·%d =·%d >·%d M·%d}",
+ p.baseline0, p.size, p.sizeMiss, p.sizeHalf, p.get, p.put, p.half, p.less, p.equal, p.greater, p.miss)
+}
+
+func (p *BufferPool) drain() {
+ ticker := time.NewTicker(2 * time.Second)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ for _, ch := range p.pool {
+ select {
+ case <-ch:
+ default:
+ }
+ }
+ case <-p.closeC:
+ close(p.closeC)
+ for _, ch := range p.pool {
+ close(ch)
+ }
+ return
+ }
+ }
+}
+
+// NewBufferPool creates a new initialized 'buffer pool'.
+func NewBufferPool(baseline int) *BufferPool {
+ if baseline <= 0 {
+ panic("baseline can't be <= 0")
+ }
+ p := &BufferPool{
+ baseline0: baseline,
+ baseline: [...]int{baseline / 4, baseline / 2, baseline * 2, baseline * 4},
+ closeC: make(chan struct{}, 1),
+ }
+ for i, cap := range []int{2, 2, 4, 4, 2, 1} {
+ p.pool[i] = make(chan []byte, cap)
+ }
+ go p.drain()
+ return p
+}
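
A minimal usage sketch of the pool, using only the exported API above; the baseline of 32 KiB is an arbitrary choice:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	p := util.NewBufferPool(32 * 1024) // baseline: the "typical" buffer size
	defer p.Close()

	b := p.Get(4096) // len(b) == 4096; reuses a pooled buffer when one fits
	copy(b, "payload")
	p.Put(b) // hand it back for reuse

	fmt.Println(p) // stats string: gets, puts, size hits/misses, ...
}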
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
new file mode 100644
index 0000000..631c9d6
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/crc32.go
@@ -0,0 +1,30 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "hash/crc32"
+)
+
+var table = crc32.MakeTable(crc32.Castagnoli)
+
+// CRC is a CRC-32 checksum computed using Castagnoli's polynomial.
+type CRC uint32
+
+// NewCRC creates a new crc based on the given bytes.
+func NewCRC(b []byte) CRC {
+ return CRC(0).Update(b)
+}
+
+// Update updates the crc with the given bytes.
+func (c CRC) Update(b []byte) CRC {
+ return CRC(crc32.Update(uint32(c), table, b))
+}
+
+// Value returns a masked crc.
+func (c CRC) Value() uint32 {
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
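
A short usage sketch of the masked CRC, using only the API above:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	c := util.NewCRC([]byte("hello "))
	c = c.Update([]byte("world")) // updates compose incrementally
	fmt.Printf("masked crc: %#08x\n", c.Value())
}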
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
new file mode 100644
index 0000000..7f3fa4e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+import (
+ "encoding/binary"
+)
+
+// Hash returns the hash of the given data.
+func Hash(data []byte, seed uint32) uint32 {
+ // Similar to murmur hash
+ const (
+ m = uint32(0xc6a4a793)
+ r = uint32(24)
+ )
+ var (
+ h = seed ^ (uint32(len(data)) * m)
+ i int
+ )
+
+ for n := len(data) - len(data)%4; i < n; i += 4 {
+ h += binary.LittleEndian.Uint32(data[i:])
+ h *= m
+ h ^= (h >> 16)
+ }
+
+ switch len(data) - i {
+ default:
+ panic("not reached")
+ case 3:
+ h += uint32(data[i+2]) << 16
+ fallthrough
+ case 2:
+ h += uint32(data[i+1]) << 8
+ fallthrough
+ case 1:
+ h += uint32(data[i])
+ h *= m
+ h ^= (h >> r)
+ case 0:
+ }
+
+ return h
+}
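
A short usage sketch. The seed value is an assumption (0xbc9f1d34 is the seed LevelDB's bloom filter traditionally uses); any uint32 works:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	// assumption: an arbitrary example seed
	h := util.Hash([]byte("example.com"), 0xbc9f1d34)
	fmt.Printf("bucket: %d\n", h%1024) // e.g. map a key to one of 1024 buckets
}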
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
new file mode 100644
index 0000000..8515958
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/range.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package util
+
+// Range is a key range.
+type Range struct {
+ // Start of the key range, include in the range.
+ Start []byte
+
+ // Limit of the key range, not include in the range.
+ Limit []byte
+}
+
+// BytesPrefix returns a key range that satisfies the given prefix.
+// This is only applicable to the standard 'bytes comparer'.
+func BytesPrefix(prefix []byte) *Range {
+ var limit []byte
+ for i := len(prefix) - 1; i >= 0; i-- {
+ c := prefix[i]
+ if c < 0xff {
+ limit = make([]byte, i+1)
+ copy(limit, prefix)
+ limit[i] = c + 1
+ break
+ }
+ }
+ return &Range{prefix, limit}
+}
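
A quick sketch of how BytesPrefix derives the exclusive limit key:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

func main() {
	r := util.BytesPrefix([]byte("user:"))
	// ':' is 0x3a, so the exclusive limit is "user;" (0x3b).
	fmt.Printf("start=%q limit=%q\n", r.Start, r.Limit)
}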
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
new file mode 100644
index 0000000..80614af
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/util.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package util provides utilities used throughout leveldb.
+package util
+
+import (
+ "errors"
+)
+
+var (
+ ErrReleased = errors.New("leveldb: resource already released")
+ ErrHasReleaser = errors.New("leveldb: releaser already defined")
+)
+
+// Releaser is the interface that wraps the basic Release method.
+type Releaser interface {
+ // Release releases the associated resources. Release should always succeed
+ // and may be called multiple times without error.
+ Release()
+}
+
+// ReleaseSetter is the interface that wraps the basic SetReleaser method.
+type ReleaseSetter interface {
+ // SetReleaser associates the given releaser with the resources. The
+ // releaser will be called once the corresponding resources are released.
+ // Calling SetReleaser with nil clears the releaser.
+ //
+ // This will panic if a releaser is already present or the corresponding
+ // resource has already been released. The releaser should be cleared
+ // before a new one is assigned.
+ SetReleaser(releaser Releaser)
+}
+
+// BasicReleaser provides basic implementation of Releaser and ReleaseSetter.
+type BasicReleaser struct {
+ releaser Releaser
+ released bool
+}
+
+// Released returns whether Release method already called.
+func (r *BasicReleaser) Released() bool {
+ return r.released
+}
+
+// Release implements Releaser.Release.
+func (r *BasicReleaser) Release() {
+ if !r.released {
+ if r.releaser != nil {
+ r.releaser.Release()
+ r.releaser = nil
+ }
+ r.released = true
+ }
+}
+
+// SetReleaser implements ReleaseSetter.SetReleaser.
+func (r *BasicReleaser) SetReleaser(releaser Releaser) {
+ if r.released {
+ panic(ErrReleased)
+ }
+ if r.releaser != nil && releaser != nil {
+ panic(ErrHasReleaser)
+ }
+ r.releaser = releaser
+}
+
+type NoopReleaser struct{}
+
+func (NoopReleaser) Release() {}
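
A minimal sketch of how a resource type can embed BasicReleaser to inherit this behavior; the handle and printReleaser types here are hypothetical:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/util"
)

// handle is a hypothetical resource type that embeds BasicReleaser to
// inherit the Release/SetReleaser behavior shown above.
type handle struct {
	util.BasicReleaser
}

type printReleaser struct{}

func (printReleaser) Release() { fmt.Println("resource released") }

func main() {
	h := &handle{}
	h.SetReleaser(printReleaser{})
	h.Release()               // runs the releaser exactly once
	h.Release()               // safe no-op on repeat calls
	fmt.Println(h.Released()) // true
}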
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version.go b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
new file mode 100644
index 0000000..73f272a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/version.go
@@ -0,0 +1,528 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type tSet struct {
+ level int
+ table *tFile
+}
+
+type version struct {
+ s *session
+
+ levels []tFiles
+
+ // Level that should be compacted next and its compaction score.
+ // Score < 1 means compaction is not strictly needed. These fields
+ // are initialized by computeCompaction()
+ cLevel int
+ cScore float64
+
+ cSeek unsafe.Pointer
+
+ closing bool
+ ref int
+ released bool
+}
+
+func newVersion(s *session) *version {
+ return &version{s: s}
+}
+
+func (v *version) incref() {
+ if v.released {
+ panic("already released")
+ }
+
+ v.ref++
+ if v.ref == 1 {
+ // Incr file ref.
+ for _, tt := range v.levels {
+ for _, t := range tt {
+ v.s.addFileRef(t.fd, 1)
+ }
+ }
+ }
+}
+
+func (v *version) releaseNB() {
+ v.ref--
+ if v.ref > 0 {
+ return
+ } else if v.ref < 0 {
+ panic("negative version ref")
+ }
+
+ for _, tt := range v.levels {
+ for _, t := range tt {
+ if v.s.addFileRef(t.fd, -1) == 0 {
+ v.s.tops.remove(t)
+ }
+ }
+ }
+
+ v.released = true
+}
+
+func (v *version) release() {
+ v.s.vmu.Lock()
+ v.releaseNB()
+ v.s.vmu.Unlock()
+}
+
+func (v *version) walkOverlapping(aux tFiles, ikey internalKey, f func(level int, t *tFile) bool, lf func(level int) bool) {
+ ukey := ikey.ukey()
+
+ // Aux level.
+ if aux != nil {
+ for _, t := range aux {
+ if t.overlaps(v.s.icmp, ukey, ukey) {
+ if !f(-1, t) {
+ return
+ }
+ }
+ }
+
+ if lf != nil && !lf(-1) {
+ return
+ }
+ }
+
+ // Walk tables level-by-level.
+ for level, tables := range v.levels {
+ if len(tables) == 0 {
+ continue
+ }
+
+ if level == 0 {
+ // Level-0 files may overlap each other. Find all files that
+ // overlap ukey.
+ for _, t := range tables {
+ if t.overlaps(v.s.icmp, ukey, ukey) {
+ if !f(level, t) {
+ return
+ }
+ }
+ }
+ } else {
+ if i := tables.searchMax(v.s.icmp, ikey); i < len(tables) {
+ t := tables[i]
+ if v.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ if !f(level, t) {
+ return
+ }
+ }
+ }
+ }
+
+ if lf != nil && !lf(level) {
+ return
+ }
+ }
+}
+
+func (v *version) get(aux tFiles, ikey internalKey, ro *opt.ReadOptions, noValue bool) (value []byte, tcomp bool, err error) {
+ if v.closing {
+ return nil, false, ErrClosed
+ }
+
+ ukey := ikey.ukey()
+
+ var (
+ tset *tSet
+ tseek bool
+
+ // Level-0.
+ zfound bool
+ zseq uint64
+ zkt keyType
+ zval []byte
+ )
+
+ err = ErrNotFound
+
+ // Since entries never hop across levels, finding a key/value
+ // in a smaller level makes later levels irrelevant.
+ v.walkOverlapping(aux, ikey, func(level int, t *tFile) bool {
+ if level >= 0 && !tseek {
+ if tset == nil {
+ tset = &tSet{level, t}
+ } else {
+ tseek = true
+ }
+ }
+
+ var (
+ fikey, fval []byte
+ ferr error
+ )
+ if noValue {
+ fikey, ferr = v.s.tops.findKey(t, ikey, ro)
+ } else {
+ fikey, fval, ferr = v.s.tops.find(t, ikey, ro)
+ }
+
+ switch ferr {
+ case nil:
+ case ErrNotFound:
+ return true
+ default:
+ err = ferr
+ return false
+ }
+
+ if fukey, fseq, fkt, fkerr := parseInternalKey(fikey); fkerr == nil {
+ if v.s.icmp.uCompare(ukey, fukey) == 0 {
+ // Levels <= 0 may overlap each other.
+ if level <= 0 {
+ if fseq >= zseq {
+ zfound = true
+ zseq = fseq
+ zkt = fkt
+ zval = fval
+ }
+ } else {
+ switch fkt {
+ case keyTypeVal:
+ value = fval
+ err = nil
+ case keyTypeDel:
+ default:
+ panic("leveldb: invalid internalKey type")
+ }
+ return false
+ }
+ }
+ } else {
+ err = fkerr
+ return false
+ }
+
+ return true
+ }, func(level int) bool {
+ if zfound {
+ switch zkt {
+ case keyTypeVal:
+ value = zval
+ err = nil
+ case keyTypeDel:
+ default:
+ panic("leveldb: invalid internalKey type")
+ }
+ return false
+ }
+
+ return true
+ })
+
+ if tseek && tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+
+ return
+}
+
+func (v *version) sampleSeek(ikey internalKey) (tcomp bool) {
+ var tset *tSet
+
+ v.walkOverlapping(nil, ikey, func(level int, t *tFile) bool {
+ if tset == nil {
+ tset = &tSet{level, t}
+ return true
+ }
+ if tset.table.consumeSeek() <= 0 {
+ tcomp = atomic.CompareAndSwapPointer(&v.cSeek, nil, unsafe.Pointer(tset))
+ }
+ return false
+ }, nil)
+
+ return
+}
+
+func (v *version) getIterators(slice *util.Range, ro *opt.ReadOptions) (its []iterator.Iterator) {
+ strict := opt.GetStrict(v.s.o.Options, ro, opt.StrictReader)
+ for level, tables := range v.levels {
+ if level == 0 {
+ // Merge all level zero files together since they may overlap.
+ for _, t := range tables {
+ its = append(its, v.s.tops.newIterator(t, slice, ro))
+ }
+ } else if len(tables) != 0 {
+ its = append(its, iterator.NewIndexedIterator(tables.newIndexIterator(v.s.tops, v.s.icmp, slice, ro), strict))
+ }
+ }
+ return
+}
+
+func (v *version) newStaging() *versionStaging {
+ return &versionStaging{base: v}
+}
+
+// spawn creates a new version based on this version.
+func (v *version) spawn(r *sessionRecord) *version {
+ staging := v.newStaging()
+ staging.commit(r)
+ return staging.finish()
+}
+
+func (v *version) fillRecord(r *sessionRecord) {
+ for level, tables := range v.levels {
+ for _, t := range tables {
+ r.addTableFile(level, t)
+ }
+ }
+}
+
+func (v *version) tLen(level int) int {
+ if level < len(v.levels) {
+ return len(v.levels[level])
+ }
+ return 0
+}
+
+func (v *version) offsetOf(ikey internalKey) (n int64, err error) {
+ for level, tables := range v.levels {
+ for _, t := range tables {
+ if v.s.icmp.Compare(t.imax, ikey) <= 0 {
+ // Entire file is before "ikey", so just add the file size
+ n += t.size
+ } else if v.s.icmp.Compare(t.imin, ikey) > 0 {
+ // Entire file is after "ikey", so ignore
+ if level > 0 {
+ // Files other than level 0 are sorted by meta->min, so
+ // no further files in this level will contain data for
+ // "ikey".
+ break
+ }
+ } else {
+ // "ikey" falls in the range for this table. Add the
+ // approximate offset of "ikey" within the table.
+ if m, err := v.s.tops.offsetOf(t, ikey); err == nil {
+ n += m
+ } else {
+ return 0, err
+ }
+ }
+ }
+ }
+
+ return
+}
+
+func (v *version) pickMemdbLevel(umin, umax []byte, maxLevel int) (level int) {
+ if maxLevel > 0 {
+ if len(v.levels) == 0 {
+ return maxLevel
+ }
+ if !v.levels[0].overlaps(v.s.icmp, umin, umax, true) {
+ var overlaps tFiles
+ for ; level < maxLevel; level++ {
+ if pLevel := level + 1; pLevel >= len(v.levels) {
+ return maxLevel
+ } else if v.levels[pLevel].overlaps(v.s.icmp, umin, umax, false) {
+ break
+ }
+ if gpLevel := level + 2; gpLevel < len(v.levels) {
+ overlaps = v.levels[gpLevel].getOverlaps(overlaps, v.s.icmp, umin, umax, false)
+ if overlaps.size() > int64(v.s.o.GetCompactionGPOverlaps(level)) {
+ break
+ }
+ }
+ }
+ }
+ }
+ return
+}
+
+func (v *version) computeCompaction() {
+ // Precomputed best level for next compaction
+ bestLevel := int(-1)
+ bestScore := float64(-1)
+
+ statFiles := make([]int, len(v.levels))
+ statSizes := make([]string, len(v.levels))
+ statScore := make([]string, len(v.levels))
+ statTotSize := int64(0)
+
+ for level, tables := range v.levels {
+ var score float64
+ size := tables.size()
+ if level == 0 {
+ // We treat level-0 specially by bounding the number of files
+ // instead of number of bytes for two reasons:
+ //
+ // (1) With larger write-buffer sizes, it is nice not to do too
+ // many level-0 compaction.
+ //
+ // (2) The files in level-0 are merged on every read and
+ // therefore we wish to avoid too many files when the individual
+ // file size is small (perhaps because of a small write-buffer
+ // setting, or very high compression ratios, or lots of
+ // overwrites/deletions).
+ score = float64(len(tables)) / float64(v.s.o.GetCompactionL0Trigger())
+ } else {
+ score = float64(size) / float64(v.s.o.GetCompactionTotalSize(level))
+ }
+
+ if score > bestScore {
+ bestLevel = level
+ bestScore = score
+ }
+
+ statFiles[level] = len(tables)
+ statSizes[level] = shortenb(int(size))
+ statScore[level] = fmt.Sprintf("%.2f", score)
+ statTotSize += size
+ }
+
+ v.cLevel = bestLevel
+ v.cScore = bestScore
+
+ v.s.logf("version@stat F·%v S·%s%v Sc·%v", statFiles, shortenb(int(statTotSize)), statSizes, statScore)
+}
+
+func (v *version) needCompaction() bool {
+ return v.cScore >= 1 || atomic.LoadPointer(&v.cSeek) != nil
+}
+
+type tablesScratch struct {
+ added map[int64]atRecord
+ deleted map[int64]struct{}
+}
+
+type versionStaging struct {
+ base *version
+ levels []tablesScratch
+}
+
+func (p *versionStaging) getScratch(level int) *tablesScratch {
+ if level >= len(p.levels) {
+ newLevels := make([]tablesScratch, level+1)
+ copy(newLevels, p.levels)
+ p.levels = newLevels
+ }
+ return &(p.levels[level])
+}
+
+func (p *versionStaging) commit(r *sessionRecord) {
+ // Deleted tables.
+ for _, r := range r.deletedTables {
+ scratch := p.getScratch(r.level)
+ if r.level < len(p.base.levels) && len(p.base.levels[r.level]) > 0 {
+ if scratch.deleted == nil {
+ scratch.deleted = make(map[int64]struct{})
+ }
+ scratch.deleted[r.num] = struct{}{}
+ }
+ if scratch.added != nil {
+ delete(scratch.added, r.num)
+ }
+ }
+
+ // New tables.
+ for _, r := range r.addedTables {
+ scratch := p.getScratch(r.level)
+ if scratch.added == nil {
+ scratch.added = make(map[int64]atRecord)
+ }
+ scratch.added[r.num] = r
+ if scratch.deleted != nil {
+ delete(scratch.deleted, r.num)
+ }
+ }
+}
+
+func (p *versionStaging) finish() *version {
+ // Build new version.
+ nv := newVersion(p.base.s)
+ numLevel := len(p.levels)
+ if len(p.base.levels) > numLevel {
+ numLevel = len(p.base.levels)
+ }
+ nv.levels = make([]tFiles, numLevel)
+ for level := 0; level < numLevel; level++ {
+ var baseTables tFiles
+ if level < len(p.base.levels) {
+ baseTables = p.base.levels[level]
+ }
+
+ if level < len(p.levels) {
+ scratch := p.levels[level]
+
+ var nt tFiles
+ // Prealloc list if possible.
+ if n := len(baseTables) + len(scratch.added) - len(scratch.deleted); n > 0 {
+ nt = make(tFiles, 0, n)
+ }
+
+ // Base tables.
+ for _, t := range baseTables {
+ if _, ok := scratch.deleted[t.fd.Num]; ok {
+ continue
+ }
+ if _, ok := scratch.added[t.fd.Num]; ok {
+ continue
+ }
+ nt = append(nt, t)
+ }
+
+ // New tables.
+ for _, r := range scratch.added {
+ nt = append(nt, tableFileFromRecord(r))
+ }
+
+ if len(nt) != 0 {
+ // Sort tables.
+ if level == 0 {
+ nt.sortByNum()
+ } else {
+ nt.sortByKey(p.base.s.icmp)
+ }
+
+ nv.levels[level] = nt
+ }
+ } else {
+ nv.levels[level] = baseTables
+ }
+ }
+
+ // Trim levels.
+ n := len(nv.levels)
+ for ; n > 0 && nv.levels[n-1] == nil; n-- {
+ }
+ nv.levels = nv.levels[:n]
+
+ // Compute compaction score for new version.
+ nv.computeCompaction()
+
+ return nv
+}
+
+type versionReleaser struct {
+ v *version
+ once bool
+}
+
+func (vr *versionReleaser) Release() {
+ v := vr.v
+ v.s.vmu.Lock()
+ if !vr.once {
+ v.releaseNB()
+ vr.once = true
+ }
+ v.s.vmu.Unlock()
+}
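
The scoring rule in computeCompaction above can be illustrated standalone. The trigger and per-level size values below are hypothetical stand-ins for the opt getters, roughly matching LevelDB's default shape:

package main

import "fmt"

// Hypothetical trigger value standing in for GetCompactionL0Trigger.
const l0Trigger = 4 // files that trigger a level-0 compaction

func totalSize(level int) int64 {
	// assumption: 10 MiB for level 1, growing 10x per level
	s := int64(10 << 20)
	for i := 1; i < level; i++ {
		s *= 10
	}
	return s
}

func score(level, numFiles int, size int64) float64 {
	if level == 0 {
		return float64(numFiles) / float64(l0Trigger) // bound the file count
	}
	return float64(size) / float64(totalSize(level)) // bound the total bytes
}

func main() {
	fmt.Printf("L0: %.2f\n", score(0, 6, 0))       // 1.50 -> needs compaction
	fmt.Printf("L1: %.2f\n", score(1, 0, 5<<20))   // 0.50 -> fine
	fmt.Printf("L2: %.2f\n", score(2, 0, 200<<20)) // 2.00 -> needs compaction
}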
diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS
new file mode 100644
index 0000000..2b00ddb
--- /dev/null
+++ b/vendor/golang.org/x/crypto/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS
new file mode 100644
index 0000000..1fbd3e9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
new file mode 100644
index 0000000..c7f8c7e
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go
@@ -0,0 +1,222 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// In Go 1.13, the ed25519 package was promoted to the standard library as
+// crypto/ed25519, and this package became a wrapper for the standard library one.
+//
+// +build !go1.13
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// https://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
+package ed25519
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+import (
+ "bytes"
+ "crypto"
+ cryptorand "crypto/rand"
+ "crypto/sha512"
+ "errors"
+ "io"
+ "strconv"
+
+ "golang.org/x/crypto/ed25519/internal/edwards25519"
+)
+
+const (
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+type PublicKey []byte
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, priv[32:])
+ return PublicKey(publicKey)
+}
+
+// Seed returns the private key seed corresponding to priv. It is provided for
+// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
+// in this package.
+func (priv PrivateKey) Seed() []byte {
+ seed := make([]byte, SeedSize)
+ copy(seed, priv[:32])
+ return seed
+}
+
+// Sign signs the given message with priv.
+// Ed25519 performs two passes over messages to be signed and therefore cannot
+// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
+// indicate the message hasn't been hashed. This can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+ if opts.HashFunc() != crypto.Hash(0) {
+ return nil, errors.New("ed25519: cannot sign hashed message")
+ }
+
+ return Sign(priv, message), nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+ if rand == nil {
+ rand = cryptorand.Reader
+ }
+
+ seed := make([]byte, SeedSize)
+ if _, err := io.ReadFull(rand, seed); err != nil {
+ return nil, nil, err
+ }
+
+ privateKey := NewKeyFromSeed(seed)
+ publicKey := make([]byte, PublicKeySize)
+ copy(publicKey, privateKey[32:])
+
+ return publicKey, privateKey, nil
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ if l := len(seed); l != SeedSize {
+ panic("ed25519: bad seed length: " + strconv.Itoa(l))
+ }
+
+ digest := sha512.Sum512(seed)
+ digest[0] &= 248
+ digest[31] &= 127
+ digest[31] |= 64
+
+ var A edwards25519.ExtendedGroupElement
+ var hBytes [32]byte
+ copy(hBytes[:], digest[:])
+ edwards25519.GeScalarMultBase(&A, &hBytes)
+ var publicKeyBytes [32]byte
+ A.ToBytes(&publicKeyBytes)
+
+ privateKey := make([]byte, PrivateKeySize)
+ copy(privateKey, seed)
+ copy(privateKey[32:], publicKeyBytes[:])
+
+ return privateKey
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ if l := len(privateKey); l != PrivateKeySize {
+ panic("ed25519: bad private key length: " + strconv.Itoa(l))
+ }
+
+ h := sha512.New()
+ h.Write(privateKey[:32])
+
+ var digest1, messageDigest, hramDigest [64]byte
+ var expandedSecretKey [32]byte
+ h.Sum(digest1[:0])
+ copy(expandedSecretKey[:], digest1[:])
+ expandedSecretKey[0] &= 248
+ expandedSecretKey[31] &= 63
+ expandedSecretKey[31] |= 64
+
+ h.Reset()
+ h.Write(digest1[32:])
+ h.Write(message)
+ h.Sum(messageDigest[:0])
+
+ var messageDigestReduced [32]byte
+ edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
+ var R edwards25519.ExtendedGroupElement
+ edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
+
+ var encodedR [32]byte
+ R.ToBytes(&encodedR)
+
+ h.Reset()
+ h.Write(encodedR[:])
+ h.Write(privateKey[32:])
+ h.Write(message)
+ h.Sum(hramDigest[:0])
+ var hramDigestReduced [32]byte
+ edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
+
+ var s [32]byte
+ edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
+
+ signature := make([]byte, SignatureSize)
+ copy(signature[:], encodedR[:])
+ copy(signature[32:], s[:])
+
+ return signature
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+ if l := len(publicKey); l != PublicKeySize {
+ panic("ed25519: bad public key length: " + strconv.Itoa(l))
+ }
+
+ if len(sig) != SignatureSize || sig[63]&224 != 0 {
+ return false
+ }
+
+ var A edwards25519.ExtendedGroupElement
+ var publicKeyBytes [32]byte
+ copy(publicKeyBytes[:], publicKey)
+ if !A.FromBytes(&publicKeyBytes) {
+ return false
+ }
+ edwards25519.FeNeg(&A.X, &A.X)
+ edwards25519.FeNeg(&A.T, &A.T)
+
+ h := sha512.New()
+ h.Write(sig[:32])
+ h.Write(publicKey[:])
+ h.Write(message)
+ var digest [64]byte
+ h.Sum(digest[:0])
+
+ var hReduced [32]byte
+ edwards25519.ScReduce(&hReduced, &digest)
+
+ var R edwards25519.ProjectiveGroupElement
+ var s [32]byte
+ copy(s[:], sig[32:])
+
+ // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
+ // the range [0, order) in order to prevent signature malleability.
+ if !edwards25519.ScMinimal(&s) {
+ return false
+ }
+
+ edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
+
+ var checkR [32]byte
+ R.ToBytes(&checkR)
+ return bytes.Equal(sig[:32], checkR[:])
+}
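
A minimal usage sketch of the package API defined above:

package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(nil) // nil falls back to crypto/rand.Reader
	if err != nil {
		panic(err)
	}

	msg := []byte("zabov")
	sig := ed25519.Sign(priv, msg)

	fmt.Println(ed25519.Verify(pub, msg, sig))                // true
	fmt.Println(ed25519.Verify(pub, []byte("tampered"), sig)) // false
}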
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
new file mode 100644
index 0000000..d1448d8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// https://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
+// representation includes a public key suffix to make multiple signing
+// operations with the same key more efficient. This package refers to the RFC
+// 8032 private key as the “seed”.
+//
+// Beginning with Go 1.13, the functionality of this package was moved to the
+// standard library as crypto/ed25519. This package only acts as a compatibility
+// wrapper.
+package ed25519
+
+import (
+ "crypto/ed25519"
+ "io"
+)
+
+const (
+ // PublicKeySize is the size, in bytes, of public keys as used in this package.
+ PublicKeySize = 32
+ // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+ PrivateKeySize = 64
+ // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+ SignatureSize = 64
+ // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
+ SeedSize = 32
+)
+
+// PublicKey is the type of Ed25519 public keys.
+//
+// This type is an alias for crypto/ed25519's PublicKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PublicKey = ed25519.PublicKey
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+//
+// This type is an alias for crypto/ed25519's PrivateKey type.
+// See the crypto/ed25519 package for the methods on this type.
+type PrivateKey = ed25519.PrivateKey
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
+ return ed25519.GenerateKey(rand)
+}
+
+// NewKeyFromSeed calculates a private key from a seed. It will panic if
+// len(seed) is not SeedSize. This function is provided for interoperability
+// with RFC 8032. RFC 8032's private keys correspond to seeds in this
+// package.
+func NewKeyFromSeed(seed []byte) PrivateKey {
+ return ed25519.NewKeyFromSeed(seed)
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+ return ed25519.Sign(privateKey, message)
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+ return ed25519.Verify(publicKey, message, sig)
+}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
new file mode 100644
index 0000000..e39f086
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
@@ -0,0 +1,1422 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// These values are from the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// d is a constant in the Edwards curve equation.
+var d = FieldElement{
+ -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
+}
+
+// d2 is 2*d.
+var d2 = FieldElement{
+ -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
+}
+
+// SqrtM1 is the square-root of -1 in the field.
+var SqrtM1 = FieldElement{
+ -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
+}
+
+// A is a constant in the Montgomery-form of curve25519.
+var A = FieldElement{
+ 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+}
+
+// bi contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var bi = [8]PreComputedGroupElement{
+ {
+ FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+ FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+ FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+ },
+ {
+ FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+ FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+ FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+ },
+ {
+ FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+ FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+ FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+ },
+ {
+ FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+ FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+ FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+ },
+ {
+ FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
+ FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
+ FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
+ },
+ {
+ FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
+ FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
+ FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
+ },
+ {
+ FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
+ FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
+ FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
+ },
+ {
+ FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
+ FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
+ FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
+ },
+}
+
+// base contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var base = [32][8]PreComputedGroupElement{
+ {
+ {
+ FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+ FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+ FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+ },
+ {
+ FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
+ FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
+ FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
+ },
+ {
+ FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+ FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+ FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+ },
+ {
+ FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
+ FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
+ FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
+ },
+ {
+ FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+ FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+ FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+ },
+ {
+ FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
+ FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
+ FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
+ },
+ {
+ FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+ FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+ FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+ },
+ {
+ FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
+ FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
+ FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
+ },
+ },
+ {
+ {
+ FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
+ FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
+ FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
+ },
+ {
+ FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
+ FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
+ FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
+ },
+ {
+ FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
+ FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
+ FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
+ },
+ {
+ FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
+ FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
+ FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
+ },
+ {
+ FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
+ FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
+ FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
+ },
+ {
+ FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
+ FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
+ FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
+ },
+ {
+ FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
+ FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
+ FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
+ },
+ {
+ FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
+ FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
+ FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
+ },
+ },
+ {
+ {
+ FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
+ FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
+ FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
+ },
+ {
+ FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
+ FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
+ FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
+ },
+ {
+ FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
+ FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
+ FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
+ },
+ {
+ FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
+ FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
+ FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
+ },
+ {
+ FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
+ FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
+ FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
+ },
+ {
+ FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
+ FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
+ FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
+ },
+ {
+ FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
+ FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
+ FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
+ },
+ {
+ FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
+ FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
+ FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
+ },
+ },
+ {
+ {
+ FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
+ FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
+ FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
+ },
+ {
+ FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
+ FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
+ FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
+ },
+ {
+ FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
+ FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
+ FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
+ },
+ {
+ FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
+ FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
+ FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
+ },
+ {
+ FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
+ FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
+ FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
+ },
+ {
+ FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
+ FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
+ FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
+ },
+ {
+ FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
+ FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
+ FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
+ },
+ {
+ FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
+ FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
+ FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
+ },
+ },
+ {
+ {
+ FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
+ FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
+ FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
+ },
+ {
+ FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
+ FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
+ FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
+ },
+ {
+ FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
+ FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
+ FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
+ },
+ {
+ FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
+ FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
+ FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
+ },
+ {
+ FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
+ FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
+ FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
+ },
+ {
+ FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
+ FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
+ FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
+ },
+ {
+ FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
+ FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
+ FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
+ },
+ {
+ FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
+ FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
+ FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
+ },
+ },
+ {
+ {
+ FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
+ FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
+ FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
+ },
+ {
+ FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
+ FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
+ FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
+ },
+ {
+ FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
+ FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
+ FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
+ },
+ {
+ FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
+ FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
+ FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
+ },
+ {
+ FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
+ FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
+ FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
+ },
+ {
+ FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
+ FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
+ FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
+ },
+ {
+ FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
+ FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
+ FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
+ },
+ {
+ FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
+ FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
+ FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
+ },
+ },
+ {
+ {
+ FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
+ FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
+ FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
+ },
+ {
+ FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
+ FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
+ FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
+ },
+ {
+ FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
+ FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
+ FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
+ },
+ {
+ FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
+ FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
+ FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
+ },
+ {
+ FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
+ FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
+ FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
+ },
+ {
+ FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
+ FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
+ FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
+ },
+ {
+ FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
+ FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
+ FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
+ },
+ {
+ FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
+ FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
+ FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
+ },
+ },
+ {
+ {
+ FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
+ FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
+ FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
+ },
+ {
+ FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
+ FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
+ FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
+ },
+ {
+ FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
+ FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
+ FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
+ },
+ {
+ FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
+ FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
+ FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
+ },
+ {
+ FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
+ FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
+ FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
+ },
+ {
+ FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
+ FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
+ FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
+ },
+ {
+ FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
+ FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
+ FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
+ },
+ {
+ FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
+ FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
+ FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
+ },
+ },
+ {
+ {
+ FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
+ FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
+ FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
+ },
+ {
+ FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
+ FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
+ FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
+ },
+ {
+ FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
+ FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
+ FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
+ },
+ {
+ FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
+ FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
+ FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
+ },
+ {
+ FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
+ FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
+ FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
+ },
+ {
+ FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
+ FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
+ FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
+ },
+ {
+ FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
+ FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
+ FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
+ },
+ {
+ FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
+ FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
+ FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
+ },
+ },
+ {
+ {
+ FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
+ FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
+ FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
+ },
+ {
+ FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
+ FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
+ FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
+ },
+ {
+ FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
+ FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
+ FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
+ },
+ {
+ FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
+ FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
+ FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
+ },
+ {
+ FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
+ FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
+ FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
+ },
+ {
+ FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
+ FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
+ FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
+ },
+ {
+ FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
+ FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
+ FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
+ },
+ {
+ FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
+ FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
+ FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
+ },
+ },
+ {
+ {
+ FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
+ FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
+ FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
+ },
+ {
+ FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
+ FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
+ FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
+ },
+ {
+ FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
+ FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
+ FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
+ },
+ {
+ FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
+ FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
+ FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
+ },
+ {
+ FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
+ FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
+ FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
+ },
+ {
+ FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
+ FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
+ FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
+ },
+ {
+ FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
+ FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
+ FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
+ },
+ {
+ FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
+ FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
+ FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
+ },
+ },
+ {
+ {
+ FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
+ FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
+ FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
+ },
+ {
+ FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
+ FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
+ FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
+ },
+ {
+ FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
+ FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
+ FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
+ },
+ {
+ FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
+ FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
+ FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
+ },
+ {
+ FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
+ FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
+ FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
+ },
+ {
+ FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
+ FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
+ FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
+ },
+ {
+ FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
+ FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
+ FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
+ },
+ {
+ FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
+ FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
+ FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
+ },
+ },
+ {
+ {
+ FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
+ FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
+ FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
+ },
+ {
+ FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
+ FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
+ FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
+ },
+ {
+ FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
+ FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
+ FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
+ },
+ {
+ FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
+ FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
+ FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
+ },
+ {
+ FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
+ FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
+ FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
+ },
+ {
+ FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
+ FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
+ FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
+ },
+ {
+ FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
+ FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
+ FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
+ },
+ {
+ FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
+ FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
+ FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
+ },
+ },
+ {
+ {
+ FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
+ FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
+ FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
+ },
+ {
+ FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
+ FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
+ FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
+ },
+ {
+ FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
+ FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
+ FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
+ },
+ {
+ FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
+ FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
+ FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
+ },
+ {
+ FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
+ FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
+ FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
+ },
+ {
+ FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
+ FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
+ FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
+ },
+ {
+ FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
+ FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
+ FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
+ },
+ {
+ FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
+ FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
+ FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
+ },
+ },
+ {
+ {
+ FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
+ FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
+ FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
+ },
+ {
+ FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
+ FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
+ FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
+ },
+ {
+ FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
+ FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
+ FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
+ },
+ {
+ FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
+ FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
+ FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
+ },
+ {
+ FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
+ FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
+ FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
+ },
+ {
+ FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
+ FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
+ FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
+ },
+ {
+ FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
+ FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
+ FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
+ },
+ {
+ FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
+ FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
+ FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
+ },
+ },
+ {
+ {
+ FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
+ FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
+ FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
+ },
+ {
+ FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
+ FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
+ FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
+ },
+ {
+ FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
+ FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
+ FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
+ },
+ {
+ FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
+ FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
+ FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
+ },
+ {
+ FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
+ FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
+ FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
+ },
+ {
+ FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
+ FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
+ FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
+ },
+ {
+ FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
+ FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
+ FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
+ },
+ {
+ FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
+ FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
+ FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
+ },
+ },
+ {
+ {
+ FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
+ FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
+ FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
+ },
+ {
+ FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
+ FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
+ FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
+ },
+ {
+ FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
+ FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
+ FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
+ },
+ {
+ FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
+ FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
+ FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
+ },
+ {
+ FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
+ FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
+ FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
+ },
+ {
+ FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
+ FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
+ FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
+ },
+ {
+ FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
+ FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
+ FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
+ },
+ {
+ FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
+ FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
+ FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
+ },
+ },
+ {
+ {
+ FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
+ FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
+ FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
+ },
+ {
+ FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
+ FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
+ FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
+ },
+ {
+ FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
+ FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
+ FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
+ },
+ {
+ FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
+ FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
+ FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
+ },
+ {
+ FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
+ FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
+ FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
+ },
+ {
+ FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
+ FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
+ FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
+ },
+ {
+ FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
+ FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
+ FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
+ },
+ {
+ FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
+ FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
+ FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
+ },
+ },
+ {
+ {
+ FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
+ FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
+ FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
+ },
+ {
+ FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
+ FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
+ FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
+ },
+ {
+ FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
+ FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
+ FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
+ },
+ {
+ FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
+ FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
+ FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
+ },
+ {
+ FieldElement{-7737563, -