Compare commits


11 Commits

Author  SHA1        Message  Date
bloved  fa60f84fc9  fixed debug log (ParallelResolve)  1 year ago
bloved  d84cc29392  ADDED: parallel queries to multiple DNS servers  1 year ago
bloved  bc2470e344  updated vendor dependencies and golang to 1.15  2 years ago
bloved  e2a625a92e  if the DNS response Rcode contains an error, the cache entry expires after just 10 seconds  2 years ago
bloved  2fe0b7b0c2  - added whitelist documentation  2 years ago
bloved  ef9f912cf7  - added simple stats python script  2 years ago
bloved  f018377b44  Merge branch 'master' into master  2 years ago
bloved  d74f2a973e  - BUG FIX: preserving DNS RCode in Zabov responses (SetReply(query) resets it to RcodeSuccess)  2 years ago
bloved  c56789e193  - BUG FIX: IP Groups: blank timetable was not allowed; timetable & cfg can now be blank or undefined  2 years ago
bloved  9b4e4fbcb9  - optimization: blackholeip is parsed only once at startup  2 years ago
bloved  b9f17b3c19  - network down is ignored if the selected configuration is local responder  2 years ago

@ -13,6 +13,9 @@ var MyZabovCDB *leveldb.DB
//MyZabovKDBs is the storage where we'll put domains to block (one for each config)
var MyZabovKDBs map[string]*leveldb.DB
//MyZabovWLDBs is the storage where we'll put domains to whitelist (one for each config)
var MyZabovWLDBs map[string]*leveldb.DB
func init() {
var err error
@ -29,6 +32,7 @@ func init() {
}
MyZabovKDBs = map[string]*leveldb.DB{}
MyZabovWLDBs = map[string]*leveldb.DB{}
}
// ZabovCreateKDB creates Kill DBs
@ -46,3 +50,19 @@ func ZabovCreateKDB(conf string) {
MyZabovKDBs[conf] = KDB
}
// ZabovCreateWLDB creates Whitelist DBs
func ZabovCreateWLDB(conf string) {
var err error
dbname := "./db/whitelist_" + conf
WLDB, err := leveldb.OpenFile(dbname, nil)
if err != nil {
fmt.Println("Cannot create whitelist db: ", err.Error())
} else {
fmt.Println("whitelist DB created:", dbname)
}
MyZabovWLDBs[conf] = WLDB
}

@ -111,14 +111,41 @@ func init() {
conf.ZabovSingleBL = confRaw["singlefilters"].(string)
conf.ZabovDoubleBL = confRaw["doublefilters"].(string)
conf.ZabovAddBL = net.ParseIP(confRaw["blackholeip"].(string))
conf.ZabovHostsFile = confRaw["hostsfile"].(string)
if confRaw["hostsfile"] != nil {
conf.ZabovHostsFile = confRaw["hostsfile"].(string)
}
if confRaw["whitelist"] != nil {
conf.ZabovWhiteList = confRaw["whitelist"].(string)
}
if confRaw["cache"] != nil {
conf.ZabovCache = confRaw["cache"].(bool)
} else {
conf.ZabovCache = true
}
conf.ZabovDNSArray = fileByLines(conf.ZabovUpDNS)
ZabovDNSArray := fileByLines(conf.ZabovUpDNS)
conf.ZabovDNSArray = []string{}
for _, value := range ZabovDNSArray {
tvalue := strings.TrimSpace(value)
if tvalue != "" && !strings.HasPrefix(tvalue, "#") {
conf.ZabovDNSArray = append(conf.ZabovDNSArray, tvalue)
}
}
if confRaw["parallelqueries"] != nil {
conf.ZabovParallelQueries = int(confRaw["parallelqueries"].(float64))
}
if conf.ZabovParallelQueries > len(conf.ZabovDNSArray) {
conf.ZabovParallelQueries = len(conf.ZabovDNSArray)
}
if conf.ZabovParallelQueries < 1 {
conf.ZabovParallelQueries = 1
}
fmt.Println("ZabovParallelQueries:", conf.ZabovParallelQueries)
ZabovConfigs[name] = &conf
}
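With the trimming and '#' filtering above, the upstream list referenced by "upstream" may now contain blank lines and comments. A minimal illustrative file (addresses copied from the bundled resolver list further below; the exact file contents are an assumption):

# upstream resolvers, one entry per line, in IP:port form
88.198.37.146:53
88.99.66.18:53

89.163.150.209:53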
@ -276,7 +303,8 @@ func init() {
if localresponder["responder"] != nil {
ZabovLocalResponder = localresponder["responder"].(string)
if len(ZabovLocalResponder) > 0 {
local := ZabovConfig{ZabovDNSArray: []string{ZabovLocalResponder}, references: 1}
local := ZabovConfig{ZabovDNSArray: []string{ZabovLocalResponder},
references: 1, ZabovParallelQueries: 1}
ZabovConfigs[localresponderConfigName] = &local
fmt.Println("ZabovLocalResponder:", ZabovLocalResponder)
}
@ -295,6 +323,7 @@ func init() {
delete(ZabovConfigs, name)
} else {
ZabovCreateKDB(name)
ZabovCreateWLDB(name)
}
}

@ -11,20 +11,23 @@ type killfileItem struct {
Kconfigs stringarray
}
var bChannel chan killfileItem
var bKillChannel chan killfileItem
var bWhiteListChannel chan killfileItem
func init() {
bChannel = make(chan killfileItem, 1024)
bKillChannel = make(chan killfileItem, 1024)
bWhiteListChannel = make(chan killfileItem, 1024)
fmt.Println("Initializing kill channel engine.")
go bWriteThread()
go bWriteKillThread()
go bWriteWhiteListThread()
}
func bWriteThread() {
func bWriteKillThread() {
for item := range bChannel {
for item := range bKillChannel {
alreadyInSomeDB := false
@ -56,7 +59,7 @@ func DomainKill(s, durl string, configs stringarray) {
k.Ksource = durl
k.Kconfigs = configs
bChannel <- k
bKillChannel <- k
}
@ -88,3 +91,69 @@ func domainInKillfile(domain string, config string) bool {
return has
}
func bWriteWhiteListThread() {
for item := range bWhiteListChannel {
alreadyInSomeDB := false
for _, config := range item.Kconfigs {
if !alreadyInSomeDB {
alreadyInSomeDB = domainInWhiteListfile(item.Kdomain, config)
}
writeInWhiteListfile(item.Kdomain, item.Ksource, config)
}
if !alreadyInSomeDB {
incrementStats("WL domains from "+item.Ksource, 1)
incrementStats("WL TOTAL", 1)
}
}
}
//DomainWhiteList stores a domain name inside the whitelist
func DomainWhiteList(s, durl string, configs stringarray) {
if len(s) > 2 {
s = strings.ToLower(s)
var k killfileItem
k.Kdomain = s
k.Ksource = durl
k.Kconfigs = configs
bWhiteListChannel <- k
}
}
func writeInWhiteListfile(key, value string, config string) {
stK := []byte(key)
stV := []byte(value)
MyZabovWLDB := MyZabovWLDBs[config]
err := MyZabovWLDB.Put(stK, stV, nil)
if err != nil {
fmt.Println("Cannot write to Whitelist DB: ", err.Error())
}
}
func domainInWhiteListfile(domain string, config string) bool {
s := strings.ToLower(domain)
MyZabovWLDB := MyZabovWLDBs[config]
has, err := MyZabovWLDB.Has([]byte(s), nil)
if err != nil {
fmt.Println("Cannot read from Whitelist DB: ", err.Error())
}
return has
}

@ -13,7 +13,7 @@ import (
type cacheItem struct {
Query []byte
Date time.Time
ExpireDate time.Time
}
//DomainCache stores a domain name inside the cache
@ -28,7 +28,17 @@ func DomainCache(s string, resp *dns.Msg) {
if err != nil {
fmt.Println("Problems packing the response: ", err.Error())
}
domain2cache.Date = time.Now()
if resp.Rcode == dns.RcodeSuccess {
// on success, store the response with the normal cache TTL
domain2cache.ExpireDate = time.Now().Add(time.Duration(ZabovCacheTTL) * time.Hour)
} else {
// on failure, store the response for a very short time
if ZabovDebug {
fmt.Println("DomainCache(): DNS error Rcode: ", resp.Rcode, s, "cache time reduced to 10 seconds...")
}
domain2cache.ExpireDate = time.Now().Add(10 * time.Second)
}
err = enc.Encode(domain2cache)
@ -65,7 +75,7 @@ func GetDomainFromCache(s string) *dns.Msg {
conf, errDB = MyZabovCDB.Get([]byte(s), nil)
if errDB != nil {
fmt.Println("Cant READ DB :" , errDB.Error() )
fmt.Println("Cant READ DB:" , errDB.Error() )
return nil
}
@ -77,7 +87,10 @@ func GetDomainFromCache(s string) *dns.Msg {
return nil
}
if time.Since(record.Date) > (time.Duration(ZabovCacheTTL) * time.Hour) {
if time.Now().After(record.ExpireDate) {
if ZabovDebug {
fmt.Println("GetDomainFromCache(): entry expired:", s)
}
return nil
}

@ -63,6 +63,7 @@ Minimal config file should look like:
"doublefilters":"./urls-hosts.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local.txt",
"whitelist":"./whitelist.txt",
"cache":true
},
}
@ -84,7 +85,8 @@ configs:
- singlefilters: name of the file for blacklists following the "singlefilter" schema.(one URL per line)
- doublefilters: name of the file, for blacklists following the "doublefilter" schema.(one URL per line)
- blackholeip: IP address to return when the IP is banned. This is because you may want to avoid MX issues, mail loops on localhost, or you have a web server running on localhost
- hostsfile: path where you keep your local blacklistfile : this is in the format "singlefilter", meaning one domain per line, unlike hosts file.
- hostsfile: path where you keep your local blacklist file: this is in the format "singlefilter", meaning one domain per line, unlike a hosts file.
- whitelist: path where you keep your local whitelist file: this is in the format "singlefilter", meaning one domain per line, unlike a hosts file (see the short example after this list).
- cache: if set to false, disables the cache for this configuration. Boolean, defaults to true.
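For instance, a whitelist file in the "singlefilter" format could look like this (domain names purely illustrative):

# domains that must never be blocked, one per line
example.com
example.org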
Advanced configuration includes support for multiple configurations based on IP Source and timetables:
@ -138,21 +140,24 @@ Advanced configuration includes support for multiple configurations based on IP
"singlefilters":"./urls-domains.txt",
"doublefilters":"./urls-hosts.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local.txt"
"hostsfile":"./urls-local.txt",
"whitelist":"./whitelist.txt",
},
"children":{
"upstream":"./dns-upstream-safe.txt",
"singlefilters":"./urls-domains.txt",
"doublefilters":"./urls-hosts.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local.txt"
"hostsfile":"./urls-local.txt",
"whitelist":"./whitelist.txt",
},
"children_restricted":{
"upstream":"./dns-upstream-safe.txt",
"singlefilters":"./urls-domains-restricted.txt",
"doublefilters":"./urls-hosts-restricted.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local.txt"
"hostsfile":"./urls-local.txt",
"whitelist":"./whitelist.txt",
},
"tv":{
"upstream":"./dns-upstream.txt",
@ -160,6 +165,7 @@ Advanced configuration includes support for multiple configurations based on IP
"doublefilters":"",
"blackholeip":"127.0.0.1",
"hostsfile":"",
"whitelist":"",
"cache":false
}
}

@ -36,7 +36,9 @@
"singlefilters":"./urls-domains-updated.txt",
"doublefilters":"./urls-hosts-normal.txt",
"blackholeip":"127.0.0.1",
"hostsfile":"./urls-local-normal.txt"
"hostsfile":"./urls-local-normal.txt",
"whitelist":"./urls-wl-normal.txt",
"parallelqueries":4
},
"children":{
"upstream":"./dns-familyscreen.txt",

@ -225,7 +225,6 @@
212.89.128.28:53
213.133.116.14:53
213.166.247.100:53
217.243.173.82:53
217.5.182.118:53
217.7.80.40:53
217.7.81.136:53
@ -362,7 +361,6 @@
85.214.62.160:53
85.93.91.101:53
87.106.63.208:53
87.118.126.225:53
88.198.37.146:53
88.99.66.18:53
89.163.150.209:53

@ -11,6 +11,77 @@ import (
"github.com/miekg/dns"
)
func SingleResolve(query *dns.Msg, d string, lfqdn string, ch chan *dns.Msg) {
c := new(dns.Client)
c.ReadTimeout = 500 * time.Millisecond
c.WriteTimeout = 500 * time.Millisecond
in, _, err := c.Exchange(query, d)
if err != nil {
fmt.Printf("SingleResolve: Problem with DNS %s : %s\n", d, err.Error())
go incrementStats("DNS Problems "+d, 1)
ch <- nil
} else {
go incrementStats(d, 1)
Rcode := in.MsgHdr.Rcode
in.SetReply(query)
in.MsgHdr.Rcode = Rcode
in.Authoritative = true
in.Compress = true
go DomainCache(lfqdn, in)
if ZabovDebug {
log.Println("SingleResolve: OK:", d, lfqdn)
}
ch <- in
}
}
func ParallelResolve(query *dns.Msg, config string, lfqdn string) *dns.Msg {
var resCur *dns.Msg
var res *dns.Msg
dnss := oneTimeDNS(config, ZabovConfigs[config].ZabovParallelQueries)
// buffer the channel so that goroutines finishing after the early break below do not block forever
ch := make(chan *dns.Msg, len(dnss))
for _, d := range dnss {
if ZabovDebug {
log.Println("ParallelResolve: running SingleResolve on:", d, lfqdn)
}
go SingleResolve(query, d, lfqdn, ch)
}
if ZabovDebug {
log.Println("ParallelResolve: wait for results...")
}
for range dnss {
resCur = <-ch
if resCur != nil {
if res == nil {
if ZabovDebug {
log.Println("ParallelResolve: got first result!")
}
res = resCur
} else if resCur.Rcode == dns.RcodeSuccess {
if ZabovDebug {
log.Println("ParallelResolve: got next result, RcodeSuccess, replacing previous...")
}
res = resCur
} else {
if ZabovDebug {
log.Println("ParallelResolve: got next result, discarding...")
}
}
if res.Rcode == dns.RcodeSuccess {
break
}
}
}
return res
}
//ForwardQuery forwards the query to the upstream server
//the first successful answer wins
//accepts config name to select the UP DNS source list
@ -39,15 +110,9 @@ func ForwardQuery(query *dns.Msg, config string, nocache bool) *dns.Msg {
}
cached.Compress = true
return cached
}
}
c := new(dns.Client)
c.ReadTimeout = 500 * time.Millisecond
c.WriteTimeout = 500 * time.Millisecond
for {
// round robin with retry
@ -58,27 +123,10 @@ func ForwardQuery(query *dns.Msg, config string, nocache bool) *dns.Msg {
continue
}
d := oneTimeDNS(config)
in, _, err := c.Exchange(query, d)
if err != nil {
fmt.Printf("Problem with DNS %s : %s\n", d, err.Error())
go incrementStats("DNS Problems "+d, 1)
continue
} else {
go incrementStats(d, 1)
Rcode := in.MsgHdr.Rcode
in.SetReply(query)
in.MsgHdr.Rcode = Rcode
in.Authoritative = true
in.Compress = true
go DomainCache(lfqdn, in)
if ZabovDebug {
log.Println("ForwardQuery: OK!")
}
in := ParallelResolve(query, config, lfqdn)
if in != nil {
return in
}
}
@ -98,9 +146,12 @@ func init() {
}
func oneTimeDNS(config string) (dns string) {
func oneTimeDNS(config string, count int) (dns []string) {
rand.Seed(time.Now().Unix())
if count == 0 {
count = 1
}
rand.Seed(time.Now().UnixNano())
upl := ZabovConfigs[config].ZabovDNSArray
@ -108,16 +159,27 @@ func oneTimeDNS(config string) (dns string) {
if len(ZabovLocalResponder) > 0 {
fmt.Println("No DNS defined, fallback to local responder:", ZabovLocalResponder)
return ZabovLocalResponder
return []string{ZabovLocalResponder}
}
fmt.Println("No DNS defined, using default 127.0.0.53:53. Hope it works!")
return "127.0.0.53:53"
return []string{"127.0.0.53:53"}
}
n := rand.Intn(128*len(upl)) % len(upl)
dns = upl[n]
res := []string{}
for i := 0; i < count; i++ {
res = append(res, upl[(n+i)%len(upl)])
}
return
check := make(map[string]int)
for _, val := range res {
check[val] = 1
}
res = []string{}
for d := range check {
res = append(res, d)
}
return res
}

@ -277,7 +277,7 @@ func (mydns *handler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
}
}
if domainInKillfile(fqdn, config) {
if !domainInWhiteListfile(fqdn, config) && domainInKillfile(fqdn, config) {
go incrementStats("Killed", 1)
msg.Answer = append(msg.Answer, &dns.A{

@ -1,8 +1,11 @@
module zabov
go 1.13
go 1.15
require (
github.com/miekg/dns v1.1.27
github.com/golang/snappy v0.0.4
github.com/miekg/dns v1.1.43
github.com/syndtr/goleveldb v1.0.0
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164
)

@ -1,35 +1,33 @@
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8 h1:/6y1LfuqNuQdHAm0jjtPtgRcxIxjVZgm5OTu8/QhZvk=
golang.org/x/net v0.0.0-20210916014120-12bc252f5db8/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=

@ -11,28 +11,29 @@ func init() {
fmt.Println("Ingesting local hosts file")
ingestLocalBlacklists()
ingestLocalWhiteLists()
}
func ingestLocalBlacklists() {
fmt.Println("ingestLocalBlacklist: collecting urls from all configs...")
_files := urlsMap{}
_HostsFiles := urlsMap{}
for config := range ZabovConfigs {
ZabovHostsFile := ZabovConfigs[config].ZabovHostsFile
if len(ZabovHostsFile) == 0 {
continue
}
configs := _files[ZabovHostsFile]
configs := _HostsFiles[ZabovHostsFile]
if configs == nil {
configs = stringarray{}
_files[ZabovHostsFile] = configs
_HostsFiles[ZabovHostsFile] = configs
}
configs = append(configs, config)
_files[ZabovHostsFile] = configs
_HostsFiles[ZabovHostsFile] = configs
}
for ZabovHostsFile, configs := range _files {
for ZabovHostsFile, configs := range _HostsFiles {
file, err := os.Open(ZabovHostsFile)
if err != nil {
fmt.Println(err.Error())
@ -57,6 +58,49 @@ func ingestLocalBlacklists() {
}
func ingestLocalWhiteLists() {
fmt.Println("ingestLocalWhiteLists: collecting urls from all configs...")
_WhiteListFiles := urlsMap{}
for config := range ZabovConfigs {
ZabovWhiteList := ZabovConfigs[config].ZabovWhiteList
if len(ZabovWhiteList) == 0 {
continue
}
configs := _WhiteListFiles[ZabovWhiteList]
if configs == nil {
configs = stringarray{}
_WhiteListFiles[ZabovWhiteList] = configs
}
configs = append(configs, config)
_WhiteListFiles[ZabovWhiteList] = configs
}
for ZabovWhiteList, configs := range _WhiteListFiles {
file, err := os.Open(ZabovWhiteList)
if err != nil {
fmt.Println(err.Error())
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
d := strings.TrimSpace(scanner.Text())
// skip blank lines and comments; trimming first avoids indexing into an empty string
if len(d) == 0 || d[0] == '#' {
continue
}
DomainWhiteList(d, ZabovWhiteList, configs)
incrementStats("WhiteList", 1)
}
if err := scanner.Err(); err != nil {
fmt.Println(err.Error())
}
}
}
func fileByLines(filename string) (blurls []string) {
file, err := os.Open(filename)

@ -35,14 +35,16 @@ type handler struct{}
// ZabovConfig contains all Zabov configs
type ZabovConfig struct {
ZabovSingleBL string // json:singlefilters -> ZabovSingleBL list of urls returning a file with just names of domains
ZabovDoubleBL string // json:doublefilters -> ZabovDoubleBL list of urls returning a file with IP<space>domain
ZabovAddBL net.IP // json:blackholeip -> ZabovAddBL is the IP we want to send all the clients to. Usually is 127.0.0.1
ZabovHostsFile string // json:hostsfile -> ZabovHostsFile is the file we use to keep our hosts
ZabovUpDNS string // json:upstream -> ZabovUpDNS keeps the name of upstream DNSs
ZabovDNSArray []string // contains all the DNS we mention, parsed from ZabovUpDNS file
ZabovCache bool // allows to disable cache
references int // contains references to this config; if zero, config shall be removed
ZabovSingleBL string // json:singlefilters -> ZabovSingleBL list of urls returning a file with just names of domains
ZabovDoubleBL string // json:doublefilters -> ZabovDoubleBL list of urls returning a file with IP<space>domain
ZabovAddBL net.IP // json:blackholeip -> ZabovAddBL is the IP we want to send all the clients to. Usually is 127.0.0.1
ZabovHostsFile string // json:hostsfile -> ZabovHostsFile is the file we use to keep our hosts
ZabovWhiteList string // json:whitelist -> ZabovWhiteList is the file we use to keep whitelisted hosts
ZabovUpDNS string // json:upstream -> ZabovUpDNS keeps the name of upstream DNSs
ZabovDNSArray []string // contains all the DNS we mention, parsed from ZabovUpDNS file
ZabovCache bool // allows to disable cache
ZabovParallelQueries int // contains max number of parallel queries to multiple DNS servers
references int // contains references to this config; if zero, config shall be removed
}
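For reference, a single entry of the JSON "configs" map that populates every json-tagged field above could look like the following sketch (the entry name "default" and all paths are illustrative; field names follow the README):

"default": {
"upstream": "./dns-upstream.txt",
"singlefilters": "./urls-domains.txt",
"doublefilters": "./urls-hosts.txt",
"blackholeip": "127.0.0.1",
"hostsfile": "./urls-local.txt",
"whitelist": "./whitelist.txt",
"cache": true,
"parallelqueries": 4
}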
// ZabovConfigs contains all Zabov configs

@ -0,0 +1,95 @@
#!/bin/env python3
import os
import glob
import argparse
parser = argparse.ArgumentParser(description='Zabov logs analyzer')
parser.add_argument('--ip', dest="ip", metavar='IP', type=str,
help='filter by source IP (substring to match). Default: any')
parser.add_argument('--action', dest="action", metavar='action', type=str, default="killed",
help='filter action (substring to match): killed|forwarded|any. Default: killed')
parser.add_argument('--config', dest="config", metavar='name', type=str,
help='filter by config name (substring to match). Default: any')
parser.add_argument('--timetable', dest="timetable", metavar='name', type=str,
help='filter by timetable name (substring to match). Default: any')
parser.add_argument('--reqtype', dest="reqtype", metavar='TypeA', type=str, default="TypeA",
help='filter by reqtype name (substring to match): TypeA|TypeAAAA|TypeMX|...')
parser.add_argument('--domain', dest="domain", metavar='name', type=str,
help='filter by domain name (substring to match). Default: all')
parser.add_argument('--min-entries', dest="minentries", metavar='100', type=int, default=0,
help='filter output by minimum number of entries. Default: any')
parser.add_argument('--logs-path', dest="logs", metavar='path', type=str, default="./config/logs",
help='Zabov logs path')
args = parser.parse_args()
timetables = {}
configs = {}
killed = {}
for x in glob.glob(os.path.join(args.logs, "*.log")):
#print (x)
f = open(x, "r")
f.readline()
if args.reqtype:
args.reqtype = args.reqtype.lower()
if args.domain:
args.domain = args.domain.lower()
for line in f.readlines():
linel = line.strip().lower()
fields = linel.split("\t")
timetables[fields[5]] = timetables.get(fields[5], 0) +1
configs[fields[4]] = configs.get(fields[4], 0) +1
ok = all((not args.action or fields[6].find(args.action)>=0 or fields[6] == "any", \
not args.timetable or fields[5].find(args.timetable)>=0 or fields[5] == "any", \
not args.config or fields[4].find(args.config)>=0 or fields[4] == "any", \
not args.ip or fields[1].find(args.ip)>=0 or fields[1] == "any",\
not args.domain or fields[2].find(args.domain)>=0 or fields[2] == "any", \
not args.reqtype or fields[3].find(args.reqtype)>=0 ))
if ok:
killed[fields[2]] = killed.get(fields[2], 0) +1
killed_sorted = {key: value for key, value in sorted(killed.items(), key=lambda item: item[1], reverse=True)}
total_queries_filtered = 0
total_domain_filtered = 0
total_queries = 0
for k in killed_sorted.keys():
if args.minentries == 0 or killed[k] >= args.minentries:
print (k, killed[k])
total_queries_filtered += killed[k]
total_domain_filtered+=1
total_queries += killed[k]
print("")
print("TOTAL domains (filtered):", total_domain_filtered )
print("TOTAL queries (filtred):", total_queries_filtered )
print("TOTAL domains:", len(killed_sorted.keys()) )
print("TOTAL queries:", total_queries )
timetables = {key: value for key, value in sorted(timetables.items(), key=lambda item: item[0], reverse=False)}
configs = {key: value for key, value in sorted(configs.items(), key=lambda item: item[0], reverse=False)}
print("all available timetables:")
for k in timetables.keys():
print(" '%s': %d items" % (k, timetables[k], ))
print("all available configs:")
for k in configs.keys():
print(" '%s': %d items" % (k, configs[k], ))

@ -8,8 +8,11 @@
# Please keep the list sorted.
Amazon.com, Inc
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Google Inc.
Jan Mercl <0xjnml@gmail.com>
Klaus Post <klauspost@gmail.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Sebastien Binet <seb.binet@gmail.com>

@ -26,9 +26,13 @@
# Please keep the list sorted.
Alex Legg <alexlegg@google.com>
Damian Gryski <dgryski@gmail.com>
Eric Buth <eric@topos.com>
Jan Mercl <0xjnml@gmail.com>
Jonathan Swinney <jswinney@amazon.com>
Kai Backman <kaib@golang.org>
Klaus Post <klauspost@gmail.com>
Marc-Antoine Ruel <maruel@chromium.org>
Nigel Tao <nigeltao@golang.org>
Rob Pike <r@golang.org>

@ -52,6 +52,8 @@ const (
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
//
// Decode handles the Snappy block format, not the Snappy stream format.
func Decode(dst, src []byte) ([]byte, error) {
dLen, s, err := decodedLen(src)
if err != nil {
@ -83,6 +85,8 @@ func NewReader(r io.Reader) *Reader {
}
// Reader is an io.Reader that can read Snappy-compressed bytes.
//
// Reader handles the Snappy stream format, not the Snappy block format.
type Reader struct {
r io.Reader
err error
@ -114,32 +118,23 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
return true
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
for {
if r.i < r.j {
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
func (r *Reader) fill() error {
for r.i >= r.j {
if !r.readFull(r.buf[:4], true) {
return 0, r.err
return r.err
}
chunkType := r.buf[0]
if !r.readHeader {
if chunkType != chunkTypeStreamIdentifier {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
r.readHeader = true
}
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
if chunkLen > len(r.buf) {
r.err = ErrUnsupported
return 0, r.err
return r.err
}
// The chunk types are specified at
@ -149,11 +144,11 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.2. Compressed data (chunk type 0x00).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
buf := r.buf[:chunkLen]
if !r.readFull(buf, false) {
return 0, r.err
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
buf = buf[checksumSize:]
@ -161,19 +156,19 @@ func (r *Reader) Read(p []byte) (int, error) {
n, err := DecodedLen(buf)
if err != nil {
r.err = err
return 0, r.err
return r.err
}
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
if _, err := Decode(r.decoded, buf); err != nil {
r.err = err
return 0, r.err
return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
r.i, r.j = 0, n
continue
@ -182,25 +177,25 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.3. Uncompressed data (chunk type 0x01).
if chunkLen < checksumSize {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
buf := r.buf[:checksumSize]
if !r.readFull(buf, false) {
return 0, r.err
return r.err
}
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
// Read directly into r.decoded instead of via r.buf.
n := chunkLen - checksumSize
if n > len(r.decoded) {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
if !r.readFull(r.decoded[:n], false) {
return 0, r.err
return r.err
}
if crc(r.decoded[:n]) != checksum {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
r.i, r.j = 0, n
continue
@ -209,15 +204,15 @@ func (r *Reader) Read(p []byte) (int, error) {
// Section 4.1. Stream identifier (chunk type 0xff).
if chunkLen != len(magicBody) {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
if !r.readFull(r.buf[:len(magicBody)], false) {
return 0, r.err
return r.err
}
for i := 0; i < len(magicBody); i++ {
if r.buf[i] != magicBody[i] {
r.err = ErrCorrupt
return 0, r.err
return r.err
}
}
continue
@ -226,12 +221,44 @@ func (r *Reader) Read(p []byte) (int, error) {
if chunkType <= 0x7f {
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
r.err = ErrUnsupported
return 0, r.err
return r.err
}
// Section 4.4 Padding (chunk type 0xfe).
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
if !r.readFull(r.buf[:chunkLen], false) {
return 0, r.err
return r.err
}
}
return nil
}
// Read satisfies the io.Reader interface.
func (r *Reader) Read(p []byte) (int, error) {
if r.err != nil {
return 0, r.err
}
if err := r.fill(); err != nil {
return 0, err
}
n := copy(p, r.decoded[r.i:r.j])
r.i += n
return n, nil
}
// ReadByte satisfies the io.ByteReader interface.
func (r *Reader) ReadByte() (byte, error) {
if r.err != nil {
return 0, r.err
}
if err := r.fill(); err != nil {
return 0, err
}
c := r.decoded[r.i]
r.i++
return c, nil
}
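The new ReadByte method lets *snappy.Reader satisfy io.ByteReader in addition to io.Reader. A minimal, self-contained sketch exercising the stream format with the v0.0.4 API pinned above (this example program is an assumption for illustration, not part of the repository):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// NewBufferedWriter/NewReader speak the Snappy stream format;
	// Encode/Decode (not used here) handle the block format.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	w.Write([]byte("hello"))
	w.Close() // flush the buffered chunk

	r := snappy.NewReader(&buf)
	b, err := r.ReadByte() // io.ByteReader
	fmt.Println(string(b), err) // prints: h <nil>
}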

@ -0,0 +1,494 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
// The asm code generally follows the pure Go code in decode_other.go, except
// where marked with a "!!!".
// func decode(dst, src []byte) int
//
// All local variables fit into registers. The non-zero stack size is only to
// spill registers and push args when issuing a CALL. The register allocation:
// - R2 scratch
// - R3 scratch
// - R4 length or x
// - R5 offset
// - R6 &src[s]
// - R7 &dst[d]
// + R8 dst_base
// + R9 dst_len
// + R10 dst_base + dst_len
// + R11 src_base
// + R12 src_len
// + R13 src_base + src_len
// - R14 used by doCopy
// - R15 used by doCopy
//
// The registers R8-R13 (marked with a "+") are set at the start of the
// function, and after a CALL returns, and are not otherwise modified.
//
// The d variable is implicitly R7 - R8, and len(dst)-d is R10 - R7.
// The s variable is implicitly R6 - R11, and len(src)-s is R13 - R6.
TEXT ·decode(SB), NOSPLIT, $56-56
// Initialize R6, R7 and R8-R13.
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R7
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R6
MOVD R11, R13
ADD R12, R13, R13
loop:
// for s < len(src)
CMP R13, R6
BEQ end
// R4 = uint32(src[s])
//
// switch src[s] & 0x03
MOVBU (R6), R4
MOVW R4, R3
ANDW $3, R3
MOVW $1, R1
CMPW R1, R3
BGE tagCopy
// ----------------------------------------
// The code below handles literal tags.
// case tagLiteral:
// x := uint32(src[s] >> 2)
// switch
MOVW $60, R1
LSRW $2, R4, R4
CMPW R4, R1
BLS tagLit60Plus
// case x < 60:
// s++
ADD $1, R6, R6
doLit:
// This is the end of the inner "switch", when we have a literal tag.
//
// We assume that R4 == x and x fits in a uint32, where x is the variable
// used in the pure Go decode_other.go code.
// length = int(x) + 1
//
// Unlike the pure Go code, we don't need to check if length <= 0 because
// R4 can hold 64 bits, so the increment cannot overflow.
ADD $1, R4, R4
// Prepare to check if copying length bytes will run past the end of dst or
// src.
//
// R2 = len(dst) - d
// R3 = len(src) - s
MOVD R10, R2
SUB R7, R2, R2
MOVD R13, R3
SUB R6, R3, R3
// !!! Try a faster technique for short (16 or fewer bytes) copies.
//
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
// goto callMemmove // Fall back on calling runtime·memmove.
// }
//
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
// against 21 instead of 16, because it cannot assume that all of its input
// is contiguous in memory and so it needs to leave enough source bytes to
// read the next tag without refilling buffers, but Go's Decode assumes
// contiguousness (the src argument is a []byte).
CMP $16, R4
BGT callMemmove
CMP $16, R2
BLT callMemmove
CMP $16, R3
BLT callMemmove
// !!! Implement the copy from src to dst as a 16-byte load and store.
// (Decode's documentation says that dst and src must not overlap.)
//
// This always copies 16 bytes, instead of only length bytes, but that's
// OK. If the input is a valid Snappy encoding then subsequent iterations
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
// non-nil error), so the overrun will be ignored.
//
// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
// 16-byte loads and stores. This technique probably wouldn't be as
// effective on architectures that are fussier about alignment.
LDP 0(R6), (R14, R15)
STP (R14, R15), 0(R7)
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
callMemmove:
// if length > len(dst)-d || length > len(src)-s { etc }
CMP R2, R4
BGT errCorrupt
CMP R3, R4
BGT errCorrupt
// copy(dst[d:], src[s:s+length])
//
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
// R7, R6 and R4 as arguments. Coincidentally, we also need to spill those
// three registers to the stack, to save local variables across the CALL.
MOVD R7, 8(RSP)
MOVD R6, 16(RSP)
MOVD R4, 24(RSP)
MOVD R7, 32(RSP)
MOVD R6, 40(RSP)
MOVD R4, 48(RSP)
CALL runtime·memmove(SB)
// Restore local variables: unspill registers from the stack and
// re-calculate R8-R13.
MOVD 32(RSP), R7
MOVD 40(RSP), R6
MOVD 48(RSP), R4
MOVD dst_base+0(FP), R8
MOVD dst_len+8(FP), R9
MOVD R8, R10
ADD R9, R10, R10
MOVD src_base+24(FP), R11
MOVD src_len+32(FP), R12
MOVD R11, R13
ADD R12, R13, R13
// d += length
// s += length
ADD R4, R7, R7
ADD R4, R6, R6
B loop
tagLit60Plus:
// !!! This fragment does the
//
// s += x - 58; if uint(s) > uint(len(src)) { etc }
//
// checks. In the asm version, we code it once instead of once per switch case.
ADD R4, R6, R6
SUB $58, R6, R6
MOVD R6, R3
SUB R11, R3, R3
CMP R12, R3
BGT errCorrupt
// case x == 60:
MOVW $61, R1
CMPW R1, R4
BEQ tagLit61
BGT tagLit62Plus
// x = uint32(src[s-1])
MOVBU -1(R6), R4
B doLit
tagLit61:
// case x == 61:
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
MOVHU -2(R6), R4
B doLit
tagLit62Plus:
CMPW $62, R4
BHI tagLit63
// case x == 62:
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
MOVHU -3(R6), R4
MOVBU -1(R6), R3
ORR R3