Initial commit

master
Uriel Fanelli 2023-02-14 11:33:38 +01:00
parent 84212e6246
commit a318691954
148 changed files with 19953 additions and 0 deletions

2
.gitignore vendored Normal file

@ -0,0 +1,2 @@
userdb
data

BIN
chtorr Executable file

Binary file not shown.

5
config.ini Normal file

@ -0,0 +1,5 @@
[general]
baseURL = https://anticristo.ddns.net
storage = data ; can be relative or absolute path
userAgent = "Chtorr Fediverse Server"
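This file is consumed by activityserve.Setup("config.ini", ...) in main.go. Purely to illustrate the format, here is a minimal sketch of reading the same [general] keys directly with gopkg.in/ini.v1, the INI library already pulled in through go.mod; the standalone program is hypothetical and not part of this commit.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

func main() {
	// Load the config shown above and print the [general] section keys.
	cfg, err := ini.Load("config.ini")
	if err != nil {
		panic(err)
	}
	general := cfg.Section("general")
	fmt.Println("baseURL:  ", general.Key("baseURL").String())
	fmt.Println("storage:  ", general.Key("storage").String())
	fmt.Println("userAgent:", general.Key("userAgent").String())
}
```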

6
doc.go Normal file

@ -0,0 +1,6 @@
// chtorr project doc.go
/*
chtorr document
*/
package main

24
go.mod Normal file

@ -0,0 +1,24 @@
module chtorr
go 1.18
require (
github.com/gologme/log v1.3.0
github.com/peterbourgon/diskv/v3 v3.0.1
github.com/writeas/activityserve v0.0.0-20200409150223-d7ab3eaa4481
)
require (
github.com/captncraig/cors v0.0.0-20190703115713-e80254a89df1 // indirect
github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5 // indirect
github.com/go-fed/httpsig v0.1.1-0.20200204213531-0ef28562fabe // indirect
github.com/google/btree v1.0.0 // indirect
github.com/gorilla/mux v1.7.4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/writeas/go-webfinger v1.1.0 // indirect
github.com/writefreely/go-nodeinfo v1.2.0 // indirect
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 // indirect
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a // indirect
gopkg.in/ini.v1 v1.55.0 // indirect
)

42
go.sum Normal file

@ -0,0 +1,42 @@
github.com/captncraig/cors v0.0.0-20190703115713-e80254a89df1 h1:AFSJaASPGYNbkUa5c8ZybrcW9pP3Cy7+z5dnpcc/qG8=
github.com/captncraig/cors v0.0.0-20190703115713-e80254a89df1/go.mod h1:EIlIeMufZ8nqdUhnesledB15xLRl4wIJUppwDLPrdrQ=
github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5 h1:RAV05c0xOkJ3dZGS0JFybxFKZ2WMLabgx3uXnd7rpGs=
github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5/go.mod h1:GgB8SF9nRG+GqaDtLcwJZsQFhcogVCJ79j4EdT0c2V4=
github.com/go-fed/httpsig v0.1.1-0.20200204213531-0ef28562fabe h1:U71giCx5NjRn4Lb71UuprPHqhjxGv3Jqonb9fgcaJH8=
github.com/go-fed/httpsig v0.1.1-0.20200204213531-0ef28562fabe/go.mod h1:T56HUNYZUQ1AGUzhAYPugZfp36sKApVnGBgKlIY+aIE=
github.com/gologme/log v1.2.0/go.mod h1:gq31gQ8wEHkR+WekdWsqDuf8pXTUZA9BnnzTuPz1Y9U=
github.com/gologme/log v1.3.0 h1:l781G4dE+pbigClDSDzSaaYKtiueHCILUa/qSDsmHAo=
github.com/gologme/log v1.3.0/go.mod h1:yKT+DvIPdDdDoPtqFrFxheooyVmoqi0BAsw+erN3wA4=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU=
github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/writeas/activityserve v0.0.0-20200409150223-d7ab3eaa4481 h1:BiSivIxLQFcKoUorpNN3rNwwFG5bITPnqUSyIccfdh0=
github.com/writeas/activityserve v0.0.0-20200409150223-d7ab3eaa4481/go.mod h1:4akDJSl+sSp+QhrQKMqzAqdV1gJ1pPx6XPI77zgMM8o=
github.com/writeas/go-webfinger v1.1.0 h1:MzNyt0ry/GMsRmJGftn2o9mPwqK1Q5MLdh4VuJCfb1Q=
github.com/writeas/go-webfinger v1.1.0/go.mod h1:w2VxyRO/J5vfNjJHYVubsjUGHd3RLDoVciz0DE3ApOc=
github.com/writefreely/go-nodeinfo v1.2.0 h1:La+YbTCvmpTwFhBSlebWDDL81N88Qf/SCAvRLR7F8ss=
github.com/writefreely/go-nodeinfo v1.2.0/go.mod h1:UTvE78KpcjYOlRHupZIiSEFcXHioTXuacCbHU+CAcPg=
golang.org/x/crypto v0.0.0-20180527072434-ab813273cd59/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/ini.v1 v1.55.0 h1:E8yzL5unfpW3M6fz/eB7Cb5MQAYSZ7GKo4Qth+N2sgQ=
gopkg.in/ini.v1 v1.55.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=

62
main.go Normal file

@ -0,0 +1,62 @@
package main

import (
	"flag"
	"fmt"

	"github.com/gologme/log"
	"github.com/writeas/activityserve"
)

func init() {
	debugFlag := flag.Bool("debug", true, "set to true to get debugging information in the console")
	flag.Parse()
	if *debugFlag {
		log.EnableLevel("info")
		log.EnableLevel("error")
	} else {
		log.DisableLevel("info")
	}
	activityserve.Setup("config.ini", *debugFlag)
	UserDbInit()
}

// startActor attaches a handler that prints the content of every incoming
// activity. The actor is passed by pointer so the handler is set on the
// caller's value rather than on a discarded copy.
func startActor(a *activityserve.Actor) {
	a.OnReceiveContent = func(activity map[string]interface{}) {
		object := activity["object"].(map[string]interface{})
		fmt.Println(object["content"].(string))
	}
}

func main() {
	var actors []activityserve.Actor
	for acct := range Userdb.Keys(nil) {
		acTemp, err := activityserve.GetActor(acct, "This is an activityserver test actor", "Service")
		if err != nil {
			log.Info("Could not create actor for user ", acct, ": ", err.Error())
			continue
		}
		log.Info("Created actor for user: ", acTemp.Name)
		startActor(&acTemp)
		actors = append(actors, acTemp)
	}

	for _, ac := range actors {
		ac.Follow("https://bbs.keinpfusch.net/uriel")
		ac.CreateNote("Hello from: "+ac.Name, "")
	}

	// creating a default listener
	instance, err := activityserve.GetActor("inbox", "This is an activityserver test actor", "Service")
	if err != nil {
		log.Info("Could not create the default listener actor: ", err.Error())
		return
	}
	startActor(&instance)
	activityserve.ServeSingleActor(instance)
}

47
userdb.go Normal file

@ -0,0 +1,47 @@
package main

import (
	"crypto/md5"
	"fmt"

	"github.com/gologme/log"
	"github.com/peterbourgon/diskv/v3"
)

// Userdb is the on-disk key/value store holding one entry per local user.
var Userdb *diskv.Diskv

// flatTransform puts every entry in a single directory named after the md5
// hash of the key (md5 is only used to derive a file name, not for security).
func flatTransform(s string) []string {
	t := fmt.Sprintf("%x", md5.Sum([]byte(s)))
	return []string{t}
}

func UserDbInit() {
	// Initialize a new diskv store, rooted at "userdb", with a 1MB cache.
	// (The simplest possible transform would drop every file into the base
	// dir: func(s string) []string { return []string{} }.)
	Userdb = diskv.New(diskv.Options{
		BasePath:     "userdb",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})
	log.Info("Users Database Created")

	// The stored value is a placeholder for the future encrypted password.
	if err := Userdb.WriteString("Uriel", "future encrypted password"); err == nil {
		log.Info("User Uriel Created")
	} else {
		log.Info("Error while creating user: ", err.Error())
	}
	if err := Userdb.WriteString("Loweel", "future encrypted password"); err == nil {
		log.Info("User Loweel Created")
	} else {
		log.Info("Error while creating user: ", err.Error())
	}
	log.Info("Users Created")
}
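Reading a user back out of the store is symmetric with the WriteString calls above. A minimal sketch, assuming diskv/v3's documented Read method; the helper name userExists is illustrative and not part of this commit:

```go
// userExists reports whether a key is present in Userdb and, if so, returns
// the stored value.
func userExists(name string) (string, bool) {
	val, err := Userdb.Read(name)
	if err != nil {
		return "", false
	}
	return string(val), true
}
```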

21
vendor/github.com/captncraig/cors/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 Craig Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

53
vendor/github.com/captncraig/cors/README.md generated vendored Normal file

@ -0,0 +1,53 @@
cors gives you easy control over Cross Origin Resource Sharing for your site.
It allows you to whitelist particular domains per route, or simply allow all domains (`*`). If desired, you may customize nearly every aspect of the specification.
### Syntax
```
cors [path] [domains...] {
origin [origin]
origin_regexp [regexp]
methods [methods]
allow_credentials [allowCredentials]
max_age [maxAge]
allowed_headers [allowedHeaders]
exposed_headers [exposedHeaders]
}
```
* **path** is the file or directory this applies to (default is /).
* **domains** is a space-separated list of domains to allow. If omitted, all domains will be granted access.
* **origin** is a domain to grant access to. May be specified multiple times or omitted.
* **origin_regexp** is a regexp that will be matched against the `Origin` header. Access will be granted accordingly. It can be used in conjunction with the `origin` config (executed as a fallback to `origin`). May be specified multiple times or omitted.
* **methods** is the set of HTTP methods to allow. The default is POST,GET,OPTIONS,PUT,DELETE.
* **allow_credentials** sets the value of the Access-Control-Allow-Credentials header. Can be true or false. By default, the header is not included.
* **max_age** is the length of time in seconds to cache preflight info. Not set by default.
* **allowed_headers** is a comma-separated list of request headers a client may send.
* **exposed_headers** is a comma-separated list of response headers a client may access.
### Examples
Simply allow all domains to request any path:
```
cors
```
Protect specific paths only, and only allow a few domains:
```
cors /foo http://mysite.com http://anothertrustedsite.com
```
Full configuration:
```
cors / {
origin http://allowedSite.com
origin http://anotherSite.org https://anotherSite.org
origin_regexp .+\.example\.com$
methods POST,PUT
allow_credentials false
max_age 3600
allowed_headers X-Custom-Header,X-Foobar
exposed_headers X-Something-Special,SomethingElse
}
```

117
vendor/github.com/captncraig/cors/cors.go generated vendored Normal file

@ -0,0 +1,117 @@
package cors
import (
"fmt"
"net/http"
"regexp"
)
const (
allowOriginKey string = "Access-Control-Allow-Origin"
allowCredentialsKey = "Access-Control-Allow-Credentials"
allowHeadersKey = "Access-Control-Allow-Headers"
allowMethodsKey = "Access-Control-Allow-Methods"
maxAgeKey = "Access-Control-Max-Age"
originKey = "Origin"
varyKey = "Vary"
requestMethodKey = "Access-Control-Request-Method"
requestHeadersKey = "Access-Control-Request-Headers"
exposeHeadersKey = "Access-Control-Expose-Headers"
options = "OPTIONS"
)
type Config struct {
AllowedOrigins []string
OriginRegexps []*regexp.Regexp
AllowedMethods string
AllowedHeaders string
ExposedHeaders string
AllowCredentials *bool
MaxAge int
}
func Default() *Config {
return &Config{
AllowedOrigins: []string{"*"},
OriginRegexps: []*regexp.Regexp{},
AllowedMethods: "POST, GET, OPTIONS, PUT, DELETE",
AllowedHeaders: "",
ExposedHeaders: "",
MaxAge: 0,
AllowCredentials: nil,
}
}
// Read the request, setting response headers as appropriate.
// Will NOT write anything to response in any circumstances.
func (c *Config) HandleRequest(w http.ResponseWriter, r *http.Request) {
requestOrigin := r.Header.Get(originKey)
if requestOrigin == "" {
return
}
//check origin against allowed origins
for _, ao := range c.AllowedOrigins {
if ao == "*" || ao == requestOrigin {
responseOrigin := "*"
if ao != "*" {
responseOrigin = requestOrigin
}
addAllowOriginHeader(w, responseOrigin)
break
}
}
if w.Header().Get(allowOriginKey) == "" {
if c.anyOriginRegexpMatch(requestOrigin) {
addAllowOriginHeader(w, requestOrigin)
} else {
return //if we didn't set a valid allow-origin, none of the other headers matter
}
}
if IsPreflight(r) {
w.Header().Set(allowMethodsKey, c.AllowedMethods)
if c.AllowedHeaders != "" {
if c.AllowedHeaders != "*" {
w.Header().Set(allowHeadersKey, c.AllowedHeaders)
} else {
w.Header().Set(allowHeadersKey, r.Header.Get(requestHeadersKey))
}
}
if c.MaxAge > 0 {
w.Header().Set(maxAgeKey, fmt.Sprint(c.MaxAge))
}
} else {
//regular request
if c.ExposedHeaders != "" {
w.Header().Set(exposeHeadersKey, c.ExposedHeaders)
}
}
if c.AllowCredentials != nil {
w.Header().Set(allowCredentialsKey, fmt.Sprint(*c.AllowCredentials))
}
}
func IsPreflight(r *http.Request) bool {
return r.Method == options && r.Header.Get(requestMethodKey) != ""
}
func addAllowOriginHeader(w http.ResponseWriter, allowedOrigin string) {
w.Header().Set(allowOriginKey, allowedOrigin)
w.Header().Add(varyKey, originKey)
}
func (c *Config) anyOriginRegexpMatch(origin string) bool {
for _, r := range c.OriginRegexps {
if r.MatchString(origin) {
return true
}
}
return false
}
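HandleRequest only sets headers and never writes a body, so the caller still has to decide how to answer preflight OPTIONS requests. A minimal sketch of wiring the package into a plain net/http server; the withCORS wrapper is an illustration, not part of the vendored code:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/captncraig/cors"
)

// withCORS applies the given cors.Config to every request, answers preflight
// requests directly, and hands everything else to the next handler.
func withCORS(c *cors.Config, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		c.HandleRequest(w, r) // only sets CORS headers
		if cors.IsPreflight(r) {
			w.WriteHeader(http.StatusOK)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	http.ListenAndServe(":8080", withCORS(cors.Default(), mux))
}
```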

6
vendor/github.com/dchest/uniuri/.travis.yml generated vendored Normal file

@ -0,0 +1,6 @@
language: go
go:
- 1.3
- 1.4
- tip

121
vendor/github.com/dchest/uniuri/COPYING generated vendored Normal file

@ -0,0 +1,121 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

97
vendor/github.com/dchest/uniuri/README.md generated vendored Normal file

@ -0,0 +1,97 @@
Package uniuri
=====================
[![Build Status](https://travis-ci.org/dchest/uniuri.svg)](https://travis-ci.org/dchest/uniuri)
```go
import "github.com/dchest/uniuri"
```
Package uniuri generates random strings good for use in URIs to identify
unique objects.
Example usage:
```go
s := uniuri.New() // s is now "apHCJBl7L1OmC57n"
```
A standard string created by New() is 16 bytes in length and consists of
Latin upper and lowercase letters, and numbers (from the set of 62 allowed
characters), which means that it has ~95 bits of entropy. To get more
entropy, you can use NewLen(UUIDLen), which returns a 20-byte string, giving
~119 bits of entropy, or any other desired length.
Functions read from crypto/rand random source, and panic if they fail to
read from it.
Constants
---------
```go
const (
// StdLen is a standard length of uniuri string to achieve ~95 bits of entropy.
StdLen = 16
// UUIDLen is a length of uniuri string to achieve ~119 bits of entropy, closest
// to what can be losslessly converted to UUIDv4 (122 bits).
UUIDLen = 20
)
```
Variables
---------
```go
var StdChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
```
StdChars is a set of standard characters allowed in uniuri string.
Functions
---------
### func New
```go
func New() string
```
New returns a new random string of the standard length, consisting of
standard characters.
### func NewLen
```go
func NewLen(length int) string
```
NewLen returns a new random string of the provided length, consisting of
standard characters.
### func NewLenChars
```go
func NewLenChars(length int, chars []byte) string
```
NewLenChars returns a new random string of the provided length, consisting
of the provided byte slice of allowed characters (maximum 256).
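For instance, a caller that needs tokens drawn from a restricted alphabet can pass its own charset; a small illustrative snippet (the output value is made up):

```go
// An 8-character lowercase-hex token.
token := uniuri.NewLenChars(8, []byte("0123456789abcdef"))
fmt.Println(token) // e.g. "3f9c01ab"
```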
Public domain dedication
------------------------
Written in 2011-2014 by Dmitry Chestnykh
The author(s) have dedicated all copyright and related and
neighboring rights to this software to the public domain
worldwide. Distributed without any warranty.
http://creativecommons.org/publicdomain/zero/1.0/

81
vendor/github.com/dchest/uniuri/uniuri.go generated vendored Normal file

@ -0,0 +1,81 @@
// Written in 2011-2014 by Dmitry Chestnykh
//
// The author(s) have dedicated all copyright and related and
// neighboring rights to this software to the public domain
// worldwide. Distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
// Package uniuri generates random strings good for use in URIs to identify
// unique objects.
//
// Example usage:
//
// s := uniuri.New() // s is now "apHCJBl7L1OmC57n"
//
// A standard string created by New() is 16 bytes in length and consists of
// Latin upper and lowercase letters, and numbers (from the set of 62 allowed
// characters), which means that it has ~95 bits of entropy. To get more
// entropy, you can use NewLen(UUIDLen), which returns a 20-byte string, giving
// ~119 bits of entropy, or any other desired length.
//
// Functions read from crypto/rand random source, and panic if they fail to
// read from it.
package uniuri
import "crypto/rand"
const (
// StdLen is a standard length of uniuri string to achieve ~95 bits of entropy.
StdLen = 16
// UUIDLen is a length of uniuri string to achieve ~119 bits of entropy, closest
// to what can be losslessly converted to UUIDv4 (122 bits).
UUIDLen = 20
)
// StdChars is a set of standard characters allowed in uniuri string.
var StdChars = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
// New returns a new random string of the standard length, consisting of
// standard characters.
func New() string {
return NewLenChars(StdLen, StdChars)
}
// NewLen returns a new random string of the provided length, consisting of
// standard characters.
func NewLen(length int) string {
return NewLenChars(length, StdChars)
}
// NewLenChars returns a new random string of the provided length, consisting
// of the provided byte slice of allowed characters (maximum 256).
func NewLenChars(length int, chars []byte) string {
if length == 0 {
return ""
}
clen := len(chars)
if clen < 2 || clen > 256 {
panic("uniuri: wrong charset length for NewLenChars")
}
maxrb := 255 - (256 % clen)
b := make([]byte, length)
r := make([]byte, length+(length/4)) // storage for random bytes.
i := 0
for {
if _, err := rand.Read(r); err != nil {
panic("uniuri: error reading random bytes: " + err.Error())
}
for _, rb := range r {
c := int(rb)
if c > maxrb {
// Skip this number to avoid modulo bias.
continue
}
b[i] = chars[c%clen]
i++
if i == length {
return string(b)
}
}
}
}

29
vendor/github.com/go-fed/httpsig/LICENSE generated vendored Normal file

@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2018, go-fed
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

94
vendor/github.com/go-fed/httpsig/README.md generated vendored Normal file

@ -0,0 +1,94 @@
# httpsig
`go get github.com/go-fed/httpsig`
Implementation of [HTTP Signatures](https://tools.ietf.org/html/draft-cavage-http-signatures).
Supports many different combinations of MAC, HMAC signing of hash, or RSA
signing of hash schemes. Its goals are:
* Have a very simple interface for signing and validating
* Support a variety of signing algorithms and combinations
* Support setting either headers (`Authorization` or `Signature`)
* Remaining flexible with headers included in the signing string
* Support both HTTP requests and responses
* Explicitly not support known-cryptographically weak algorithms
* Support automatic signing and validating Digest headers
## How to use
`import "github.com/go-fed/httpsig"`
### Signing
Signing a request or response requires creating a new `Signer` and using it:
```
func sign(privateKey crypto.PrivateKey, pubKeyId string, r *http.Request) error {
prefs := []httpsig.Algorithm{httpsig.RSA_SHA512, httpsig.RSA_SHA256}
digestAlgorithm := httpsig.DigestSha256
// The "Date" and "Digest" headers must already be set on r, as well as r.URL.
headersToSign := []string{httpsig.RequestTarget, "date", "digest"}
signer, chosenAlgo, err := httpsig.NewSigner(prefs, digestAlgorithm, headersToSign, httpsig.Signature)
if err != nil {
return err
}
// To sign the digest, we need to give the signer a copy of the body...
// ...but it is optional, no digest will be signed if given "nil"
body := ...
// If r were a http.ResponseWriter, call SignResponse instead.
return signer.SignRequest(privateKey, pubKeyId, r, body)
}
```
`Signer`s are not safe for concurrent use by goroutines, so be sure to guard
access:
```
type server struct {
signer httpsig.Signer
mu *sync.Mutex
}
func (s *server) handlerFunc(w http.ResponseWriter, r *http.Request) {
privateKey := ...
pubKeyId := ...
// Set headers and such on w
s.mu.Lock()
defer s.mu.Unlock()
// To sign the digest, we need to give the signer a copy of the response body...
// ...but it is optional, no digest will be signed if given "nil"
body := ...
err := s.signer.SignResponse(privateKey, pubKeyId, w, body)
if err != nil {
...
}
...
}
```
The `pubKeyId` will be used at verification time.
### Verifying
Verifying requires an application to use the `pubKeyId` to both retrieve the key
needed for verification as well as determine the algorithm to use. Use a
`Verifier`:
```
func verify(r *http.Request) error {
verifier, err := httpsig.NewVerifier(r)
if err != nil {
return err
}
pubKeyId := verifier.KeyId()
var algo httpsig.Algorithm = ...
var pubKey crypto.PublicKey = ...
// The verifier will verify the Digest in addition to the HTTP signature
return verifier.Verify(pubKey, algo)
}
```
`Verifier`s are not safe for concurrent use by goroutines, but since they are
constructed on a per-request or per-response basis it should not be a common
restriction.
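Putting the signing half together for an ActivityPub-style delivery, here is a self-contained sketch that signs (request-target) and Date on an outgoing request. The key, URL, and key id are throwaway values for illustration only; no Digest is added because the body is nil:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"
	"net/http"
	"time"

	"github.com/go-fed/httpsig"
)

func main() {
	// Throwaway key; a real server would load its actor's key instead.
	privKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	req, err := http.NewRequest("GET", "https://example.com/inbox", nil)
	if err != nil {
		log.Fatal(err)
	}
	// "date" is in the headers-to-sign list, so the header must be present.
	req.Header.Set("Date", time.Now().UTC().Format(http.TimeFormat))

	headers := []string{httpsig.RequestTarget, "date"}
	signer, algo, err := httpsig.NewSigner(
		[]httpsig.Algorithm{httpsig.RSA_SHA256}, httpsig.DigestSha256, headers, httpsig.Signature)
	if err != nil {
		log.Fatal(err)
	}

	if err := signer.SignRequest(privKey, "https://example.com/actor#main-key", req, nil); err != nil {
		log.Fatal(err)
	}
	log.Println("signed with", algo, "->", req.Header.Get("Signature"))
}
```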

370
vendor/github.com/go-fed/httpsig/algorithms.go generated vendored Normal file

@ -0,0 +1,370 @@
package httpsig
import (
"crypto"
"crypto/hmac"
"crypto/rsa"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle" // Use should trigger great care
"errors"
"fmt"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/blake2s"
"golang.org/x/crypto/ripemd160"
"golang.org/x/crypto/sha3"
"hash"
"io"
"strings"
)
const (
hmacPrefix = "hmac"
rsaPrefix = "rsa"
md4String = "md4"
md5String = "md5"
sha1String = "sha1"
sha224String = "sha224"
sha256String = "sha256"
sha384String = "sha384"
sha512String = "sha512"
md5sha1String = "md5sha1"
ripemd160String = "ripemd160"
sha3_224String = "sha3-224"
sha3_256String = "sha3-256"
sha3_384String = "sha3-384"
sha3_512String = "sha3-512"
sha512_224String = "sha512-224"
sha512_256String = "sha512-256"
blake2s_256String = "blake2s-256"
blake2b_256String = "blake2b-256"
blake2b_384String = "blake2b-384"
blake2b_512String = "blake2b-512"
)
var blake2Algorithms = map[crypto.Hash]bool{
crypto.BLAKE2s_256: true,
crypto.BLAKE2b_256: true,
crypto.BLAKE2b_384: true,
crypto.BLAKE2b_512: true,
}
var hashToDef = map[crypto.Hash]struct {
name string
new func(key []byte) (hash.Hash, error) // Only MACers will accept a key
}{
// Which standard names these?
// The spec lists the following as a canonical reference, which is dead:
// http://www.iana.org/assignments/signature-algorithms
//
// Note that the forbidden hashes have an invalid 'new' function.
crypto.MD4: {md4String, func(key []byte) (hash.Hash, error) { return nil, nil }},
crypto.MD5: {md5String, func(key []byte) (hash.Hash, error) { return nil, nil }},
crypto.SHA1: {sha1String, func(key []byte) (hash.Hash, error) { return nil, nil }},
crypto.SHA224: {sha224String, func(key []byte) (hash.Hash, error) { return sha256.New224(), nil }},
crypto.SHA256: {sha256String, func(key []byte) (hash.Hash, error) { return sha256.New(), nil }},
crypto.SHA384: {sha384String, func(key []byte) (hash.Hash, error) { return sha512.New384(), nil }},
crypto.SHA512: {sha512String, func(key []byte) (hash.Hash, error) { return sha512.New(), nil }},
crypto.MD5SHA1: {md5sha1String, func(key []byte) (hash.Hash, error) { return nil, nil }},
crypto.RIPEMD160: {ripemd160String, func(key []byte) (hash.Hash, error) { return ripemd160.New(), nil }},
crypto.SHA3_224: {sha3_224String, func(key []byte) (hash.Hash, error) { return sha3.New224(), nil }},
crypto.SHA3_256: {sha3_256String, func(key []byte) (hash.Hash, error) { return sha3.New256(), nil }},
crypto.SHA3_384: {sha3_384String, func(key []byte) (hash.Hash, error) { return sha3.New384(), nil }},
crypto.SHA3_512: {sha3_512String, func(key []byte) (hash.Hash, error) { return sha3.New512(), nil }},
crypto.SHA512_224: {sha512_224String, func(key []byte) (hash.Hash, error) { return sha512.New512_224(), nil }},
crypto.SHA512_256: {sha512_256String, func(key []byte) (hash.Hash, error) { return sha512.New512_256(), nil }},
crypto.BLAKE2s_256: {blake2s_256String, func(key []byte) (hash.Hash, error) { return blake2s.New256(key) }},
crypto.BLAKE2b_256: {blake2b_256String, func(key []byte) (hash.Hash, error) { return blake2b.New256(key) }},
crypto.BLAKE2b_384: {blake2b_384String, func(key []byte) (hash.Hash, error) { return blake2b.New384(key) }},
crypto.BLAKE2b_512: {blake2b_512String, func(key []byte) (hash.Hash, error) { return blake2b.New512(key) }},
}
var stringToHash map[string]crypto.Hash
const (
defaultAlgorithm = RSA_SHA256
defaultAlgorithmHashing = sha256String
)
func init() {
stringToHash = make(map[string]crypto.Hash, len(hashToDef))
for k, v := range hashToDef {
stringToHash[v.name] = k
}
// This should guarantee that at runtime the defaultAlgorithm will not
// result in errors when fetching a macer or signer (see algorithms.go)
if ok, err := isAvailable(string(defaultAlgorithmHashing)); err != nil {
panic(err)
} else if !ok {
panic(fmt.Sprintf("the default httpsig algorithm is unavailable: %q", defaultAlgorithm))
}
}
func isForbiddenHash(h crypto.Hash) bool {
switch h {
// Not actually cryptographically secure
case crypto.MD4:
fallthrough
case crypto.MD5:
fallthrough
case crypto.SHA1:
fallthrough
case crypto.MD5SHA1: // shorthand for crypto/tls, not actually implemented
return true
}
// Still cryptographically secure
return false
}
// signer is an internally public type.
type signer interface {
Sign(rand io.Reader, p crypto.PrivateKey, sig []byte) ([]byte, error)
Verify(pub crypto.PublicKey, toHash, signature []byte) error
String() string
}
// macer is an internally public type.
type macer interface {
Sign(sig, key []byte) ([]byte, error)
Equal(sig, actualMAC, key []byte) (bool, error)
String() string
}
var _ macer = &hmacAlgorithm{}
type hmacAlgorithm struct {
fn func(key []byte) (hash.Hash, error)
kind crypto.Hash
}
func (h *hmacAlgorithm) Sign(sig, key []byte) ([]byte, error) {
hs, err := h.fn(key)
if err != nil {
return nil, err
}
if err = setSig(hs, sig); err != nil {
return nil, err
}
return hs.Sum(nil), nil
}
func (h *hmacAlgorithm) Equal(sig, actualMAC, key []byte) (bool, error) {
hs, err := h.fn(key)
if err != nil {
return false, err
}
defer hs.Reset()
err = setSig(hs, sig)
if err != nil {
return false, err
}
expected := hs.Sum(nil)
return hmac.Equal(actualMAC, expected), nil
}
func (h *hmacAlgorithm) String() string {
return fmt.Sprintf("%s-%s", hmacPrefix, hashToDef[h.kind].name)
}
var _ signer = &rsaAlgorithm{}
type rsaAlgorithm struct {
hash.Hash
kind crypto.Hash
}
func (r *rsaAlgorithm) setSig(b []byte) error {
n, err := r.Write(b)
if err != nil {
r.Reset()
return err
} else if n != len(b) {
r.Reset()
return fmt.Errorf("could only write %d of %d bytes of signature to hash", n, len(b))
}
return nil
}
func (r *rsaAlgorithm) Sign(rand io.Reader, p crypto.PrivateKey, sig []byte) ([]byte, error) {
defer r.Reset()
if err := r.setSig(sig); err != nil {
return nil, err
}
rsaK, ok := p.(*rsa.PrivateKey)
if !ok {
return nil, errors.New("crypto.PrivateKey is not *rsa.PrivateKey")
}
return rsa.SignPKCS1v15(rand, rsaK, r.kind, r.Sum(nil))
}
func (r *rsaAlgorithm) Verify(pub crypto.PublicKey, toHash, signature []byte) error {
defer r.Reset()
rsaK, ok := pub.(*rsa.PublicKey)
if !ok {
return errors.New("crypto.PublicKey is not *rsa.PublicKey")
}
if err := r.setSig(toHash); err != nil {
return err
}
return rsa.VerifyPKCS1v15(rsaK, r.kind, r.Sum(nil), signature)
}
func (r *rsaAlgorithm) String() string {
return fmt.Sprintf("%s-%s", rsaPrefix, hashToDef[r.kind].name)
}
var _ macer = &blakeMacAlgorithm{}
type blakeMacAlgorithm struct {
fn func(key []byte) (hash.Hash, error)
kind crypto.Hash
}
func (r *blakeMacAlgorithm) Sign(sig, key []byte) ([]byte, error) {
hs, err := r.fn(key)
if err != nil {
return nil, err
}
if err = setSig(hs, sig); err != nil {
return nil, err
}
return hs.Sum(nil), nil
}
func (r *blakeMacAlgorithm) Equal(sig, actualMAC, key []byte) (bool, error) {
hs, err := r.fn(key)
if err != nil {
return false, err
}
defer hs.Reset()
err = setSig(hs, sig)
if err != nil {
return false, err
}
expected := hs.Sum(nil)
return subtle.ConstantTimeCompare(actualMAC, expected) == 1, nil
}
func (r *blakeMacAlgorithm) String() string {
return fmt.Sprintf("%s", hashToDef[r.kind].name)
}
func setSig(a hash.Hash, b []byte) error {
n, err := a.Write(b)
if err != nil {
a.Reset()
return err
} else if n != len(b) {
a.Reset()
return fmt.Errorf("could only write %d of %d bytes of signature to hash", n, len(b))
}
return nil
}
// IsSupportedHttpSigAlgorithm returns true if the string is supported by this
// library, is not a hash known to be weak, and is supported by the hardware.
func IsSupportedHttpSigAlgorithm(algo string) bool {
a, err := isAvailable(algo)
return a && err == nil
}
// isAvailable is an internally public function
func isAvailable(algo string) (bool, error) {
c, ok := stringToHash[algo]
if !ok {
return false, fmt.Errorf("no match for %q", algo)
}
if isForbiddenHash(c) {
return false, fmt.Errorf("forbidden hash type in %q", algo)
}
return c.Available(), nil
}
func newAlgorithmConstructor(algo string) (fn func(k []byte) (hash.Hash, error), c crypto.Hash, e error) {
ok := false
c, ok = stringToHash[algo]
if !ok {
e = fmt.Errorf("no match for %q", algo)
return
}
if isForbiddenHash(c) {
e = fmt.Errorf("forbidden hash type in %q", algo)
return
}
algoDef, ok := hashToDef[c]
if !ok {
e = fmt.Errorf("have crypto.Hash %v but no definition", c)
return
}
fn = func(key []byte) (hash.Hash, error) {
h, err := algoDef.new(key)
if err != nil {
return nil, err
}
return h, nil
}
return
}
func newAlgorithm(algo string, key []byte) (hash.Hash, crypto.Hash, error) {
fn, c, err := newAlgorithmConstructor(algo)
if err != nil {
return nil, c, err
}
h, err := fn(key)
return h, c, err
}
// signerFromString is an internally public method constructor
func signerFromString(s string) (signer, error) {
s = strings.ToLower(s)
if !strings.HasPrefix(s, rsaPrefix) {
return nil, fmt.Errorf("no signer matching %q", s)
}
algo := strings.TrimPrefix(s, rsaPrefix+"-")
hash, cHash, err := newAlgorithm(algo, nil)
if err != nil {
return nil, err
}
return &rsaAlgorithm{
Hash: hash,
kind: cHash,
}, nil
}
// macerFromString is an internally public method constructor
func macerFromString(s string) (macer, error) {
s = strings.ToLower(s)
if strings.HasPrefix(s, hmacPrefix) {
algo := strings.TrimPrefix(s, hmacPrefix+"-")
hashFn, cHash, err := newAlgorithmConstructor(algo)
if err != nil {
return nil, err
}
// Ensure below does not panic
_, err = hashFn(nil)
if err != nil {
return nil, err
}
return &hmacAlgorithm{
fn: func(key []byte) (hash.Hash, error) {
return hmac.New(func() hash.Hash {
h, e := hashFn(nil)
if e != nil {
panic(e)
}
return h
}, key), nil
},
kind: cHash,
}, nil
} else if bl, ok := stringToHash[s]; ok && blake2Algorithms[bl] {
hashFn, cHash, err := newAlgorithmConstructor(s)
if err != nil {
return nil, err
}
return &blakeMacAlgorithm{
fn: hashFn,
kind: cHash,
}, nil
} else {
return nil, fmt.Errorf("no MACer matching %q", s)
}
}

120
vendor/github.com/go-fed/httpsig/digest.go generated vendored Normal file

@ -0,0 +1,120 @@
package httpsig
import (
"bytes"
"crypto"
"encoding/base64"
"fmt"
"hash"
"net/http"
"strings"
)
type DigestAlgorithm string
const (
DigestSha256 DigestAlgorithm = "SHA-256"
DigestSha512 = "SHA-512"
)
var digestToDef = map[DigestAlgorithm]crypto.Hash{
DigestSha256: crypto.SHA256,
DigestSha512: crypto.SHA512,
}
// IsSupportedDigestAlgorithm returns true if the string is supported by this
// library, is not a hash known to be weak, and is supported by the hardware.
func IsSupportedDigestAlgorithm(algo string) bool {
uc := DigestAlgorithm(strings.ToUpper(algo))
c, ok := digestToDef[uc]
return ok && c.Available()
}
func getHash(alg DigestAlgorithm) (h hash.Hash, toUse DigestAlgorithm, err error) {
upper := DigestAlgorithm(strings.ToUpper(string(alg)))
c, ok := digestToDef[upper]
if !ok {
err = fmt.Errorf("unknown or unsupported Digest algorithm: %s", alg)
} else if !c.Available() {
err = fmt.Errorf("unavailable Digest algorithm: %s", alg)
} else {
h = c.New()
toUse = upper
}
return
}
const (
digestHeader = "Digest"
digestDelim = "="
)
func addDigest(r *http.Request, algo DigestAlgorithm, b []byte) (err error) {
_, ok := r.Header[digestHeader]
if ok {
err = fmt.Errorf("cannot add Digest: Digest is already set")
return
}
var h hash.Hash
var a DigestAlgorithm
h, a, err = getHash(algo)
if err != nil {
return
}
h.Write(b)
sum := h.Sum(nil)
r.Header.Add(digestHeader,
fmt.Sprintf("%s%s%s",
a,
digestDelim,
base64.StdEncoding.EncodeToString(sum[:])))
return
}
func addDigestResponse(r http.ResponseWriter, algo DigestAlgorithm, b []byte) (err error) {
_, ok := r.Header()[digestHeader]
if ok {
err = fmt.Errorf("cannot add Digest: Digest is already set")
return
}
var h hash.Hash
var a DigestAlgorithm
h, a, err = getHash(algo)
if err != nil {
return
}
h.Write(b)
sum := h.Sum(nil)
r.Header().Add(digestHeader,
fmt.Sprintf("%s%s%s",
a,
digestDelim,
base64.StdEncoding.EncodeToString(sum[:])))
return
}
func verifyDigest(r *http.Request, body *bytes.Buffer) (err error) {
d := r.Header.Get(digestHeader)
if len(d) == 0 {
err = fmt.Errorf("cannot verify Digest: request has no Digest header")
return
}
elem := strings.SplitN(d, digestDelim, 2)
if len(elem) != 2 {
err = fmt.Errorf("cannot verify Digest: malformed Digest: %s", d)
return
}
var h hash.Hash
h, _, err = getHash(DigestAlgorithm(elem[0]))
if err != nil {
return
}
h.Write(body.Bytes())
sum := h.Sum(nil)
encSum := base64.StdEncoding.EncodeToString(sum[:])
if encSum != elem[1] {
err = fmt.Errorf("cannot verify Digest: header Digest does not match the digest of the request body")
return
}
return
}
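To make the header format concrete, this small standalone sketch reproduces the value addDigest would place in the Digest header for a given body: the algorithm name, the "=" delimiter, then the base64 encoding of the raw hash. The JSON body is made up:

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	body := []byte(`{"type":"Note"}`)
	sum := sha256.Sum256(body)
	// Same construction as addDigest with DigestSha256: "SHA-256=" + base64(hash).
	fmt.Println("Digest: SHA-256=" + base64.StdEncoding.EncodeToString(sum[:]))
}
```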

2
vendor/github.com/go-fed/httpsig/go.modverify generated vendored Normal file

@ -0,0 +1,2 @@
golang.org/x/crypto v0.0.0-20180527072434-ab813273cd59 h1:hk3yo72LXLapY9EXVttc3Z1rLOxT9IuAPPX3GpY2+jo=
golang.org/x/sys v0.0.0-20180525142821-c11f84a56e43 h1:PvnWIWTbA7gsEBkKjt0HV9hckYfcqYv8s/ju7ArZ0do=

241
vendor/github.com/go-fed/httpsig/httpsig.go generated vendored Normal file

@ -0,0 +1,241 @@
// Implements HTTP request and response signing and verification. Supports the
// major MAC and asymmetric key signature algorithms. It has several safety
// restrictions: One, none of the widely known non-cryptographically safe
// algorithms are permitted; Two, the RSA SHA256 algorithms must be available in
// the binary (and it should, barring export restrictions); Finally, the library
// assumes either the 'Authorization' or 'Signature' headers are to be set (but
// not both).
package httpsig
import (
"crypto"
"fmt"
"net/http"
)
// Algorithm specifies a cryptographically secure algorithm for signing HTTP requests
// and responses.
type Algorithm string
const (
// MAC-based algorithms.
HMAC_SHA224 Algorithm = hmacPrefix + "-" + sha224String
HMAC_SHA256 Algorithm = hmacPrefix + "-" + sha256String
HMAC_SHA384 Algorithm = hmacPrefix + "-" + sha384String
HMAC_SHA512 Algorithm = hmacPrefix + "-" + sha512String
HMAC_RIPEMD160 Algorithm = hmacPrefix + "-" + ripemd160String
HMAC_SHA3_224 Algorithm = hmacPrefix + "-" + sha3_224String
HMAC_SHA3_256 Algorithm = hmacPrefix + "-" + sha3_256String
HMAC_SHA3_384 Algorithm = hmacPrefix + "-" + sha3_384String
HMAC_SHA3_512 Algorithm = hmacPrefix + "-" + sha3_512String
HMAC_SHA512_224 Algorithm = hmacPrefix + "-" + sha512_224String
HMAC_SHA512_256 Algorithm = hmacPrefix + "-" + sha512_256String
HMAC_BLAKE2S_256 Algorithm = hmacPrefix + "-" + blake2s_256String
HMAC_BLAKE2B_256 Algorithm = hmacPrefix + "-" + blake2b_256String
HMAC_BLAKE2B_384 Algorithm = hmacPrefix + "-" + blake2b_384String
HMAC_BLAKE2B_512 Algorithm = hmacPrefix + "-" + blake2b_512String
BLAKE2S_256 Algorithm = blake2s_256String
BLAKE2B_256 Algorithm = blake2b_256String
BLAKE2B_384 Algorithm = blake2b_384String
BLAKE2B_512 Algorithm = blake2b_512String
// RSA-based algorithms.
RSA_SHA224 Algorithm = rsaPrefix + "-" + sha224String
// RSA_SHA256 is the default algorithm.
RSA_SHA256 Algorithm = rsaPrefix + "-" + sha256String
RSA_SHA384 Algorithm = rsaPrefix + "-" + sha384String
RSA_SHA512 Algorithm = rsaPrefix + "-" + sha512String
RSA_RIPEMD160 Algorithm = rsaPrefix + "-" + ripemd160String
// Just because you can glue things together, doesn't mean they will
// work. The following options are not supported.
rsa_SHA3_224 Algorithm = rsaPrefix + "-" + sha3_224String
rsa_SHA3_256 Algorithm = rsaPrefix + "-" + sha3_256String
rsa_SHA3_384 Algorithm = rsaPrefix + "-" + sha3_384String
rsa_SHA3_512 Algorithm = rsaPrefix + "-" + sha3_512String
rsa_SHA512_224 Algorithm = rsaPrefix + "-" + sha512_224String
rsa_SHA512_256 Algorithm = rsaPrefix + "-" + sha512_256String
rsa_BLAKE2S_256 Algorithm = rsaPrefix + "-" + blake2s_256String
rsa_BLAKE2B_256 Algorithm = rsaPrefix + "-" + blake2b_256String
rsa_BLAKE2B_384 Algorithm = rsaPrefix + "-" + blake2b_384String
rsa_BLAKE2B_512 Algorithm = rsaPrefix + "-" + blake2b_512String
)
// HTTP Signatures can be applied to different HTTP headers, depending on the
// expected application behavior.
type SignatureScheme string
const (
// Signature will place the HTTP Signature into the 'Signature' HTTP
// header.
Signature SignatureScheme = "Signature"
// Authorization will place the HTTP Signature into the 'Authorization'
// HTTP header.
Authorization SignatureScheme = "Authorization"
)
const (
// The HTTP Signatures specification uses the "Signature" auth-scheme
// for the Authorization header. This is coincidentally named, but not
// semantically the same, as the "Signature" HTTP header value.
signatureAuthScheme = "Signature"
)
// There are subtle differences to the values in the header. The Authorization
// header has an 'auth-scheme' value that must be prefixed to the rest of the
// key and values.
func (s SignatureScheme) authScheme() string {
switch s {
case Authorization:
return signatureAuthScheme
default:
return ""
}
}
// Signers will sign HTTP requests or responses based on the algorithms and
// headers selected at creation time.
//
// Signers are not safe to use between multiple goroutines.
//
// Note that signatures do set the deprecated 'algorithm' parameter for
// backwards compatibility.
type Signer interface {
// SignRequest signs the request using a private key. The public key id
// is used by the HTTP server to identify which key to use to verify the
// signature.
//
// If the Signer was created using a MAC based algorithm, then the key
// is expected to be of type []byte. If the Signer was created using an
// RSA based algorithm, then the private key is expected to be of type
// *rsa.PrivateKey.
//
// A Digest (RFC 3230) will be added to the request. The body provided
// must match the body used in the request, and is allowed to be nil.
// The Digest ensures the request body is not tampered with in flight,
// and if the signer is created to also sign the "Digest" header, the
// HTTP Signature will then ensure both the Digest and body are not both
// modified to maliciously represent different content.
SignRequest(pKey crypto.PrivateKey, pubKeyId string, r *http.Request, body []byte) error
// SignResponse signs the response using a private key. The public key
// id is used by the HTTP client to identify which key to use to verify
// the signature.
//
// If the Signer was created using a MAC based algorithm, then the key
// is expected to be of type []byte. If the Signer was created using an
// RSA based algorithm, then the private key is expected to be of type
// *rsa.PrivateKey.
//
// A Digest (RFC 3230) will be added to the response. The body provided
// must match the body written in the response, and is allowed to be
// nil. The Digest ensures the response body is not tampered with in
// flight, and if the signer is created to also sign the "Digest"
// header, the HTTP Signature will then ensure both the Digest and body
// are not both modified to maliciously represent different content.
SignResponse(pKey crypto.PrivateKey, pubKeyId string, r http.ResponseWriter, body []byte) error
}
// NewSigner creates a new Signer with the provided algorithm preferences to
// make HTTP signatures. Only the first available algorithm will be used, which
// is returned by this function along with the Signer. If none of the preferred
// algorithms were available, then the default algorithm is used. The headers
// specified will be included into the HTTP signatures.
//
// The Digest will also be calculated on a request's body using the provided
// digest algorithm, if "Digest" is one of the headers listed.
//
// The provided scheme determines which header is populated with the HTTP
// Signature.
//
// An error is returned if an unknown or a known cryptographically insecure
// Algorithm is provided.
func NewSigner(prefs []Algorithm, dAlgo DigestAlgorithm, headers []string, scheme SignatureScheme) (Signer, Algorithm, error) {
for _, pref := range prefs {
s, err := newSigner(pref, dAlgo, headers, scheme)
if err != nil {
continue
}
return s, pref, err
}
s, err := newSigner(defaultAlgorithm, dAlgo, headers, scheme)
return s, defaultAlgorithm, err
}
// Verifier verifies HTTP Signatures.
//
// It will determine which of the supported headers has the parameters
// that define the signature.
//
// Verifiers are not safe to use between multiple goroutines.
//
// Note that verification ignores the deprecated 'algorithm' parameter.
type Verifier interface {
// KeyId gets the public key id that the signature is signed with.
//
// Note that the application is expected to determine the algorithm
// used based on metadata or out-of-band information for this key id.
KeyId() string
// Verify accepts the public key specified by KeyId and returns an
// error if verification fails or if the signature is malformed. The
// algorithm must be the one used to create the signature in order to
// pass verification. The algorithm is determined based on metadata or
// out-of-band information for the key id.
//
// If the signature was created using a MAC based algorithm, then the
// key is expected to be of type []byte. If the signature was created
// using an RSA based algorithm, then the public key is expected to be
// of type *rsa.PublicKey.
Verify(pKey crypto.PublicKey, algo Algorithm) error
}
const (
// host is treated specially because golang may not include it in the
// request header map on the server side of a request.
hostHeader = "Host"
)
// NewVerifier verifies the given request. It returns an error if the HTTP
// Signature parameters are not present in any headers, are present in more than
// one header, are malformed, or are missing required parameters. It ignores
// unknown HTTP Signature parameters.
func NewVerifier(r *http.Request) (Verifier, error) {
h := r.Header
if _, hasHostHeader := h[hostHeader]; len(r.Host) > 0 && !hasHostHeader {
h[hostHeader] = []string{r.Host}
}
return newVerifier(h, func(h http.Header, toInclude []string) (string, error) {
return signatureString(h, toInclude, addRequestTarget(r))
})
}
// NewResponseVerifier verifies the given response. It returns errors under the
// same conditions as NewVerifier.
func NewResponseVerifier(r *http.Response) (Verifier, error) {
return newVerifier(r.Header, func(h http.Header, toInclude []string) (string, error) {
return signatureString(h, toInclude, requestTargetNotPermitted)
})
}
func newSigner(algo Algorithm, dAlgo DigestAlgorithm, headers []string, scheme SignatureScheme) (Signer, error) {
s, err := signerFromString(string(algo))
if err == nil {
a := &asymmSigner{
s: s,
dAlgo: dAlgo,
headers: headers,
targetHeader: scheme,
prefix: scheme.authScheme(),
}
return a, nil
}
m, err := macerFromString(string(algo))
if err != nil {
return nil, fmt.Errorf("no crypto implementation available for %q", algo)
}
c := &macSigner{
m: m,
dAlgo: dAlgo,
headers: headers,
targetHeader: scheme,
prefix: scheme.authScheme(),
}
return c, nil
}

272
vendor/github.com/go-fed/httpsig/signing.go generated vendored Normal file

@ -0,0 +1,272 @@
package httpsig
import (
"bytes"
"crypto"
"crypto/rand"
"encoding/base64"
"fmt"
"net/http"
"net/textproto"
"strings"
)
const (
// Signature Parameters
keyIdParameter = "keyId"
algorithmParameter = "algorithm"
headersParameter = "headers"
signatureParameter = "signature"
prefixSeparater = " "
parameterKVSeparater = "="
parameterValueDelimiter = "\""
parameterSeparater = ","
headerParameterValueDelim = " "
// RequestTarget specifies to include the http request method and
// entire URI in the signature. Pass it as a header to NewSigner.
RequestTarget = "(request-target)"
dateHeader = "date"
// Signature String Construction
headerFieldDelimiter = ": "
headersDelimiter = "\n"
headerValueDelimiter = ", "
requestTargetSeparator = " "
)
var defaultHeaders = []string{dateHeader}
var _ Signer = &macSigner{}
type macSigner struct {
m macer
makeDigest bool
dAlgo DigestAlgorithm
headers []string
targetHeader SignatureScheme
prefix string
}
func (m *macSigner) SignRequest(pKey crypto.PrivateKey, pubKeyId string, r *http.Request, body []byte) error {
if body != nil {
err := addDigest(r, m.dAlgo, body)
if err != nil {
return err
}
}
s, err := m.signatureString(r)
if err != nil {
return err
}
enc, err := m.signSignature(pKey, s)
if err != nil {
return err
}
setSignatureHeader(r.Header, string(m.targetHeader), m.prefix, pubKeyId, m.m.String(), enc, m.headers)
return nil
}
func (m *macSigner) SignResponse(pKey crypto.PrivateKey, pubKeyId string, r http.ResponseWriter, body []byte) error {
if body != nil {
err := addDigestResponse(r, m.dAlgo, body)
if err != nil {
return err
}
}
s, err := m.signatureStringResponse(r)
if err != nil {
return err
}
enc, err := m.signSignature(pKey, s)
if err != nil {
return err
}
setSignatureHeader(r.Header(), string(m.targetHeader), m.prefix, pubKeyId, m.m.String(), enc, m.headers)
return nil
}
func (m *macSigner) signSignature(pKey crypto.PrivateKey, s string) (string, error) {
pKeyBytes, ok := pKey.([]byte)
if !ok {
return "", fmt.Errorf("private key for MAC signing must be of type []byte")
}
sig, err := m.m.Sign([]byte(s), pKeyBytes)
if err != nil {
return "", err
}
enc := base64.StdEncoding.EncodeToString(sig)
return enc, nil
}
func (m *macSigner) signatureString(r *http.Request) (string, error) {
return signatureString(r.Header, m.headers, addRequestTarget(r))
}
func (m *macSigner) signatureStringResponse(r http.ResponseWriter) (string, error) {
return signatureString(r.Header(), m.headers, requestTargetNotPermitted)
}
var _ Signer = &asymmSigner{}
type asymmSigner struct {
s signer
makeDigest bool
dAlgo DigestAlgorithm
headers []string
targetHeader SignatureScheme
prefix string
}
func (a *asymmSigner) SignRequest(pKey crypto.PrivateKey, pubKeyId string, r *http.Request, body []byte) error {
if body != nil {
err := addDigest(r, a.dAlgo, body)
if err != nil {
return err
}
}
s, err := a.signatureString(r)
if err != nil {
return err
}
enc, err := a.signSignature(pKey, s)
if err != nil {
return err
}
setSignatureHeader(r.Header, string(a.targetHeader), a.prefix, pubKeyId, a.s.String(), enc, a.headers)
return nil
}
func (a *asymmSigner) SignResponse(pKey crypto.PrivateKey, pubKeyId string, r http.ResponseWriter, body []byte) error {
if body != nil {
err := addDigestResponse(r, a.dAlgo, body)
if err != nil {
return err
}
}
s, err := a.signatureStringResponse(r)
if err != nil {
return err
}
enc, err := a.signSignature(pKey, s)
if err != nil {
return err
}
setSignatureHeader(r.Header(), string(a.targetHeader), a.prefix, pubKeyId, a.s.String(), enc, a.headers)
return nil
}
func (a *asymmSigner) signSignature(pKey crypto.PrivateKey, s string) (string, error) {
sig, err := a.s.Sign(rand.Reader, pKey, []byte(s))
if err != nil {
return "", err
}
enc := base64.StdEncoding.EncodeToString(sig)
return enc, nil
}
func (a *asymmSigner) signatureString(r *http.Request) (string, error) {
return signatureString(r.Header, a.headers, addRequestTarget(r))
}
func (a *asymmSigner) signatureStringResponse(r http.ResponseWriter) (string, error) {
return signatureString(r.Header(), a.headers, requestTargetNotPermitted)
}
func setSignatureHeader(h http.Header, targetHeader, prefix, pubKeyId, algo, enc string, headers []string) {
if len(headers) == 0 {
headers = defaultHeaders
}
var b bytes.Buffer
// KeyId
b.WriteString(prefix)
if len(prefix) > 0 {
b.WriteString(prefixSeparater)
}
b.WriteString(keyIdParameter)
b.WriteString(parameterKVSeparater)
b.WriteString(parameterValueDelimiter)
b.WriteString(pubKeyId)
b.WriteString(parameterValueDelimiter)
b.WriteString(parameterSeparater)
// Algorithm (deprecated)
// TODO: Remove this.
b.WriteString(algorithmParameter)
b.WriteString(parameterKVSeparater)
b.WriteString(parameterValueDelimiter)
b.WriteString(algo)
b.WriteString(parameterValueDelimiter)
b.WriteString(parameterSeparater)
// Headers
b.WriteString(headersParameter)
b.WriteString(parameterKVSeparater)
b.WriteString(parameterValueDelimiter)
for i, h := range headers {
b.WriteString(strings.ToLower(h))
if i != len(headers)-1 {
b.WriteString(headerParameterValueDelim)
}
}
b.WriteString(parameterValueDelimiter)
b.WriteString(parameterSeparater)
// Signature
b.WriteString(signatureParameter)
b.WriteString(parameterKVSeparater)
b.WriteString(parameterValueDelimiter)
b.WriteString(enc)
b.WriteString(parameterValueDelimiter)
h.Add(targetHeader, b.String())
}
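// Illustrative example (not from the upstream source): with headers
// ["(request-target)", "date"], the value written by setSignatureHeader looks
// roughly like
//
//	keyId="https://example.org/actor#main-key",algorithm="rsa-sha256",headers="(request-target) date",signature="MEUCIQ...=="
//
// optionally preceded by an auth-scheme prefix such as "Signature " when the
// Authorization header is the target.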
func requestTargetNotPermitted(b *bytes.Buffer) error {
return fmt.Errorf("cannot sign with %q on anything other than an http request", RequestTarget)
}
func addRequestTarget(r *http.Request) func(b *bytes.Buffer) error {
return func(b *bytes.Buffer) error {
b.WriteString(RequestTarget)
b.WriteString(headerFieldDelimiter)
b.WriteString(strings.ToLower(r.Method))
b.WriteString(requestTargetSeparator)
b.WriteString(r.URL.Path)
if r.URL.RawQuery != "" {
b.WriteString("?")
b.WriteString(r.URL.RawQuery)
}
return nil
}
}
func signatureString(values http.Header, include []string, requestTargetFn func(b *bytes.Buffer) error) (string, error) {
if len(include) == 0 {
include = defaultHeaders
}
var b bytes.Buffer
for n, i := range include {
i := strings.ToLower(i)
if i == RequestTarget {
err := requestTargetFn(&b)
if err != nil {
return "", err
}
} else {
hv, ok := values[textproto.CanonicalMIMEHeaderKey(i)]
if !ok {
return "", fmt.Errorf("missing header %q", i)
}
b.WriteString(i)
b.WriteString(headerFieldDelimiter)
for i, v := range hv {
b.WriteString(strings.TrimSpace(v))
if i < len(hv)-1 {
b.WriteString(headerValueDelimiter)
}
}
}
if n < len(include)-1 {
b.WriteString(headersDelimiter)
}
}
return b.String(), nil
}
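// Illustrative example (not from the upstream source): signing headers
// ["(request-target)", "date"] for a POST to /inbox produces a signature
// string such as
//
//	(request-target): post /inbox
//	date: Tue, 14 Feb 2023 10:33:38 GMT
//
// which is then signed and base64-encoded into the "signature" parameter above.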

153
vendor/github.com/go-fed/httpsig/verifying.go generated vendored Normal file
View File

@ -0,0 +1,153 @@
package httpsig
import (
"crypto"
"encoding/base64"
"fmt"
"net/http"
"strings"
)
var _ Verifier = &verifier{}
type verifier struct {
header http.Header
kId string
signature string
headers []string
sigStringFn func(http.Header, []string) (string, error)
}
func newVerifier(h http.Header, sigStringFn func(http.Header, []string) (string, error)) (*verifier, error) {
scheme, s, err := getSignatureScheme(h)
if err != nil {
return nil, err
}
kId, sig, headers, err := getSignatureComponents(scheme, s)
if err != nil {
return nil, err
}
return &verifier{
header: h,
kId: kId,
signature: sig,
headers: headers,
sigStringFn: sigStringFn,
}, nil
}
func (v *verifier) KeyId() string {
return v.kId
}
func (v *verifier) Verify(pKey crypto.PublicKey, algo Algorithm) error {
s, err := signerFromString(string(algo))
if err == nil {
return v.asymmVerify(s, pKey)
}
m, err := macerFromString(string(algo))
if err == nil {
return v.macVerify(m, pKey)
}
return fmt.Errorf("no crypto implementation available for %q", algo)
}
func (v *verifier) macVerify(m macer, pKey crypto.PublicKey) error {
key, ok := pKey.([]byte)
if !ok {
return fmt.Errorf("public key for MAC verifying must be of type []byte")
}
signature, err := v.sigStringFn(v.header, v.headers)
if err != nil {
return err
}
actualMAC, err := base64.StdEncoding.DecodeString(v.signature)
if err != nil {
return err
}
ok, err = m.Equal([]byte(signature), actualMAC, key)
if err != nil {
return err
} else if !ok {
return fmt.Errorf("invalid http signature")
}
return nil
}
func (v *verifier) asymmVerify(s signer, pKey crypto.PublicKey) error {
toHash, err := v.sigStringFn(v.header, v.headers)
if err != nil {
return err
}
signature, err := base64.StdEncoding.DecodeString(v.signature)
if err != nil {
return err
}
err = s.Verify(pKey, []byte(toHash), signature)
if err != nil {
return err
}
return nil
}
func getSignatureScheme(h http.Header) (scheme SignatureScheme, val string, err error) {
s := h.Get(string(Signature))
sigHasAll := strings.Contains(s, keyIdParameter) ||
strings.Contains(s, headersParameter) ||
strings.Contains(s, signatureParameter)
a := h.Get(string(Authorization))
authHasAll := strings.Contains(a, keyIdParameter) ||
strings.Contains(a, headersParameter) ||
strings.Contains(a, signatureParameter)
if sigHasAll && authHasAll {
err = fmt.Errorf("both %q and %q have signature parameters", Signature, Authorization)
return
} else if !sigHasAll && !authHasAll {
err = fmt.Errorf("neither %q nor %q have signature parameters", Signature, Authorization)
return
} else if sigHasAll {
val = s
scheme = Signature
return
} else { // authHasAll
val = a
scheme = Authorization
return
}
}
func getSignatureComponents(scheme SignatureScheme, s string) (kId, sig string, headers []string, err error) {
if as := scheme.authScheme(); len(as) > 0 {
s = strings.TrimPrefix(s, as+prefixSeparater)
}
params := strings.Split(s, parameterSeparater)
for _, p := range params {
kv := strings.SplitN(p, parameterKVSeparater, 2)
if len(kv) != 2 {
err = fmt.Errorf("malformed http signature parameter: %v", kv)
return
}
k := kv[0]
v := strings.Trim(kv[1], parameterValueDelimiter)
switch k {
case keyIdParameter:
kId = v
case algorithmParameter:
// Deprecated, ignore
case headersParameter:
headers = strings.Split(v, headerParameterValueDelim)
case signatureParameter:
sig = v
default:
// Ignore unrecognized parameters
}
}
if len(kId) == 0 {
err = fmt.Errorf("missing %q parameter in http signature", keyIdParameter)
} else if len(sig) == 0 {
err = fmt.Errorf("missing %q parameter in http signature", signatureParameter)
} else if len(headers) == 0 { // Optional
headers = defaultHeaders
}
return
}
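// Illustrative example (not from the upstream source): given the header value
//
//	keyId="https://example.org/actor#main-key",headers="(request-target) date",signature="MEUCIQ...=="
//
// getSignatureComponents returns the keyId URL, the header list
// ["(request-target)", "date"], and the base64-encoded signature.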

27
vendor/github.com/gologme/log/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2018-2021 Bret Jordan. All rights reserved.
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.

141
vendor/github.com/gologme/log/README.md generated vendored Normal file
View File

@ -0,0 +1,141 @@
# gologme/log #
[![Go Report Card](https://goreportcard.com/badge/github.com/gologme/log)](https://goreportcard.com/report/github.com/gologme/log)
[![GoDoc](https://godoc.org/github.com/gologme/log?status.png)](https://godoc.org/github.com/gologme/log)
[![GitHub license](https://img.shields.io/github/license/gologme/log.svg?style=flat)](https://github.com/gologme/log/blob/master/LICENSE)
[![GitHub go.mod Go version of a Go module](https://img.shields.io/github/go-mod/go-version/gologme/log.svg?style=flat)](https://github.com/gologme/log)
This package is a drop-in replacement for the built-in Go log package. All the
functionality of the built-in package still exists and is unchanged. In addition,
this package contains a series of small enhancements and additions. Namely, it
adds five logging levels:
- Error
- Warn
- Info
- Debug
- Trace
In addition to these five defined logging levels, users can also define their
own arbitrary logging levels.
Unlike other loggers, these logging levels are not enabled as a chain: enabling
one level, say Warn, does not also enable the levels above it. This package is
implemented in such a way that users can individually turn the various new
logging levels on and off. If existing code uses the built-in log package, no
change is needed to use this package.
Another feature that was added, based on comments seen on the various golang
boards, is the ability to set the calldepth.
## Version ##
1.3.0
### New to version 1.3.0 ###
- Made changes to bring it current as of Go 1.17.1 so it remains a drop in replacement
- Made changes based on feedback
- Sorted log levels into natural order
- Enabled formatted prefixes that include the log level (e.g., [info] {rest of log line})
## Installation ##
This package can be installed with the go get command:
```
go get github.com/gologme/log
go install github.com/gologme/log
```
## Example ##
Just like the built-in package, code can still do simple logging:
```
log.Println("some interesting logging message")
```
In addition to this, users can enable info, warn, error, debug, or trace logging like:
```
log.EnableLevel("error")
log.EnableLevel("warn")
log.EnableLevel("info")
log.EnableLevel("debug")
log.EnableLevel("trace")
```
Once these levels are enabled, calls to the info, warn, error, debug, or trace loggers
will print out just like they do for the Print and Fatal built-in loggers. The
function and method signatures defined for each level exactly match the ones in
the built-in package. The new functions/methods are called:
```
log.Error()
log.Errorf()
log.Errorln()
log.Warn()
log.Warnf()
log.Warnln()
log.Info()
log.Infof()
log.Infoln()
log.Debug()
log.Debugf()
log.Debugln()
log.Trace()
log.Tracef()
log.Traceln()
```
In addition to the defined levels, arbitrary levels can be enabled. For example:
```
log.EnableLevel("tracedebug")
```
This level can then be used from an application as shown below. All three
functions/methods are defined for this: log.Level(), log.Levelln(), log.Levelf().
For each of these, the first argument is the level name.
```
log.Levelln("tracedebug", "some other neat logging message for this level")
```
To include the log level in the output prefix, enable the formatted prefix:
```
log.EnableFormattedPrefix()
```
This will make the log lines look like the following; note the log level prefix:
`[debug] 2021/10/15 12:46:52 some logging message `
To enable logging levels by number, use the following mapping:
- Level 10 = panic, fatal, error, warn, info, debug, & trace
- Level 5 = panic, fatal, error, warn, info, & debug
- Level 4 = panic, fatal, error, warn, & info
- Level 3 = panic, fatal, error, & warn
- Level 2 = panic, fatal, & error
- Level 1 = panic & fatal
```
log.EnableLevelsByNumber(int)
```
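For example, based on the mapping above, the following single call enables the
panic, fatal, error, warn, and info levels:
```
log.EnableLevelsByNumber(4)
```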
To disable all logging levels
```
log.DisableAllLevels()
```
The last addition is the ability to set the calldepth. The built-in package from
the Go authors has this hard-coded to a value of 2. A small change was made so
that the application using the log package can set it. Judging from the Go
authors' source code, the normal possible values are 1, 2, or 3.
```
log.SetCallDepth(int)
```
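Putting the pieces together, a minimal program using only the functions
described above might look like the following sketch (the output shown in the
comments assumes the default flags):
```
package main

import "github.com/gologme/log"

func main() {
	log.EnableLevel("info")
	log.EnableLevel("debug")
	log.EnableFormattedPrefix()

	log.Infoln("server started")        // e.g. [INFO] 2021/10/15 12:46:52 server started
	log.Debugln("loaded configuration") // e.g. [DEBUG] 2021/10/15 12:46:52 loaded configuration
	log.Traceln("not printed: the trace level is not enabled")
}
```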
## License ##
This is free software, licensed under the same BSD license as the original
Go log package.
## Copyright ##
Copyright 2017 Bret Jordan, All rights reserved.

810
vendor/github.com/gologme/log/log.go generated vendored Normal file
View File

@ -0,0 +1,810 @@
// Copyright 2018-2021 Bret Jordan, All rights reserved.
//
// This package contains a series of changes and additions to the built-in
// log package from the Go Authors found here:
// https://golang.org/src/log/log.go
// See their copyright below.
//
// Version 1.3.0
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package log implements a simple logging package. It defines a type, Logger,
// with methods for formatting output. It also has a predefined 'standard'
// Logger accessible through helper functions Print[f|ln], Fatal[f|ln], and
// Panic[f|ln], which are easier to use than creating a Logger manually.
// That logger writes to standard error and prints the date and time
// of each logged message.
// Every log message is output on a separate line: if the message being
// printed does not end in a newline, the logger will add one.
// The Fatal functions call os.Exit(1) after writing the log message.
// The Panic functions call panic after writing the log message.
package log
import (
"fmt"
"io"
"os"
"runtime"
"strings"
"sync"
"time"
)
// These flags define which text to prefix to each log entry generated by the Logger.
// Bits are or'ed together to control what's printed.
// With the exception of the Lmsgprefix flag, there is no
// control over the order they appear (the order listed here)
// or the format they present (as described in the comments).
// The prefix is followed by a colon only when Llongfile or Lshortfile
// is specified.
// For example, flags Ldate | Ltime (or LstdFlags) produce,
// 2009/01/23 01:23:23 message
// while flags Ldate | Ltime | Lmicroseconds | Llongfile produce,
// 2009/01/23 01:23:23.123123 /a/b/c/d.go:23: message
const (
Ldate = 1 << iota // the date in the local time zone: 2009/01/23
Ltime // the time in the local time zone: 01:23:23
Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
Llongfile // full file name and line number: /a/b/c/d.go:23
Lshortfile // final file name element and line number: d.go:23. overrides Llongfile
LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone
Lmsgprefix // move the "prefix" from the beginning of the line to before the message
LstdFlags = Ldate | Ltime // initial values for the standard logger
)
// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation makes a single call to
// the Writer's Write method. A Logger can be used simultaneously from
// multiple goroutines; it guarantees to serialize access to the Writer.
//
// Calldepth is used to recover the PC and is provided for generality, although
// at the moment on all pre-defined paths it will be 2.
// Jordan: Added calldepth, levels, and formattedPrefix to the struct
type Logger struct {
mu sync.Mutex // ensures atomic writes; protects the following fields
prefix string // prefix to write at beginning of each line
flag int // properties
out io.Writer // destination for output
buf []byte // for accumulating text to write
calldepth int
levels map[string]bool
formattedPrefix bool
}
// New creates a new Logger. The out variable sets the
// destination to which log data will be written.
// The prefix appears at the beginning of each generated log line, or
// after the log header if the Lmsgprefix flag is provided.
// The flag argument defines the logging properties.
// Jordan: Added levels and enabled fatal by default
func New(out io.Writer, prefix string, flag int) *Logger {
l := Logger{out: out, prefix: prefix, flag: flag}
// Since panic and fatal are part of the standard library, let's enable them
// by default so that they just work and the checks in the Output function
// will allow them.
l.levels = map[string]bool{
"panic": true,
"fatal": true,
"error": false,
"warn": false,
"info": false,
"debug": false,
"trace": false,
}
return &l
}
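// Illustrative example (not part of the upstream source): creating a logger
// and enabling one of the added levels on it:
//
//	l := New(os.Stderr, "chtorr: ", LstdFlags|Lshortfile)
//	l.EnableLevel("info")
//	l.Infoln("ready")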
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.out = w
}
var std = New(os.Stderr, "", LstdFlags)
// Default returns the standard logger used by the package-level output functions.
func Default() *Logger { return std }
// Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.
func itoa(buf *[]byte, i int, wid int) {
// Assemble decimal in reverse order.
var b [20]byte
bp := len(b) - 1
for i >= 10 || wid > 1 {
wid--
q := i / 10
b[bp] = byte('0' + i - q*10)
bp--
i = q
}
// i < 10
b[bp] = byte('0' + i)
*buf = append(*buf, b[bp:]...)
}
// formatHeader writes log header to buf in following order:
// * l.prefix (if it's not blank and Lmsgprefix is unset),
// * date and/or time (if corresponding flags are provided),
// * file and line number (if corresponding flags are provided),
// * l.prefix (if it's not blank and Lmsgprefix is set).
func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) {
if l.flag&Lmsgprefix == 0 {
*buf = append(*buf, l.prefix...)
}
if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
if l.flag&LUTC != 0 {
t = t.UTC()
}
if l.flag&Ldate != 0 {
year, month, day := t.Date()
itoa(buf, year, 4)
*buf = append(*buf, '/')
itoa(buf, int(month), 2)
*buf = append(*buf, '/')
itoa(buf, day, 2)
*buf = append(*buf, ' ')
}
if l.flag&(Ltime|Lmicroseconds) != 0 {
hour, min, sec := t.Clock()
itoa(buf, hour, 2)
*buf = append(*buf, ':')
itoa(buf, min, 2)
*buf = append(*buf, ':')
itoa(buf, sec, 2)
if l.flag&Lmicroseconds != 0 {
*buf = append(*buf, '.')
itoa(buf, t.Nanosecond()/1e3, 6)
}
*buf = append(*buf, ' ')
}
}
if l.flag&(Lshortfile|Llongfile) != 0 {
if l.flag&Lshortfile != 0 {
short := file
for i := len(file) - 1; i > 0; i-- {
if file[i] == '/' {
short = file[i+1:]
break
}
}
file = short
}
*buf = append(*buf, file...)
*buf = append(*buf, ':')
itoa(buf, line, -1)
*buf = append(*buf, ": "...)
}
if l.flag&Lmsgprefix != 0 {
*buf = append(*buf, l.prefix...)
}
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is used to recover the PC and is
// provided for generality, although at the moment on all pre-defined
// paths it will be 2.
// Jordan: Set calldepth and allow default levels. Call depth is now in object
func (l *Logger) Output(level string, s string) error {
// Lets set the default value to 2, just like it was before.
if l.calldepth == 0 {
l.calldepth = 2
}
// Allow the default standard log types to just work and do not output
// log levels that are turned off. However, we need to address the "fatal"
// log level and allow it, as it is part of the standard library. This is
// done in the New function, which enables fatal by default.
if level != "" {
if val, ok := l.levels[level]; !ok || !val {
return nil
}
}
// Set the prefix so it carries the log level, e.g. "[INFO] "
if l.formattedPrefix {
pre := "[" + strings.ToUpper(level) + "] "
l.SetPrefix(pre)
}
now := time.Now() // get this early.
var file string
var line int
l.mu.Lock()
defer l.mu.Unlock()
if l.flag&(Lshortfile|Llongfile) != 0 {
// Release lock while getting caller info - it's expensive.
l.mu.Unlock()
var ok bool
// Jordan: use calldepth on object
_, file, line, ok = runtime.Caller(l.calldepth)
if !ok {
file = "???"
line = 0
}
l.mu.Lock()
}
l.buf = l.buf[:0]
l.formatHeader(&l.buf, now, file, line)
l.buf = append(l.buf, s...)
if len(s) == 0 || s[len(s)-1] != '\n' {
l.buf = append(l.buf, '\n')
}
_, err := l.out.Write(l.buf)
return err
}
// Jordan: In all of these functions we are passing in the logging level now
// not the calldepth, since the calldepth is set on the object
// Printf calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Printf(format string, v ...interface{}) {
l.Output("", fmt.Sprintf(format, v...))
}
// Print calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Print.
func (l *Logger) Print(v ...interface{}) { l.Output("", fmt.Sprint(v...)) }
// Println calls l.Output to print to the logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Println(v ...interface{}) { l.Output("", fmt.Sprintln(v...)) }
// Fatal is equivalent to l.Print() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func (l *Logger) Fatal(v ...interface{}) {
l.Output("fatal", fmt.Sprint(v...))
os.Exit(1)
}
// Fatalf is equivalent to l.Printf() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func (l *Logger) Fatalf(format string, v ...interface{}) {
l.Output("fatal", fmt.Sprintf(format, v...))
os.Exit(1)
}
// Fatalln is equivalent to l.Println() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func (l *Logger) Fatalln(v ...interface{}) {
l.Output("fatal", fmt.Sprintln(v...))
os.Exit(1)
}
// Panic is equivalent to l.Print() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func (l *Logger) Panic(v ...interface{}) {
s := fmt.Sprint(v...)
l.Output("panic", s)
panic(s)
}
// Panicf is equivalent to l.Printf() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func (l *Logger) Panicf(format string, v ...interface{}) {
s := fmt.Sprintf(format, v...)
l.Output("panic", s)
panic(s)
}
// Panicln is equivalent to l.Println() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func (l *Logger) Panicln(v ...interface{}) {
s := fmt.Sprintln(v...)
l.Output("panic", s)
panic(s)
}
// Flags returns the output flags for the logger.
// The flag bits are Ldate, Ltime, and so on.
func (l *Logger) Flags() int {
l.mu.Lock()
defer l.mu.Unlock()
return l.flag
}
// SetFlags sets the output flags for the logger.
// The flag bits are Ldate, Ltime, and so on.
func (l *Logger) SetFlags(flag int) {
l.mu.Lock()
defer l.mu.Unlock()
l.flag = flag
}
// Prefix returns the output prefix for the logger.
func (l *Logger) Prefix() string {
l.mu.Lock()
defer l.mu.Unlock()
return l.prefix
}
// SetPrefix sets the output prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
l.mu.Lock()
defer l.mu.Unlock()
l.prefix = prefix
}
// Writer returns the output destination for the logger.
func (l *Logger) Writer() io.Writer {
l.mu.Lock()
defer l.mu.Unlock()
return l.out
}
// SetOutput sets the output destination for the standard logger.
func SetOutput(w io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.out = w
}
// Flags returns the output flags for the standard logger.
// The flag bits are Ldate, Ltime, and so on.
func Flags() int {
return std.Flags()
}
// SetFlags sets the output flags for the standard logger.
// The flag bits are Ldate, Ltime, and so on.
func SetFlags(flag int) {
std.SetFlags(flag)
}
// Prefix returns the output prefix for the standard logger.
func Prefix() string {
return std.Prefix()
}
// SetPrefix sets the output prefix for the standard logger.
func SetPrefix(prefix string) {
std.SetPrefix(prefix)
}
// Writer returns the output destination for the standard logger.
func Writer() io.Writer {
return std.Writer()
}
// These functions write to the standard logger.
// Print calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Print.
func Print(v ...interface{}) {
std.Output("", fmt.Sprint(v...))
}
// Printf calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Printf(format string, v ...interface{}) {
std.Output("", fmt.Sprintf(format, v...))
}
// Println calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Println(v ...interface{}) {
std.Output("", fmt.Sprintln(v...))
}
// Fatal is equivalent to Print() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func Fatal(v ...interface{}) {
std.Output("fatal", fmt.Sprint(v...))
os.Exit(1)
}
// Fatalf is equivalent to Printf() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func Fatalf(format string, v ...interface{}) {
std.Output("fatal", fmt.Sprintf(format, v...))
os.Exit(1)
}
// Fatalln is equivalent to Println() followed by a call to os.Exit(1).
// Jordan: added fatal as a level so it can show up in formatted logs
func Fatalln(v ...interface{}) {
std.Output("fatal", fmt.Sprintln(v...))
os.Exit(1)
}
// Panic is equivalent to Print() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func Panic(v ...interface{}) {
s := fmt.Sprint(v...)
std.Output("panic", s)
panic(s)
}
// Panicf is equivalent to Printf() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func Panicf(format string, v ...interface{}) {
s := fmt.Sprintf(format, v...)
std.Output("panic", s)
panic(s)
}
// Panicln is equivalent to Println() followed by a call to panic().
// Jordan: added panic as a level so it can show up in formatted logs
func Panicln(v ...interface{}) {
s := fmt.Sprintln(v...)
std.Output("panic", s)
panic(s)
}
// Output writes the output for a logging event. The string s contains
// the text to print after the prefix specified by the flags of the
// Logger. A newline is appended if the last character of s is not
// already a newline. Calldepth is the count of the number of
// frames to skip when computing the file name and line number
// if Llongfile or Lshortfile is set; a value of 1 will print the details
// for the caller of Output.
// Jordan: calldepth is set on the object, so only the level and message are passed in
func Output(level string, s string) error {
return std.Output(level, s) // +1 for this frame.
}
// ----------------------------------------------------------------------
//
// Jordan: My changes to enable logging levels
//
// ----------------------------------------------------------------------
// SetCallDepth - This function will set the call depth. By default the call
// depth is set at 2. A depth of 2 represents the behavior of the standard
// library.
func (l *Logger) SetCallDepth(d int) {
l.calldepth = d
}
// SetCallDepth - This function will set the call depth. By default the call
// depth is set at 2. A depth of 2 represents the behavior of the standard
// library.
func SetCallDepth(d int) {
std.SetCallDepth(d)
}
// EnableLevel - This function will enable the output from the supplied logging
// level
func (l *Logger) EnableLevel(level string) {
l.levels[level] = true
}
// EnableLevel - This function will enable the output from the supplied logging
// level
func EnableLevel(level string) {
std.EnableLevel(level)
}
// DisableLevel - This function will disable the output from the supplied
// logging level
func (l *Logger) DisableLevel(level string) {
if _, ok := l.levels[level]; ok {
l.levels[level] = false
}
}
// DisableLevel - This function will disable the output from the supplied
// logging level
func DisableLevel(level string) {
std.DisableLevel(level)
}
// GetLevel - This function will return the state of a given level
func (l *Logger) GetLevel(level string) bool {
if _, ok := l.levels[level]; ok {
return l.levels[level]
}
return false
}
// GetLevel - This function will return the state of a given level
func GetLevel(level string) bool {
return std.GetLevel(level)
}
// EnableFormattedPrefix - This function will enable the formatted prefix in output
func (l *Logger) EnableFormattedPrefix() {
l.formattedPrefix = true
}
// EnableFormattedPrefix - This function will enable the formatted prefix in output
func EnableFormattedPrefix() {
std.formattedPrefix = true
}
// EnableLevelsByNumber - This function will enable logging levels by number
func (l *Logger) EnableLevelsByNumber(num int) {
switch num {
case 1:
l.EnableLevel("panic")
l.EnableLevel("fatal")
case 2:
l.EnableLevel("panic")
l.EnableLevel("fatal")
l.EnableLevel("error")
case 3:
l.EnableLevel("panic")
l.EnableLevel("fatal")
l.EnableLevel("error")
l.EnableLevel("warn")
case 4:
l.EnableLevel("panic")
l.EnableLevel("fatal")
l.EnableLevel("error")
l.EnableLevel("warn")
l.EnableLevel("info")
case 5:
l.EnableLevel("panic")
l.EnableLevel("fatal")
l.EnableLevel("error")
l.EnableLevel("warn")
l.EnableLevel("info")
l.EnableLevel("debug")
case 10:
l.EnableLevel("panic")
l.EnableLevel("fatal")
l.EnableLevel("error")
l.EnableLevel("warn")
l.EnableLevel("info")
l.EnableLevel("debug")
l.EnableLevel("trace")
}
return
}
// EnableLevelsByNumber - This function will enable logging levels by number
func EnableLevelsByNumber(num int) {
std.EnableLevelsByNumber(num)
}
// DisableAllLevels - This function will disable the output from all logging levels
func (l *Logger) DisableAllLevels() {
for k := range l.levels {
l.levels[k] = false
}
}
// DisableAllLevels - This function will disable the output from all logging levels
func DisableAllLevels() {
std.DisableAllLevels()
}
// ----------------------------------------------------------------------
// New logger functions and methods
// ----------------------------------------------------------------------
// Error - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func (l *Logger) Error(v ...interface{}) {
l.Output("error", fmt.Sprint(v...))
}
// Errorf - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Errorf(format string, v ...interface{}) {
l.Output("error", fmt.Sprintf(format, v...))
}
// Errorln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Errorln(v ...interface{}) {
l.Output("error", fmt.Sprintln(v...))
}
// Warn - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func (l *Logger) Warn(v ...interface{}) {
l.Output("warn", fmt.Sprint(v...))
}
// Warnf - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Printf.
func (l *Logger) Warnf(format string, v ...interface{}) {
l.Output("warn", fmt.Sprintf(format, v...))
}
// Warnln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Warnln(v ...interface{}) {
l.Output("warn", fmt.Sprintln(v...))
}
// Info - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func (l *Logger) Info(v ...interface{}) {
l.Output("info", fmt.Sprint(v...))
}
// Infof - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Printf.
func (l *Logger) Infof(format string, v ...interface{}) {
l.Output("info", fmt.Sprintf(format, v...))
}
// Infoln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Infoln(v ...interface{}) {
l.Output("info", fmt.Sprintln(v...))
}
// Debug - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func (l *Logger) Debug(v ...interface{}) {
l.Output("debug", fmt.Sprint(v...))
}
// Debugf - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Debugf(format string, v ...interface{}) {
l.Output("debug", fmt.Sprintf(format, v...))
}
// Debugln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Debugln(v ...interface{}) {
l.Output("debug", fmt.Sprintln(v...))
}
// Trace - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func (l *Logger) Trace(v ...interface{}) {
l.Output("trace", fmt.Sprint(v...))
}
// Tracef - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Tracef(format string, v ...interface{}) {
l.Output("trace", fmt.Sprintf(format, v...))
}
// Traceln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func (l *Logger) Traceln(v ...interface{}) {
l.Output("trace", fmt.Sprintln(v...))
}
// Level - This function calls Output to print to the standard logger. The first
// parameter is a logging level, this allows the printing of arbitrary logging
// levels. Arguments are handled in the manner of fmt.Print.
func (l *Logger) Level(level string, v ...interface{}) {
l.Output(level, fmt.Sprint(v...))
}
// Levelf - This function calls Output to print to the standard logger. The
// first parameter is a logging level, this allows the printing of arbitrary
// logging levels. Arguments are handled in the manner of fmt.Printf.
func (l *Logger) Levelf(level, format string, v ...interface{}) {
l.Output(level, fmt.Sprintf(format, v...))
}
// Levelln - This function calls Output to print to the standard logger. The
// first parameter is a logging level, this allows the printing of arbitrary
// logging levels. Arguments are handled in the manner of fmt.Println.
func (l *Logger) Levelln(level string, v ...interface{}) {
l.Output(level, fmt.Sprintln(v...))
}
// Error - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func Error(v ...interface{}) {
std.Output("error", fmt.Sprint(v...))
}
// Errorf - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, v ...interface{}) {
std.Output("error", fmt.Sprintf(format, v...))
}
// Errorln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Errorln(v ...interface{}) {
std.Output("error", fmt.Sprintln(v...))
}
// Warn - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func Warn(v ...interface{}) {
std.Output("warn", fmt.Sprint(v...))
}
// Warnf - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Printf.
func Warnf(format string, v ...interface{}) {
std.Output("warn", fmt.Sprintf(format, v...))
}
// Warnln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Warnln(v ...interface{}) {
std.Output("warn", fmt.Sprintln(v...))
}
// Info - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func Info(v ...interface{}) {
std.Output("info", fmt.Sprint(v...))
}
// Infof - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Printf.
func Infof(format string, v ...interface{}) {
std.Output("info", fmt.Sprintf(format, v...))
}
// Infoln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Infoln(v ...interface{}) {
std.Output("info", fmt.Sprintln(v...))
}
// Debug - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func Debug(v ...interface{}) {
std.Output("debug", fmt.Sprint(v...))
}
// Debugf - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Debugf(format string, v ...interface{}) {
std.Output("debug", fmt.Sprintf(format, v...))
}
// Debugln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Debugln(v ...interface{}) {
std.Output("debug", fmt.Sprintln(v...))
}
// Trace - This function calls Output to print to the standard logger. Arguments
// are handled in the manner of fmt.Print.
func Trace(v ...interface{}) {
std.Output("trace", fmt.Sprint(v...))
}
// Tracef - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Printf.
func Tracef(format string, v ...interface{}) {
std.Output("trace", fmt.Sprintf(format, v...))
}
// Traceln - This function calls Output to print to the standard logger.
// Arguments are handled in the manner of fmt.Println.
func Traceln(v ...interface{}) {
std.Output("trace", fmt.Sprintln(v...))
}
// Level - This function calls Output to print to the standard logger. The first
// parameter is a logging level, this allows the printing of arbitrary logging
// levels. Arguments are handled in the manner of fmt.Print.
func Level(level string, v ...interface{}) {
std.Output(level, fmt.Sprint(v...))
}
// Levelf - This function calls Output to print to the standard logger. The
// first parameter is a logging level, this allows the printing of arbitrary
// logging levels. Arguments are handled in the manner of fmt.Printf.
func Levelf(level, format string, v ...interface{}) {
std.Output(level, fmt.Sprintf(format, v...))
}
// Levelln - This function calls Output to print to the standard logger. The
// first parameter is a logging level, this allows the printing of arbitrary
// logging levels. Arguments are handled in the manner of fmt.Println.
func Levelln(level string, v ...interface{}) {
std.Output(level, fmt.Sprintln(v...))
}
// ----------------------------------------------------------------------

1
vendor/github.com/google/btree/.travis.yml generated vendored Normal file
View File

@ -0,0 +1 @@
language: go

202
vendor/github.com/google/btree/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

12
vendor/github.com/google/btree/README.md generated vendored Normal file
View File

@ -0,0 +1,12 @@
# BTree implementation for Go
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.
See http://godoc.org/github.com/google/btree for documentation.
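A minimal usage sketch (assuming the `Int` item type and the `ReplaceOrInsert`,
`Get`, and `Len` methods provided by this package):
```
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // a 2-3-4 tree
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())             // 10
	fmt.Println(tr.Get(btree.Int(3))) // 3
}
```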

890
vendor/github.com/google/btree/btree.go generated vendored Normal file
View File

@ -0,0 +1,890 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
// * Due to the overhead of storing values as interfaces (each
// value needs to be stored as the value itself, then 2 words for the
// interface pointing to that value and its type), resulting in higher
// memory use.
// * Since interfaces can point to values anywhere in memory, values are
// most likely not stored in contiguous blocks, resulting in a higher
// number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values.
package btree
import (
"fmt"
"io"
"sort"
"strings"
"sync"
)
// Item represents a single object in the tree.
type Item interface {
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
Less(than Item) bool
}
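// Illustrative example (not part of the upstream source): a minimal Item
// implementation over ints:
//
//	type myInt int
//
//	func (a myInt) Less(b Item) bool { return a < b.(myInt) }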
const (
DefaultFreeListSize = 32
)
var (
nilItems = make(items, 16)
nilChildren = make(children, 16)
)
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are safe for concurrent write access.
type FreeList struct {
mu sync.Mutex
freelist []*node
}
// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
return &FreeList{freelist: make([]*node, 0, size)}
}
func (f *FreeList) newNode() (n *node) {
f.mu.Lock()
index := len(f.freelist) - 1
if index < 0 {
f.mu.Unlock()
return new(node)
}
n = f.freelist[index]
f.freelist[index] = nil
f.freelist = f.freelist[:index]
f.mu.Unlock()
return
}
// freeNode adds the given node to the list, returning true if it was added
// and false if it was discarded.
func (f *FreeList) freeNode(n *node) (out bool) {
f.mu.Lock()
if len(f.freelist) < cap(f.freelist) {
f.freelist = append(f.freelist, n)
out = true
}
f.mu.Unlock()
return
}
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
if degree <= 1 {
panic("bad degree")
}
return &BTree{
degree: degree,
cow: &copyOnWriteContext{freelist: f},
}
}
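// Illustrative example (not part of the upstream source): two trees sharing a
// free list, which the FreeList documentation above notes is safe for
// concurrent write access:
//
//	fl := NewFreeList(DefaultFreeListSize)
//	a := NewWithFreeList(8, fl)
//	b := NewWithFreeList(8, fl)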
// items stores items in a node.
type items []Item
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = item
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
item := (*s)[index]
copy((*s)[index:], (*s)[index+1:])
(*s)[len(*s)-1] = nil
*s = (*s)[:len(*s)-1]
return item
}
// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// truncate truncates this instance at index so that it contains only the
// first index items. index must be less than or equal to length.
func (s *items) truncate(index int) {
var toClear items
*s, toClear = (*s)[:index], (*s)[index:]
for len(toClear) > 0 {
toClear = toClear[copy(toClear, nilItems):]
}
}
// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
i := sort.Search(len(s), func(i int) bool {
return item.Less(s[i])
})
if i > 0 && !s[i-1].Less(item) {
return i - 1, true
}
return i, false
}
// children stores child nodes in a node.
type children []*node
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = n
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
n := (*s)[index]
copy((*s)[index:], (*s)[index+1:])
(*s)[len(*s)-1] = nil
*s = (*s)[:len(*s)-1]
return n
}
// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// truncate truncates this instance at index so that it contains only the
// first index children. index must be less than or equal to length.
func (s *children) truncate(index int) {
var toClear children
*s, toClear = (*s)[:index], (*s)[index:]
for len(toClear) > 0 {
toClear = toClear[copy(toClear, nilChildren):]
}
}
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
// * len(children) == 0, len(items) unconstrained
// * len(children) == len(items) + 1
type node struct {
items items
children children
cow *copyOnWriteContext
}
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
if n.cow == cow {
return n
}
out := cow.newNode()
if cap(out.items) >= len(n.items) {
out.items = out.items[:len(n.items)]
} else {
out.items = make(items, len(n.items), cap(n.items))
}
copy(out.items, n.items)
// Copy children
if cap(out.children) >= len(n.children) {
out.children = out.children[:len(n.children)]
} else {
out.children = make(children, len(n.children), cap(n.children))
}
copy(out.children, n.children)
return out
}
func (n *node) mutableChild(i int) *node {
c := n.children[i].mutableFor(n.cow)
n.children[i] = c
return c
}
// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
item := n.items[i]
next := n.cow.newNode()
next.items = append(next.items, n.items[i+1:]...)
n.items.truncate(i)
if len(n.children) > 0 {
next.children = append(next.children, n.children[i+1:]...)
n.children.truncate(i + 1)
}
return item, next
}
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
if len(n.children[i].items) < maxItems {
return false
}
first := n.mutableChild(i)
item, second := first.split(maxItems / 2)
n.items.insertAt(i, item)
n.children.insertAt(i+1, second)
return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
i, found := n.items.find(item)
if found {
out := n.items[i]
n.items[i] = item
return out
}
if len(n.children) == 0 {
n.items.insertAt(i, item)
return nil
}
if n.maybeSplitChild(i, maxItems) {
inTree := n.items[i]
switch {
case item.Less(inTree):
// no change, we want first split node
case inTree.Less(item):
i++ // we want second split node
default:
out := n.items[i]
n.items[i] = item
return out
}
}
return n.mutableChild(i).insert(item, maxItems)
}
// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
i, found := n.items.find(key)
if found {
return n.items[i]
} else if len(n.children) > 0 {
return n.children[i].get(key)
}
return nil
}
// min returns the first item in the subtree.
func min(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[0]
}
if len(n.items) == 0 {
return nil
}
return n.items[0]
}
// max returns the last item in the subtree.
func max(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[len(n.children)-1]
}
if len(n.items) == 0 {
return nil
}
return n.items[len(n.items)-1]
}
// toRemove details what item to remove in a node.remove call.
type toRemove int
const (
removeItem toRemove = iota // removes the given item
removeMin // removes smallest item in the subtree
removeMax // removes largest item in the subtree
)
// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
var i int
var found bool
switch typ {
case removeMax:
if len(n.children) == 0 {
return n.items.pop()
}
i = len(n.items)
case removeMin:
if len(n.children) == 0 {
return n.items.removeAt(0)
}
i = 0
case removeItem:
i, found = n.items.find(item)
if len(n.children) == 0 {
if found {
return n.items.removeAt(i)
}
return nil
}
default:
panic("invalid type")
}
// If we get to here, we have children.
if len(n.children[i].items) <= minItems {
return n.growChildAndRemove(i, item, minItems, typ)
}
child := n.mutableChild(i)
// Either we had enough items to begin with, or we've done some
// merging/stealing, because we've got enough now and we're ready to return
// stuff.
if found {
// The item exists at index 'i', and the child we've selected can give us a
// predecessor, since if we've gotten here it's got > minItems items in it.
out := n.items[i]
	// We use our special-case 'remove' call with typ=removeMax to pull the
// predecessor of item i (the rightmost leaf of our immediate left child)
// and set it into where we pulled the item from.
n.items[i] = child.remove(nil, minItems, removeMax)
return out
}
// Final recursive call. Once we're here, we know that the item isn't in this
// node and that the child is big enough to remove from.
return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
// 1) item is in this node
// 2) item is in child
// In both cases, we need to handle the two subcases:
// A) node has enough values that it can spare one
// B) node doesn't have enough values
// For the latter, we have to check:
// a) left sibling has node to spare
// b) right sibling has node to spare
// c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
if i > 0 && len(n.children[i-1].items) > minItems {
// Steal from left child
child := n.mutableChild(i)
stealFrom := n.mutableChild(i - 1)
stolenItem := stealFrom.items.pop()
child.items.insertAt(0, n.items[i-1])
n.items[i-1] = stolenItem
if len(stealFrom.children) > 0 {
child.children.insertAt(0, stealFrom.children.pop())
}
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
// steal from right child
child := n.mutableChild(i)
stealFrom := n.mutableChild(i + 1)
stolenItem := stealFrom.items.removeAt(0)
child.items = append(child.items, n.items[i])
n.items[i] = stolenItem
if len(stealFrom.children) > 0 {
child.children = append(child.children, stealFrom.children.removeAt(0))
}
} else {
if i >= len(n.items) {
i--
}
child := n.mutableChild(i)
// merge with right child
mergeItem := n.items.removeAt(i)
mergeChild := n.children.removeAt(i + 1)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
n.cow.freeNode(mergeChild)
}
return n.remove(item, minItems, typ)
}
type direction int
const (
descend = direction(-1)
ascend = direction(+1)
)
// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a
// "greaterThan" or "lessThan" queries.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
var ok, found bool
var index int
switch dir {
case ascend:
if start != nil {
index, _ = n.items.find(start)
}
for i := index; i < len(n.items); i++ {
if len(n.children) > 0 {
if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
hit = true
continue
}
hit = true
if stop != nil && !n.items[i].Less(stop) {
return hit, false
}
if !iter(n.items[i]) {
return hit, false
}
}
if len(n.children) > 0 {
if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
case descend:
if start != nil {
index, found = n.items.find(start)
if !found {
index = index - 1
}
} else {
index = len(n.items) - 1
}
for i := index; i >= 0; i-- {
if start != nil && !n.items[i].Less(start) {
if !includeStart || hit || start.Less(n.items[i]) {
continue
}
}
if len(n.children) > 0 {
if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
if stop != nil && !stop.Less(n.items[i]) {
return hit, false // continue
}
hit = true
if !iter(n.items[i]) {
return hit, false
}
}
if len(n.children) > 0 {
if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
return hit, false
}
}
}
return hit, true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
for _, c := range n.children {
c.print(w, level+1)
}
}
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
degree int
length int
root *node
cow *copyOnWriteContext
}
// copyOnWriteContext pointers determine node ownership... a tree with a write
// context equivalent to a node's write context is allowed to modify that node.
// A tree whose write context does not match a node's is not allowed to modify
// it, and must create a new, writable copy (i.e. it's a Clone).
//
// When doing any write operation, we maintain the invariant that the current
// node's context is equal to the context of the tree that requested the write.
// We do this by, before we descend into any node, creating a copy with the
// correct context if the contexts don't match.
//
// Since the node we're currently visiting on any write has the requesting
// tree's context, that node is modifiable in place. Children of that node may
// not share context, but before we descend into them, we'll make a mutable
// copy.
type copyOnWriteContext struct {
freelist *FreeList
}
// Clone clones the btree, lazily. Clone should not be called concurrently,
// but the original tree (t) and the new tree (t2) can be used concurrently
// once the Clone call completes.
//
// The internal tree structure of t is marked read-only and shared between t and
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
// whenever one of t's original nodes would have been modified. Read operations
// should have no performance degradation. Write operations for both t and t2
// will initially experience minor slow-downs caused by additional allocs and
// copies due to the aforementioned copy-on-write logic, but should converge to
// the original performance characteristics of the original tree.
func (t *BTree) Clone() (t2 *BTree) {
// Create two entirely new copy-on-write contexts.
// This operation effectively creates three trees:
// the original, shared nodes (old b.cow)
// the new b.cow nodes
// the new out.cow nodes
cow1, cow2 := *t.cow, *t.cow
out := *t
t.cow = &cow1
out.cow = &cow2
return &out
}
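// Illustrative sketch (not part of the upstream google/btree source; added
// here for clarity): after Clone, writes to either tree are invisible to the
// other, because shared nodes are copied before being modified.
//
//	t := New(2)
//	for i := 0; i < 10; i++ {
//		t.ReplaceOrInsert(Int(i))
//	}
//	t2 := t.Clone()
//	t.ReplaceOrInsert(Int(100)) // visible only in t
//	t2.Delete(Int(0))           // removed only from t2
//	// Now t.Len() == 11 and t2.Len() == 9, and both still contain Int(1)..Int(9).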
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
return t.degree*2 - 1
}
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
return t.degree - 1
}
func (c *copyOnWriteContext) newNode() (n *node) {
n = c.freelist.newNode()
n.cow = c
return
}
type freeType int
const (
ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
ftStored // node was stored in the freelist for later use
ftNotOwned // node was ignored by COW, since it's owned by another one
)
// freeNode frees a node within a given COW context, if it's owned by that
// context. It returns what happened to the node (see freeType const
// documentation).
func (c *copyOnWriteContext) freeNode(n *node) freeType {
if n.cow == c {
// clear to allow GC
n.items.truncate(0)
n.children.truncate(0)
n.cow = nil
if c.freelist.freeNode(n) {
return ftStored
} else {
return ftFreelistFull
}
} else {
return ftNotOwned
}
}
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
if item == nil {
panic("nil item being added to BTree")
}
if t.root == nil {
t.root = t.cow.newNode()
t.root.items = append(t.root.items, item)
t.length++
return nil
} else {
t.root = t.root.mutableFor(t.cow)
if len(t.root.items) >= t.maxItems() {
item2, second := t.root.split(t.maxItems() / 2)
oldroot := t.root
t.root = t.cow.newNode()
t.root.items = append(t.root.items, item2)
t.root.children = append(t.root.children, oldroot, second)
}
}
out := t.root.insert(item, t.maxItems())
if out == nil {
t.length++
}
return out
}
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
return t.deleteItem(item, removeItem)
}
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
return t.deleteItem(nil, removeMin)
}
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
return t.deleteItem(nil, removeMax)
}
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
if t.root == nil || len(t.root.items) == 0 {
return nil
}
t.root = t.root.mutableFor(t.cow)
out := t.root.remove(item, t.minItems(), typ)
if len(t.root.items) == 0 && len(t.root.children) > 0 {
oldroot := t.root
t.root = t.root.children[0]
t.cow.freeNode(oldroot)
}
if out != nil {
t.length--
}
return out
}
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
}
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, nil, pivot, false, false, iterator)
}
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, pivot, nil, true, false, iterator)
}
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(ascend, nil, nil, false, false, iterator)
}
// DescendRange calls the iterator for every value in the tree within the range
// [lessOrEqual, greaterThan), until iterator returns false.
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
}
// DescendLessOrEqual calls the iterator for every value in the tree within the range
// [pivot, first], until iterator returns false.
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, pivot, nil, true, false, iterator)
}
// DescendGreaterThan calls the iterator for every value in the tree within
// the range (pivot, last], until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, nil, pivot, false, false, iterator)
}
// Descend calls the iterator for every value in the tree within the range
// [last, first], until iterator returns false.
func (t *BTree) Descend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(descend, nil, nil, false, false, iterator)
}
// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
if t.root == nil {
return nil
}
return t.root.get(key)
}
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
return min(t.root)
}
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
return max(t.root)
}
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
return t.Get(key) != nil
}
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
return t.length
}
// Clear removes all items from the btree. If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full. Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster
// than calling Delete on all elements, because that requires finding/removing
// each element in the tree and updating the tree accordingly. It also is
// somewhat faster than creating a new tree to replace the old one, because
// nodes from the old tree are reclaimed into the freelist for use by the new
// one, instead of being lost to the garbage collector.
//
// This call takes:
// O(1): when addNodesToFreelist is false, this is a single operation.
// O(1): when the freelist is already full, it breaks out immediately
// O(freelist size): when the freelist is empty and the nodes are all owned
// by this tree, nodes are added to the freelist until full.
// O(tree size): when all nodes are owned by another tree, all nodes are
// iterated over looking for nodes to add to the freelist, and due to
// ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
if t.root != nil && addNodesToFreelist {
t.root.reset(t.cow)
}
t.root, t.length = nil, 0
}
// reset returns a subtree to the freelist. It breaks out immediately if the
// freelist is full, since the only benefit of iterating is to fill that
// freelist up. Returns true if parent reset call should continue.
func (n *node) reset(c *copyOnWriteContext) bool {
for _, child := range n.children {
if !child.reset(c) {
return false
}
}
return c.freeNode(n) != ftFreelistFull
}
// Int implements the Item interface for integers.
type Int int
// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
return a < b.(Int)
}
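// exampleBTreeUsage is an illustrative usage sketch added to this vendored
// copy for documentation purposes only; it is not part of the upstream
// google/btree package.
func exampleBTreeUsage() {
	tr := New(2) // degree 2: a 2-3-4 tree (1-3 items and 2-4 children per node)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(Int(i))
	}
	fmt.Println(tr.Len())        // 10
	fmt.Println(tr.Has(Int(3)))  // true
	fmt.Println(tr.Has(Int(42))) // false

	// Visit the half-open range [3, 7): prints 3, 4, 5, 6.
	tr.AscendRange(Int(3), Int(7), func(i Item) bool {
		fmt.Println(i)
		return true
	})

	// Returning false from an ItemIterator stops the iteration early:
	// this prints 0 through 4 and then stops.
	tr.Ascend(func(i Item) bool {
		fmt.Println(i)
		return i.Less(Int(4))
	})

	fmt.Println(tr.DeleteMin()) // 0
	fmt.Println(tr.Max())       // 9
}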

8
vendor/github.com/gorilla/mux/AUTHORS generated vendored Normal file

@ -0,0 +1,8 @@
# This is the official list of gorilla/mux authors for copyright purposes.
#
# Please keep the list sorted.
Google LLC (https://opensource.google.com/)
Kamil Kisielk <kamil@kamilkisiel.net>
Matt Silverlock <matt@eatsleeprepeat.net>
Rodrigo Moraes (https://github.com/moraes)

27
vendor/github.com/gorilla/mux/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

805
vendor/github.com/gorilla/mux/README.md generated vendored Normal file

@ -0,0 +1,805 @@
# gorilla/mux
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
https://www.gorillatoolkit.org/pkg/mux
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler.
The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
---
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
* [Static Files](#static-files)
* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
* [Registered URLs](#registered-urls)
* [Walking Routes](#walking-routes)
* [Graceful Shutdown](#graceful-shutdown)
* [Middleware](#middleware)
* [Handling CORS Requests](#handling-cors-requests)
* [Testing Handlers](#testing-handlers)
* [Full Example](#full-example)
---
## Install
With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
```sh
go get -u github.com/gorilla/mux
```
## Examples
Let's start registering a couple of URL paths and handlers:
```go
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
```
Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
```go
func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Category: %v\n", vars["category"])
}
```
And this is all you need to know about the basic usage. More advanced options are explained below.
### Matching Routes
Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
```go
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.example.com")
```
There are several other matchers that can be added. To match path prefixes:
```go
r.PathPrefix("/products/")
```
...or HTTP methods:
```go
r.Methods("GET", "POST")
```
...or URL schemes:
```go
r.Schemes("https")
```
...or header values:
```go
r.Headers("X-Requested-With", "XMLHttpRequest")
```
...or query values:
```go
r.Queries("key", "value")
```
...or to use a custom matcher function:
```go
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
```
...and finally, it is possible to combine several matchers in a single route:
```go
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
```
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
```go
r := mux.NewRouter()
r.HandleFunc("/specific", specificHandler)
r.PathPrefix("/").Handler(catchAllHandler)
```
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
```go
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
```
Then register routes in the subrouter:
```go
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
```
The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter.
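For example, here is an illustrative sketch (the `RegisterUserRoutes` function and the handler names are placeholders, not part of mux). A central place carves out a namespace and hands the subrouter over:
```go
r := mux.NewRouter()
api := r.PathPrefix("/api").Subrouter()
RegisterUserRoutes(api.PathPrefix("/users").Subrouter())
```
...and the other part of the app registers its paths relative to whatever subrouter it receives:
```go
func RegisterUserRoutes(s *mux.Router) {
	s.HandleFunc("/", ListUsersHandler)                         // "/api/users/"
	s.HandleFunc("/{id:[0-9]+}", GetUserHandler).Methods("GET") // "/api/users/{id}"
}
```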
There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
```go
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
### Static Files
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
request that matches "/static/\*". This makes it easy to serve static files with mux:
```go
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Serving Single Page Applications
Most of the time it makes sense to serve your SPA on a separate web server from your API,
but sometimes it's desirable to serve them both from one place. It's possible to write a simple
handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
mux's powerful routing for your API endpoints.
```go
package main
import (
"encoding/json"
"log"
"net/http"
"os"
"path/filepath"
"time"
"github.com/gorilla/mux"
)
// spaHandler implements the http.Handler interface, so we can use it
// to respond to HTTP requests. The path to the static directory and
// path to the index file within that static directory are used to
// serve the SPA in the given static directory.
type spaHandler struct {
staticPath string
indexPath string
}
// ServeHTTP inspects the URL path to locate a file within the static dir
// on the SPA handler. If a file is found, it will be served. If not, the
// file located at the index path on the SPA handler will be served. This
// is suitable behavior for serving an SPA (single page application).
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// get the absolute path to prevent directory traversal
path, err := filepath.Abs(r.URL.Path)
if err != nil {
// if we failed to get the absolute path respond with a 400 bad request
// and stop
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// prepend the path with the path to the static directory
path = filepath.Join(h.staticPath, path)
// check whether a file exists at the given path
_, err = os.Stat(path)
if os.IsNotExist(err) {
// file does not exist, serve index.html
http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
return
} else if err != nil {
// if we got an error (that wasn't that the file doesn't exist) stating the
// file, return a 500 internal server error and stop
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// otherwise, use http.FileServer to serve the static dir
http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
}
func main() {
router := mux.NewRouter()
router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
// an example API handler
json.NewEncoder(w).Encode(map[string]bool{"ok": true})
})
spa := spaHandler{staticPath: "build", indexPath: "index.html"}
router.PathPrefix("/").Handler(spa)
srv := &http.Server{
Handler: router,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
```
### Registered URLs
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
```go
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
```
To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
```go
url, err := r.Get("article").URL("category", "technology", "id", "42")
```
...and the result will be a `url.URL` with the following path:
```
"/articles/technology/42"
```
This also works for host and query value variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.example.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
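For instance, with the path-only `article` route shown earlier, passing an `id` that does not match its `[0-9]+` pattern makes `URL()` return an error rather than a URL that could never match (an illustrative sketch):
```go
// "id" must match [0-9]+, so URL() reports an error and builds no URL.
if _, err := r.Get("article").URL("category", "technology", "id", "forty-two"); err != nil {
	log.Println(err)
}
```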
Regex support also exists for matching Headers within a route. For example, we could do:
```go
r.HeadersRegexp("Content-Type", "application/(text|json)")
```
...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go
// "http://news.example.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
```
And if you use subrouters, host and path defined separately can be built as well:
```go
r := mux.NewRouter()
s := r.Host("{subdomain}.example.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.example.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
```
### Walking Routes
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
the following prints all of the registered routes:
```go
package main
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
)
func handler(w http.ResponseWriter, r *http.Request) {
return
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.HandleFunc("/products", handler).Methods("POST")
r.HandleFunc("/articles", handler).Methods("GET")
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
pathTemplate, err := route.GetPathTemplate()
if err == nil {
fmt.Println("ROUTE:", pathTemplate)
}
pathRegexp, err := route.GetPathRegexp()
if err == nil {
fmt.Println("Path regexp:", pathRegexp)
}
queriesTemplates, err := route.GetQueriesTemplates()
if err == nil {
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
}
queriesRegexps, err := route.GetQueriesRegexp()
if err == nil {
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
}
methods, err := route.GetMethods()
if err == nil {
fmt.Println("Methods:", strings.Join(methods, ","))
}
fmt.Println()
return nil
})
if err != nil {
fmt.Println(err)
}
http.Handle("/", r)
}
```
### Graceful Shutdown
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
```go
package main
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
)
func main() {
var wait time.Duration
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
flag.Parse()
r := mux.NewRouter()
// Add your routes as needed
srv := &http.Server{
Addr: "0.0.0.0:8080",
// Good practice to set timeouts to avoid Slowloris attacks.
WriteTimeout: time.Second * 15,
ReadTimeout: time.Second * 15,
IdleTimeout: time.Second * 60,
Handler: r, // Pass our instance of gorilla/mux in.
}
// Run our server in a goroutine so that it doesn't block.
go func() {
if err := srv.ListenAndServe(); err != nil {
log.Println(err)
}
}()
c := make(chan os.Signal, 1)
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
signal.Notify(c, os.Interrupt)
// Block until we receive our signal.
<-c
// Create a deadline to wait for.
ctx, cancel := context.WithTimeout(context.Background(), wait)
defer cancel()
// Doesn't block if no connections, but will otherwise wait
// until the timeout deadline.
srv.Shutdown(ctx)
// Optionally, you could run srv.Shutdown in a goroutine and block on
// <-ctx.Done() if your application should wait for other services
// to finalize based on context cancellation.
log.Println("shutting down")
os.Exit(0)
}
```
### Middleware
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
Mux middlewares are defined using the de facto standard type:
```go
type MiddlewareFunc func(http.Handler) http.Handler
```
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
A very basic middleware which logs the URI of the request being handled could be written as:
```go
func loggingMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
```
Middlewares can be added to a router using `Router.Use()`:
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(loggingMiddleware)
```
A more complex authentication middleware, which maps session tokens to users, could be written as:
```go
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
// Pass down the request to the next middleware (or final handler)
next.ServeHTTP(w, r)
} else {
// Write an error and stop the handler chain
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
```
```go
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)
```
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
### Handling CORS Requests
[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
* If you do not specify any methods, then:
> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
```go
package main
import (
"net/http"
"github.com/gorilla/mux"
)
func main() {
r := mux.NewRouter()
// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
r.Use(mux.CORSMethodMiddleware(r))
http.ListenAndServe(":8080", r)
}
func fooHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
return
}
w.Write([]byte("foo"))
}
```
And a request to `/foo` using something like:
```bash
curl localhost:8080/foo -v
```
Would look like:
```bash
* Trying ::1...
* TCP_NODELAY set
* Connected to localhost (::1) port 8080 (#0)
> GET /foo HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.59.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
< Access-Control-Allow-Origin: *
< Date: Fri, 28 Jun 2019 20:13:30 GMT
< Content-Length: 3
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
foo
```
### Testing Handlers
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
First, our simple HTTP handler:
```go
// endpoints.go
package main

import (
	"io"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
// A very simple health check.
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// In the future we could report back on the status of our DB, or our cache
// (e.g. Redis) by performing a simple PING, and include them in the response.
io.WriteString(w, `{"alive": true}`)
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/health", HealthCheckHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test code:
```go
// endpoints_test.go
package main
import (
"net/http"
"net/http/httptest"
"testing"
)
func TestHealthCheckHandler(t *testing.T) {
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
// pass 'nil' as the third parameter.
req, err := http.NewRequest("GET", "/health", nil)
if err != nil {
t.Fatal(err)
}
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
rr := httptest.NewRecorder()
handler := http.HandlerFunc(HealthCheckHandler)
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
// directly and pass in our Request and ResponseRecorder.
handler.ServeHTTP(rr, req)
// Check the status code is what we expect.
if status := rr.Code; status != http.StatusOK {
t.Errorf("handler returned wrong status code: got %v want %v",
status, http.StatusOK)
}
// Check the response body is what we expect.
expected := `{"alive": true}`
if rr.Body.String() != expected {
t.Errorf("handler returned unexpected body: got %v want %v",
rr.Body.String(), expected)
}
}
```
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
possible route variables as needed.
```go
// endpoints.go
func main() {
r := mux.NewRouter()
// A route with a route variable:
r.HandleFunc("/metrics/{type}", MetricsHandler)
log.Fatal(http.ListenAndServe("localhost:8080", r))
}
```
Our test file, with a table-driven test of `routeVariables`:
```go
// endpoints_test.go
func TestMetricsHandler(t *testing.T) {
tt := []struct{
routeVariable string
shouldPass bool
}{
{"goroutines", true},
{"heap", true},
{"counters", true},
{"queries", true},
{"adhadaeqm3k", false},
}
for _, tc := range tt {
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
req, err := http.NewRequest("GET", path, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
// Need to create a router that we can pass the request through so that the vars will be added to the context
router := mux.NewRouter()
router.HandleFunc("/metrics/{type}", MetricsHandler)
router.ServeHTTP(rr, req)
// In this case, our MetricsHandler returns a non-200 response
// for a route variable it doesn't know about.
if rr.Code == http.StatusOK && !tc.shouldPass {
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
tc.routeVariable, rr.Code, http.StatusOK)
}
}
}
```
## Full Example
Here's a complete, runnable example of a small `mux` based server:
```go
package main
import (
"net/http"
"log"
"github.com/gorilla/mux"
)
func YourHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Gorilla!\n"))
}
func main() {
r := mux.NewRouter()
// Routes consist of a path and a handler function.
r.HandleFunc("/", YourHandler)
// Bind to a port and pass our router in
log.Fatal(http.ListenAndServe(":8000", r))
}
```
## License
BSD licensed. See the LICENSE file for details.

306
vendor/github.com/gorilla/mux/doc.go generated vendored Normal file

@ -0,0 +1,306 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package mux implements a request router and dispatcher.
The name mux stands for "HTTP request multiplexer". Like the standard
http.ServeMux, mux.Router matches incoming requests against a list of
registered routes and calls a handler for the route that matches the URL
or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
* URL hosts, paths and query values can have variables with an optional
regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
parent route matches. This is useful to define groups of routes that
share common conditions like a host, a path prefix or other repeated
attributes. As a bonus, this optimizes request matching.
* It implements the http.Handler interface so it is compatible with the
standard http.ServeMux.
Let's start registering a couple of URL paths and handlers:
func main() {
r := mux.NewRouter()
r.HandleFunc("/", HomeHandler)
r.HandleFunc("/products", ProductsHandler)
r.HandleFunc("/articles", ArticlesHandler)
http.Handle("/", r)
}
Here we register three routes mapping URL paths to handlers. This is
equivalent to how http.HandleFunc() works: if an incoming request URL matches
one of the paths, the corresponding handler is called passing
(http.ResponseWriter, *http.Request) as parameters.
Paths can have variables. They are defined using the format {name} or
{name:pattern}. If a regular expression pattern is not defined, the matched
variable will be anything until the next slash. For example:
r := mux.NewRouter()
r.HandleFunc("/products/{key}", ProductHandler)
r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
The names are used to create a map of route variables which can be retrieved
calling mux.Vars():
vars := mux.Vars(request)
category := vars["category"]
Note that if any capturing groups are present, mux will panic() during parsing. To prevent
this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
when capturing groups were present.
And this is all you need to know about the basic usage. More advanced options
are explained below.
Routes can also be restricted to a domain or subdomain. Just define a host
pattern to be matched. They can also have variables:
r := mux.NewRouter()
// Only matches if domain is "www.example.com".
r.Host("www.example.com")
// Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com")
There are several other matchers that can be added. To match path prefixes:
r.PathPrefix("/products/")
...or HTTP methods:
r.Methods("GET", "POST")
...or URL schemes:
r.Schemes("https")
...or header values:
r.Headers("X-Requested-With", "XMLHttpRequest")
...or query values:
r.Queries("key", "value")
...or to use a custom matcher function:
r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
return r.ProtoMajor == 0
})
...and finally, it is possible to combine several matchers in a single route:
r.HandleFunc("/products", ProductsHandler).
Host("www.example.com").
Methods("GET").
Schemes("http")
Setting the same matching conditions again and again can be boring, so we have
a way to group several routes that share the same requirements.
We call it "subrouting".
For example, let's say we have several URLs that should only match when the
host is "www.example.com". Create a route for that host and get a "subrouter"
from it:
r := mux.NewRouter()
s := r.Host("www.example.com").Subrouter()
Then register routes in the subrouter:
s.HandleFunc("/products/", ProductsHandler)
s.HandleFunc("/products/{key}", ProductHandler)
s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
The three URL paths we registered above will only be tested if the domain is
"www.example.com", because the subrouter is tested first. This is not
only convenient, but also optimizes request matching. You can create
subrouters combining any attribute matchers accepted by a route.
Subrouters can be used to create domain or path "namespaces": you define
subrouters in a central place and then parts of the app can register its
paths relatively to a given subrouter.
There's one more thing about subroutes. When a subrouter has a path prefix,
the inner routes use it as base for their paths:
r := mux.NewRouter()
s := r.PathPrefix("/products").Subrouter()
// "/products/"
s.HandleFunc("/", ProductsHandler)
// "/products/{key}/"
s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
Note that the path provided to PathPrefix() represents a "wildcard": calling
PathPrefix("/static/").Handler(...) means that the handler will be passed any
request that matches "/static/*". This makes it easy to serve static files with mux:
func main() {
var dir string
flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
flag.Parse()
r := mux.NewRouter()
// This will serve files under http://localhost:8000/static/<filename>
r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
srv := &http.Server{
Handler: r,
Addr: "127.0.0.1:8000",
// Good practice: enforce timeouts for servers you create!
WriteTimeout: 15 * time.Second,
ReadTimeout: 15 * time.Second,
}
log.Fatal(srv.ListenAndServe())
}
Now let's see how to build registered URLs.
Routes can be named. All routes that define a name can have their URLs built,
or "reversed". We define a name calling Name() on a route. For example:
r := mux.NewRouter()
r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
Name("article")
To build a URL, get the route and call the URL() method, passing a sequence of
key/value pairs for the route variables. For the previous route, we would do:
url, err := r.Get("article").URL("category", "technology", "id", "42")
...and the result will be a url.URL with the following path:
"/articles/technology/42"
This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42",
"filter", "gorilla")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
generated URL will always match a registered route -- the only exception is
for explicitly defined "build-only" routes which never match.
Regex support also exists for matching Headers within a route. For example, we could do:
r.HeadersRegexp("Content-Type", "application/(text|json)")
...and the route will match requests with a Content-Type of either `application/json` or
`application/text`.
There's also a way to build only the URL host or path for a route:
use the methods URLHost() or URLPath() instead. For the previous route,
we would do:
// "http://news.domain.com/"
host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42"
path, err := r.Get("article").URLPath("category", "technology", "id", "42")
And if you use subrouters, host and path defined separately can be built
as well:
r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler).
Name("article")
// "http://news.domain.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
"id", "42")
Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
type MiddlewareFunc func(http.Handler) http.Handler
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
A very basic middleware which logs the URI of the request being handled could be written as:
func simpleMw(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Do stuff here
log.Println(r.RequestURI)
// Call the next handler, which can be another middleware in the chain, or the final handler.
next.ServeHTTP(w, r)
})
}
Middlewares can be added to a router using `Router.Use()`:
r := mux.NewRouter()
r.HandleFunc("/", handler)
r.Use(simpleMw)
A more complex authentication middleware, which maps session tokens to users, could be written as:
// Define our struct
type authenticationMiddleware struct {
tokenUsers map[string]string
}
// Initialize it somewhere
func (amw *authenticationMiddleware) Populate() {
amw.tokenUsers["00000000"] = "user0"
amw.tokenUsers["aaaaaaaa"] = "userA"
amw.tokenUsers["05f717e5"] = "randomUser"
amw.tokenUsers["deadbeef"] = "user0"
}
// Middleware function, which will be called for each request
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
token := r.Header.Get("X-Session-Token")
if user, found := amw.tokenUsers[token]; found {
// We found the token in our map
log.Printf("Authenticated user %s\n", user)
next.ServeHTTP(w, r)
} else {
http.Error(w, "Forbidden", http.StatusForbidden)
}
})
}
r := mux.NewRouter()
r.HandleFunc("/", handler)
amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate()
r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
*/
package mux

74
vendor/github.com/gorilla/mux/middleware.go generated vendored Normal file

@ -0,0 +1,74 @@
package mux
import (
"net/http"
"strings"
)
// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
type MiddlewareFunc func(http.Handler) http.Handler
// middleware interface is anything which implements a MiddlewareFunc named Middleware.
type middleware interface {
Middleware(handler http.Handler) http.Handler
}
// Middleware allows MiddlewareFunc to implement the middleware interface.
func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
return mw(handler)
}
// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
func (r *Router) Use(mwf ...MiddlewareFunc) {
for _, fn := range mwf {
r.middlewares = append(r.middlewares, fn)
}
}
// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
func (r *Router) useInterface(mw middleware) {
r.middlewares = append(r.middlewares, mw)
}
// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
// on requests for routes that have an OPTIONS method matcher to all the method matchers on
// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
// by the middleware. See examples for usage.
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
allMethods, err := getAllMethodsForRoute(r, req)
if err == nil {
for _, v := range allMethods {
if v == http.MethodOptions {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
}
}
}
next.ServeHTTP(w, req)
})
}
}
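// Illustrative usage sketch (not part of the upstream file): the middleware is
// built from the router whose routes it should inspect, and only routes that
// register an OPTIONS matcher are annotated. The handler name is hypothetical.
//
//	r := NewRouter()
//	r.HandleFunc("/api/items", itemsHandler).
//		Methods(http.MethodGet, http.MethodPut, http.MethodOptions)
//	r.Use(CORSMethodMiddleware(r))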
// getAllMethodsForRoute returns all the methods from method matchers matching a given
// request.
func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
var allMethods []string
for _, route := range r.routes {
var match RouteMatch
if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
methods, err := route.GetMethods()
if err != nil {
return nil, err
}
allMethods = append(allMethods, methods...)
}
}
return allMethods, nil
}

607
vendor/github.com/gorilla/mux/mux.go generated vendored Normal file

@ -0,0 +1,607 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"context"
"errors"
"fmt"
"net/http"
"path"
"regexp"
)
var (
// ErrMethodMismatch is returned when the method in the request does not match
// the method defined against the route.
ErrMethodMismatch = errors.New("method is not allowed")
// ErrNotFound is returned when no route match is found.
ErrNotFound = errors.New("no matching route was found")
)
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route)}
}
// Router registers routes to be matched and dispatches a handler.
//
// It implements the http.Handler interface, so it can be registered to serve
// requests:
//
// var router = mux.NewRouter()
//
// func main() {
// http.Handle("/", router)
// }
//
// Or, for Google App Engine, register it in an init() function:
//
// func init() {
// http.Handle("/", router)
// }
//
// This will send all incoming requests to the router.
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
// Configurable Handler to be used when the request method does not match the route.
MethodNotAllowedHandler http.Handler
// Routes to be matched, in order.
routes []*Route
// Routes by name for URL building.
namedRoutes map[string]*Route
// If true, do not clear the request context after handling the request.
//
// Deprecated: No effect, since the context is stored on the request itself.
KeepContext bool
// Slice of middlewares to be called after a match is found
middlewares []middleware
// configuration shared with `Route`
routeConf
}
// common route configuration shared between `Router` and `Route`
type routeConf struct {
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, when the path pattern is "/path//to", accessing "/path//to"
// will not redirect
skipClean bool
// Manager for the variables from host and path.
regexp routeRegexpGroup
// List of matchers.
matchers []matcher
// The scheme used when building URLs.
buildScheme string
buildVarsFunc BuildVarsFunc
}
// returns an effective deep copy of `routeConf`
func copyRouteConf(r routeConf) routeConf {
c := r
if r.regexp.path != nil {
c.regexp.path = copyRouteRegexp(r.regexp.path)
}
if r.regexp.host != nil {
c.regexp.host = copyRouteRegexp(r.regexp.host)
}
c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
for _, q := range r.regexp.queries {
c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
}
c.matchers = make([]matcher, len(r.matchers))
copy(c.matchers, r.matchers)
return c
}
func copyRouteRegexp(r *routeRegexp) *routeRegexp {
c := *r
return &c
}
// Match attempts to match the given request against the router's registered routes.
//
// If the request matches a route of this router or one of its subrouters the Route,
// Handler, and Vars fields of the match argument are filled and this function
// returns true.
//
// If the request does not match any of this router's or its subrouters' routes
// then this function returns false. If available, a reason for the match failure
// will be filled in the match argument's MatchErr field. If the match failure type
// (e.g. not found) has a registered handler, the handler is assigned to the Handler
// field of the match argument.
func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
for _, route := range r.routes {
if route.Match(req, match) {
// Build middleware chain if no error was found
if match.MatchErr == nil {
for i := len(r.middlewares) - 1; i >= 0; i-- {
match.Handler = r.middlewares[i].Middleware(match.Handler)
}
}
return true
}
}
if match.MatchErr == ErrMethodMismatch {
if r.MethodNotAllowedHandler != nil {
match.Handler = r.MethodNotAllowedHandler
return true
}
return false
}
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
match.MatchErr = ErrNotFound
return true
}
match.MatchErr = ErrNotFound
return false
}
// ServeHTTP dispatches the handler registered in the matched route.
//
// When there is a match, the route variables can be retrieved calling
// mux.Vars(request).
func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if !r.skipClean {
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Clean path to canonical form and redirect.
if p := cleanPath(path); p != path {
// Added 3 lines (Philip Schlump) - the redirect was dropping the query string and fragment.
// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
// http://code.google.com/p/go/issues/detail?id=5252
url := *req.URL
url.Path = p
p = url.String()
w.Header().Set("Location", p)
w.WriteHeader(http.StatusMovedPermanently)
return
}
}
var match RouteMatch
var handler http.Handler
if r.Match(req, &match) {
handler = match.Handler
req = requestWithVars(req, match.Vars)
req = requestWithRoute(req, match.Route)
}
if handler == nil && match.MatchErr == ErrMethodMismatch {
handler = methodNotAllowedHandler()
}
if handler == nil {
handler = http.NotFoundHandler()
}
handler.ServeHTTP(w, req)
}
// Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route {
return r.namedRoutes[name]
}
// GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route {
return r.namedRoutes[name]
}
// StrictSlash defines the trailing slash behavior for new routes. The initial
// value is false.
//
// When true, if the route path is "/path/", accessing "/path" will perform a redirect
// to the former and vice versa. In other words, your application will always
// see the path as specified in the route.
//
// When false, if the route path is "/path", accessing "/path/" will not match
// this route and vice versa.
//
// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
// request will be made as a GET by most clients. Use middleware or client settings
// to modify this behaviour as needed.
//
// Special case: when a route sets a path prefix using the PathPrefix() method,
// strict slash is ignored for that route because the redirect behavior can't
// be determined from a prefix alone. However, any subrouters created from that
// route inherit the original StrictSlash setting.
func (r *Router) StrictSlash(value bool) *Router {
r.strictSlash = value
return r
}
// SkipClean defines the path cleaning behaviour for new routes. The initial
// value is false. Users should be careful about which routes are not cleaned.
//
// When true, if the route path is "/path//to", it will remain with the double
// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
//
// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
// become /fetch/http/xkcd.com/534
func (r *Router) SkipClean(value bool) *Router {
r.skipClean = value
return r
}
// UseEncodedPath tells the router to match the encoded original path
// to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
//
// If not called, the router will match the unencoded path to the routes.
// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
func (r *Router) UseEncodedPath() *Router {
r.useEncodedPath = true
return r
}
// ----------------------------------------------------------------------------
// Route factories
// ----------------------------------------------------------------------------
// NewRoute registers an empty route.
func (r *Router) NewRoute() *Route {
// initialize a route with a copy of the parent router's configuration
route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.routes = append(r.routes, route)
return route
}
// Name registers a new route with a name.
// See Route.Name().
func (r *Router) Name(name string) *Route {
return r.NewRoute().Name(name)
}
// Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route {
return r.NewRoute().Path(path).Handler(handler)
}
// HandleFunc registers a new route with a matcher for the URL path.
// See Route.Path() and Route.HandlerFunc().
func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
*http.Request)) *Route {
return r.NewRoute().Path(path).HandlerFunc(f)
}
// Headers registers a new route with a matcher for request header values.
// See Route.Headers().
func (r *Router) Headers(pairs ...string) *Route {
return r.NewRoute().Headers(pairs...)
}
// Host registers a new route with a matcher for the URL host.
// See Route.Host().
func (r *Router) Host(tpl string) *Route {
return r.NewRoute().Host(tpl)
}
// MatcherFunc registers a new route with a custom matcher function.
// See Route.MatcherFunc().
func (r *Router) MatcherFunc(f MatcherFunc) *Route {
return r.NewRoute().MatcherFunc(f)
}
// Methods registers a new route with a matcher for HTTP methods.
// See Route.Methods().
func (r *Router) Methods(methods ...string) *Route {
return r.NewRoute().Methods(methods...)
}
// Path registers a new route with a matcher for the URL path.
// See Route.Path().
func (r *Router) Path(tpl string) *Route {
return r.NewRoute().Path(tpl)
}
// PathPrefix registers a new route with a matcher for the URL path prefix.
// See Route.PathPrefix().
func (r *Router) PathPrefix(tpl string) *Route {
return r.NewRoute().PathPrefix(tpl)
}
// Queries registers a new route with a matcher for URL query values.
// See Route.Queries().
func (r *Router) Queries(pairs ...string) *Route {
return r.NewRoute().Queries(pairs...)
}
// Schemes registers a new route with a matcher for URL schemes.
// See Route.Schemes().
func (r *Router) Schemes(schemes ...string) *Route {
return r.NewRoute().Schemes(schemes...)
}
// BuildVarsFunc registers a new route with a custom function for modifying
// route variables before building a URL.
func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
return r.NewRoute().BuildVarsFunc(f)
}
// Walk walks the router and all its sub-routers, calling walkFn for each route
// in the tree. The routes are walked in the order they were added. Sub-routers
// are explored depth-first.
func (r *Router) Walk(walkFn WalkFunc) error {
return r.walk(walkFn, []*Route{})
}
// SkipRouter is used as a return value from WalkFuncs to indicate that the
// router that walk is about to descend down to should be skipped.
var SkipRouter = errors.New("skip this router")
// WalkFunc is the type of the function called for each route visited by Walk.
// At every invocation, it is given the current route, and the current router,
// and a list of ancestor routes that lead to the current route.
type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
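// Illustrative sketch (not part of the upstream file): using Walk to print
// every registered path template. Routes without a path matcher return an
// error from GetPathTemplate and are simply skipped.
//
//	_ = r.Walk(func(route *Route, router *Router, ancestors []*Route) error {
//		if tpl, err := route.GetPathTemplate(); err == nil {
//			log.Println(tpl)
//		}
//		return nil
//	})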
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
}
if err != nil {
return err
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
if h, ok := t.handler.(*Router); ok {
ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
ancestors = ancestors[:len(ancestors)-1]
}
}
return nil
}
// ----------------------------------------------------------------------------
// Context
// ----------------------------------------------------------------------------
// RouteMatch stores information about a matched route.
type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
// MatchErr is set to appropriate matching error
// It is set to ErrMethodMismatch if there is a mismatch in
// the request method and route method
MatchErr error
}
type contextKey int
const (
varsKey contextKey = iota
routeKey
)
// Vars returns the route variables for the current request, if any.
func Vars(r *http.Request) map[string]string {
if rv := r.Context().Value(varsKey); rv != nil {
return rv.(map[string]string)
}
return nil
}
// CurrentRoute returns the matched route for the current request, if any.
// This only works when called inside the handler of the matched route
// because the matched route is stored in the request context which is cleared
// after the handler returns, unless the KeepContext option is set on the
// Router.
func CurrentRoute(r *http.Request) *Route {
if rv := r.Context().Value(routeKey); rv != nil {
return rv.(*Route)
}
return nil
}
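// Illustrative sketch (not part of the upstream file): reading route variables
// inside a handler registered as
// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler):
//
//	func ArticleHandler(w http.ResponseWriter, r *http.Request) {
//		vars := Vars(r)
//		fmt.Fprintf(w, "category=%s id=%s", vars["category"], vars["id"])
//	}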
func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
ctx := context.WithValue(r.Context(), varsKey, vars)
return r.WithContext(ctx)
}
func requestWithRoute(r *http.Request, route *Route) *http.Request {
ctx := context.WithValue(r.Context(), routeKey, route)
return r.WithContext(ctx)
}
// ----------------------------------------------------------------------------
// Helpers
// ----------------------------------------------------------------------------
// cleanPath returns the canonical path for p, eliminating . and .. elements.
// Borrowed from the net/http package.
func cleanPath(p string) string {
if p == "" {
return "/"
}
if p[0] != '/' {
p = "/" + p
}
np := path.Clean(p)
// path.Clean removes trailing slash except for root;
// put the trailing slash back if necessary.
if p[len(p)-1] == '/' && np != "/" {
np += "/"
}
return np
}
// uniqueVars returns an error if two slices contain duplicated strings.
func uniqueVars(s1, s2 []string) error {
for _, v1 := range s1 {
for _, v2 := range s2 {
if v1 == v2 {
return fmt.Errorf("mux: duplicated route variable %q", v2)
}
}
}
return nil
}
// checkPairs returns the count of strings passed in, and an error if
// the count is not an even number.
func checkPairs(pairs ...string) (int, error) {
length := len(pairs)
if length%2 != 0 {
return length, fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
}
return length, nil
}
// mapFromPairsToString converts variadic string parameters to a
// string to string map.
func mapFromPairsToString(pairs ...string) (map[string]string, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]string, length/2)
for i := 0; i < length; i += 2 {
m[pairs[i]] = pairs[i+1]
}
return m, nil
}
// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
if err != nil {
return nil, err
}
m := make(map[string]*regexp.Regexp, length/2)
for i := 0; i < length; i += 2 {
regex, err := regexp.Compile(pairs[i+1])
if err != nil {
return nil, err
}
m[pairs[i]] = regex
}
return m, nil
}
// matchInArray returns true if the given string value is in the array.
func matchInArray(arr []string, value string) bool {
for _, v := range arr {
if v == value {
return true
}
}
return false
}
// matchMapWithString returns true if the given key/value pairs exist in a given map.
func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != "" {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v == value {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
// the given regex
func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
for k, v := range toCheck {
// Check if key exists.
if canonicalKey {
k = http.CanonicalHeaderKey(k)
}
if values := toMatch[k]; values == nil {
return false
} else if v != nil {
// If value was defined as an empty string we only check that the
// key exists. Otherwise we also check for equality.
valueExists := false
for _, value := range values {
if v.MatchString(value) {
valueExists = true
break
}
}
if !valueExists {
return false
}
}
}
return true
}
// methodNotAllowed replies to the request with an HTTP status code 405.
func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
}
// methodNotAllowedHandler returns a simple request handler
// that replies to each request with a status code 405.
func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }

382
vendor/github.com/gorilla/mux/regexp.go generated vendored Normal file

@ -0,0 +1,382 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"bytes"
"fmt"
"net/http"
"net/url"
"regexp"
"strconv"
"strings"
)
type routeRegexpOptions struct {
strictSlash bool
useEncodedPath bool
}
type regexpType int
const (
regexpTypePath regexpType = 0
regexpTypeHost regexpType = 1
regexpTypePrefix regexpType = 2
regexpTypeQuery regexpType = 3
)
// newRouteRegexp parses a route template and returns a routeRegexp,
// used to match a host, a path or a query string.
//
// It will extract named variables, assemble a regexp to be matched, create
// a "reverse" template to build URLs and compile regexps to validate variable
// values used in URL building.
//
// Previously we accepted only Python-like identifiers for variable
// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
// name and pattern can't be empty, and names can't contain a colon.
func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
// Check if it is well-formed.
idxs, errBraces := braceIndices(tpl)
if errBraces != nil {
return nil, errBraces
}
// Backup the original.
template := tpl
// Now let's parse it.
defaultPattern := "[^/]+"
if typ == regexpTypeQuery {
defaultPattern = ".*"
} else if typ == regexpTypeHost {
defaultPattern = "[^.]+"
}
// Only apply strict slash behaviour when matching a path.
if typ != regexpTypePath {
options.strictSlash = false
}
// Set a flag for strictSlash.
endSlash := false
if options.strictSlash && strings.HasSuffix(tpl, "/") {
tpl = tpl[:len(tpl)-1]
endSlash = true
}
varsN := make([]string, len(idxs)/2)
varsR := make([]*regexp.Regexp, len(idxs)/2)
pattern := bytes.NewBufferString("")
pattern.WriteByte('^')
reverse := bytes.NewBufferString("")
var end int
var err error
for i := 0; i < len(idxs); i += 2 {
// Set all values we are interested in.
raw := tpl[end:idxs[i]]
end = idxs[i+1]
parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
name := parts[0]
patt := defaultPattern
if len(parts) == 2 {
patt = parts[1]
}
// Name or pattern can't be empty.
if name == "" || patt == "" {
return nil, fmt.Errorf("mux: missing name or pattern in %q",
tpl[idxs[i]:end])
}
// Build the regexp pattern.
fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
// Build the reverse template.
fmt.Fprintf(reverse, "%s%%s", raw)
// Append variable name and compiled pattern.
varsN[i/2] = name
varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
if err != nil {
return nil, err
}
}
// Add the remaining.
raw := tpl[end:]
pattern.WriteString(regexp.QuoteMeta(raw))
if options.strictSlash {
pattern.WriteString("[/]?")
}
if typ == regexpTypeQuery {
// Add the default pattern if the query value is empty
if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
pattern.WriteString(defaultPattern)
}
}
if typ != regexpTypePrefix {
pattern.WriteByte('$')
}
var wildcardHostPort bool
if typ == regexpTypeHost {
if !strings.Contains(pattern.String(), ":") {
wildcardHostPort = true
}
}
reverse.WriteString(raw)
if endSlash {
reverse.WriteByte('/')
}
// Compile full regexp.
reg, errCompile := regexp.Compile(pattern.String())
if errCompile != nil {
return nil, errCompile
}
// Check for capturing groups which used to work in older versions
if reg.NumSubexp() != len(idxs)/2 {
panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
}
// Done!
return &routeRegexp{
template: template,
regexpType: typ,
options: options,
regexp: reg,
reverse: reverse.String(),
varsN: varsN,
varsR: varsR,
wildcardHostPort: wildcardHostPort,
}, nil
}
// routeRegexp stores a regexp to match a host or path and information to
// collect and validate route variables.
type routeRegexp struct {
// The unmodified template.
template string
// The type of match
regexpType regexpType
// Options for matching
options routeRegexpOptions
// Expanded regexp.
regexp *regexp.Regexp
// Reverse template.
reverse string
// Variable names.
varsN []string
// Variable regexps (validators).
varsR []*regexp.Regexp
// Wildcard host-port (no strict port match in hostname)
wildcardHostPort bool
}
// Match matches the regexp against the URL host or path.
func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
if r.regexpType == regexpTypeHost {
host := getHost(req)
if r.wildcardHostPort {
// Don't be strict on the port match
if i := strings.Index(host, ":"); i != -1 {
host = host[:i]
}
}
return r.regexp.MatchString(host)
}
if r.regexpType == regexpTypeQuery {
return r.matchQueryString(req)
}
path := req.URL.Path
if r.options.useEncodedPath {
path = req.URL.EscapedPath()
}
return r.regexp.MatchString(path)
}
// url builds a URL part using the given values.
func (r *routeRegexp) url(values map[string]string) (string, error) {
urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
for k, v := range r.varsN {
value, ok := values[v]
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
if r.regexpType == regexpTypeQuery {
value = url.QueryEscape(value)
}
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
if !r.regexp.MatchString(rv) {
// The URL is checked against the full regexp, instead of checking
// individual variables. This is faster but to provide a good error
// message, we check individual regexps if the URL doesn't match.
for k, v := range r.varsN {
if !r.varsR[k].MatchString(values[v]) {
return "", fmt.Errorf(
"mux: variable %q doesn't match, expected %q", values[v],
r.varsR[k].String())
}
}
}
return rv, nil
}
// getURLQuery returns a single query parameter from a request URL.
// For a URL with foo=bar&baz=ding, we return only the relevant key
// value pair for the routeRegexp.
func (r *routeRegexp) getURLQuery(req *http.Request) string {
if r.regexpType != regexpTypeQuery {
return ""
}
templateKey := strings.SplitN(r.template, "=", 2)[0]
val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
if ok {
return templateKey + "=" + val
}
return ""
}
// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
// If key was not found, empty string and false is returned.
func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
query := []byte(rawQuery)
for len(query) > 0 {
foundKey := query
if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
foundKey, query = foundKey[:i], foundKey[i+1:]
} else {
query = query[:0]
}
if len(foundKey) == 0 {
continue
}
var value []byte
if i := bytes.IndexByte(foundKey, '='); i >= 0 {
foundKey, value = foundKey[:i], foundKey[i+1:]
}
if len(foundKey) < len(key) {
// Cannot possibly be key.
continue
}
keyString, err := url.QueryUnescape(string(foundKey))
if err != nil {
continue
}
if keyString != key {
continue
}
valueString, err := url.QueryUnescape(string(value))
if err != nil {
continue
}
return valueString, true
}
return "", false
}
func (r *routeRegexp) matchQueryString(req *http.Request) bool {
return r.regexp.MatchString(r.getURLQuery(req))
}
// braceIndices returns the first level curly brace indices from a string.
// It returns an error in case of unbalanced braces.
func braceIndices(s string) ([]int, error) {
var level, idx int
var idxs []int
for i := 0; i < len(s); i++ {
switch s[i] {
case '{':
if level++; level == 1 {
idx = i
}
case '}':
if level--; level == 0 {
idxs = append(idxs, idx, i+1)
} else if level < 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
}
}
if level != 0 {
return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
}
return idxs, nil
}
// varGroupName builds a capturing group name for the indexed variable.
func varGroupName(idx int) string {
return "v" + strconv.Itoa(idx)
}
// ----------------------------------------------------------------------------
// routeRegexpGroup
// ----------------------------------------------------------------------------
// routeRegexpGroup groups the route matchers that carry variables.
type routeRegexpGroup struct {
host *routeRegexp
path *routeRegexp
queries []*routeRegexp
}
// setMatch extracts the variables from the URL once a route matches.
func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
// Store host variables.
if v.host != nil {
host := getHost(req)
matches := v.host.regexp.FindStringSubmatchIndex(host)
if len(matches) > 0 {
extractVars(host, matches, v.host.varsN, m.Vars)
}
}
path := req.URL.Path
if r.useEncodedPath {
path = req.URL.EscapedPath()
}
// Store path variables.
if v.path != nil {
matches := v.path.regexp.FindStringSubmatchIndex(path)
if len(matches) > 0 {
extractVars(path, matches, v.path.varsN, m.Vars)
// Check if we should redirect.
if v.path.options.strictSlash {
p1 := strings.HasSuffix(path, "/")
p2 := strings.HasSuffix(v.path.template, "/")
if p1 != p2 {
u, _ := url.Parse(req.URL.String())
if p1 {
u.Path = u.Path[:len(u.Path)-1]
} else {
u.Path += "/"
}
m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
}
}
}
}
// Store query string variables.
for _, q := range v.queries {
queryURL := q.getURLQuery(req)
matches := q.regexp.FindStringSubmatchIndex(queryURL)
if len(matches) > 0 {
extractVars(queryURL, matches, q.varsN, m.Vars)
}
}
}
// getHost tries its best to return the request host.
// According to section 14.23 of RFC 2616 the Host header
// can include the port number if the default value of 80 is not used.
func getHost(r *http.Request) string {
if r.URL.IsAbs() {
return r.URL.Host
}
return r.Host
}
func extractVars(input string, matches []int, names []string, output map[string]string) {
for i, name := range names {
output[name] = input[matches[2*i+2]:matches[2*i+3]]
}
}

736
vendor/github.com/gorilla/mux/route.go generated vendored Normal file

@ -0,0 +1,736 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import (
"errors"
"fmt"
"net/http"
"net/url"
"regexp"
"strings"
)
// Route stores information to match a request and build URLs.
type Route struct {
// Request handler for the route.
handler http.Handler
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
name string
// Error resulted from building a route.
err error
// "global" reference to all named routes
namedRoutes map[string]*Route
// config possibly passed in from `Router`
routeConf
}
// SkipClean reports whether path cleaning is disabled for this route via
// Router.SkipClean.
func (r *Route) SkipClean() bool {
return r.skipClean
}
// Match matches the route against the request.
func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
var matchErr error
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
if _, ok := m.(methodMatcher); ok {
matchErr = ErrMethodMismatch
continue
}
// Ignore ErrNotFound errors. These errors arise from match call
// to Subrouters.
//
// This prevents subsequent matching subrouters from failing to
// run middleware. If not ignored, the middleware would see a
// non-nil MatchErr and be skipped, even when there was a
// matching route.
if match.MatchErr == ErrNotFound {
match.MatchErr = nil
}
matchErr = nil
return false
}
}
if matchErr != nil {
match.MatchErr = matchErr
return false
}
if match.MatchErr == ErrMethodMismatch && r.handler != nil {
// We found a route which matches request method, clear MatchErr
match.MatchErr = nil
// Then override the mis-matched handler
match.Handler = r.handler
}
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
}
if match.Handler == nil {
match.Handler = r.handler
}
if match.Vars == nil {
match.Vars = make(map[string]string)
}
// Set variables.
r.regexp.setMatch(req, match, r)
return true
}
// ----------------------------------------------------------------------------
// Route attributes
// ----------------------------------------------------------------------------
// GetError returns an error resulted from building the route, if any.
func (r *Route) GetError() error {
return r.err
}
// BuildOnly sets the route to never match: it is only used to build URLs.
func (r *Route) BuildOnly() *Route {
r.buildOnly = true
return r
}
// Handler --------------------------------------------------------------------
// Handler sets a handler for the route.
func (r *Route) Handler(handler http.Handler) *Route {
if r.err == nil {
r.handler = handler
}
return r
}
// HandlerFunc sets a handler function for the route.
func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
return r.Handler(http.HandlerFunc(f))
}
// GetHandler returns the handler for the route, if any.
func (r *Route) GetHandler() http.Handler {
return r.handler
}
// Name -----------------------------------------------------------------------
// Name sets the name for the route, used to build URLs.
// It is an error to call Name more than once on a route.
func (r *Route) Name(name string) *Route {
if r.name != "" {
r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
r.name, name)
}
if r.err == nil {
r.name = name
r.namedRoutes[name] = r
}
return r
}
// GetName returns the name for the route, if any.
func (r *Route) GetName() string {
return r.name
}
// ----------------------------------------------------------------------------
// Matchers
// ----------------------------------------------------------------------------
// matcher types try to match a request.
type matcher interface {
Match(*http.Request, *RouteMatch) bool
}
// addMatcher adds a matcher to the route.
func (r *Route) addMatcher(m matcher) *Route {
if r.err == nil {
r.matchers = append(r.matchers, m)
}
return r
}
// addRegexpMatcher adds a host or path matcher and builder to a route.
func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
if r.err != nil {
return r.err
}
if typ == regexpTypePath || typ == regexpTypePrefix {
if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
}
}
rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
strictSlash: r.strictSlash,
useEncodedPath: r.useEncodedPath,
})
if err != nil {
return err
}
for _, q := range r.regexp.queries {
if err = uniqueVars(rr.varsN, q.varsN); err != nil {
return err
}
}
if typ == regexpTypeHost {
if r.regexp.path != nil {
if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
return err
}
}
r.regexp.host = rr
} else {
if r.regexp.host != nil {
if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
return err
}
}
if typ == regexpTypeQuery {
r.regexp.queries = append(r.regexp.queries, rr)
} else {
r.regexp.path = rr
}
}
r.addMatcher(rr)
return nil
}
// Headers --------------------------------------------------------------------
// headerMatcher matches the request against header values.
type headerMatcher map[string]string
func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithString(m, r.Header, true)
}
// Headers adds a matcher for request header values.
// It accepts a sequence of key/value pairs to be matched. For example:
//
// r := mux.NewRouter()
// r.Headers("Content-Type", "application/json",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request header values match.
// If the value is an empty string, it will match any value if the key is set.
func (r *Route) Headers(pairs ...string) *Route {
if r.err == nil {
var headers map[string]string
headers, r.err = mapFromPairsToString(pairs...)
return r.addMatcher(headerMatcher(headers))
}
return r
}
// headerRegexMatcher matches the request against the route given a regex for the header
type headerRegexMatcher map[string]*regexp.Regexp
func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchMapWithRegex(m, r.Header, true)
}
// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
// support. For example:
//
// r := mux.NewRouter()
// r.HeadersRegexp("Content-Type", "application/(text|json)",
// "X-Requested-With", "XMLHttpRequest")
//
// The above route will only match if both request headers match their regular expressions.
// If the value is an empty string, it will match any value if the key is set.
// Use the start and end of string anchors (^ and $) to match an exact value.
func (r *Route) HeadersRegexp(pairs ...string) *Route {
if r.err == nil {
var headers map[string]*regexp.Regexp
headers, r.err = mapFromPairsToRegex(pairs...)
return r.addMatcher(headerRegexMatcher(headers))
}
return r
}
// Host -----------------------------------------------------------------------
// Host adds a matcher for the URL host.
// It accepts a template with zero or more URL variables enclosed by {}.
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next dot.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Host("www.example.com")
// r.Host("{subdomain}.domain.com")
// r.Host("{subdomain:[a-z]+}.domain.com")
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Host(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
return r
}
// MatcherFunc ----------------------------------------------------------------
// MatcherFunc is the function signature used by custom matchers.
type MatcherFunc func(*http.Request, *RouteMatch) bool
// Match returns the match for a given request.
func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
return m(r, match)
}
// MatcherFunc adds a custom function to be used as request matcher.
func (r *Route) MatcherFunc(f MatcherFunc) *Route {
return r.addMatcher(f)
}
// Methods --------------------------------------------------------------------
// methodMatcher matches the request against HTTP methods.
type methodMatcher []string
func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
return matchInArray(m, r.Method)
}
// Methods adds a matcher for HTTP methods.
// It accepts a sequence of one or more methods to be matched, e.g.:
// "GET", "POST", "PUT".
func (r *Route) Methods(methods ...string) *Route {
for k, v := range methods {
methods[k] = strings.ToUpper(v)
}
return r.addMatcher(methodMatcher(methods))
}
// Path -----------------------------------------------------------------------
// Path adds a matcher for the URL path.
// It accepts a template with zero or more URL variables enclosed by {}. The
// template must start with a "/".
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
//
// For example:
//
// r := mux.NewRouter()
// r.Path("/products/").Handler(ProductsHandler)
// r.Path("/products/{key}").Handler(ProductsHandler)
// r.Path("/articles/{category}/{id:[0-9]+}").
// Handler(ArticleHandler)
//
// Variable names must be unique in a given route. They can be retrieved
// calling mux.Vars(request).
func (r *Route) Path(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePath)
return r
}
// PathPrefix -----------------------------------------------------------------
// PathPrefix adds a matcher for the URL path prefix. This matches if the given
// template is a prefix of the full URL path. See Route.Path() for details on
// the tpl argument.
//
// Note that it does not treat slashes specially ("/foobar/" will be matched by
// the prefix "/foo") so you may want to use a trailing slash here.
//
// Also note that the setting of Router.StrictSlash() has no effect on routes
// with a PathPrefix matcher.
func (r *Route) PathPrefix(tpl string) *Route {
r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
return r
}
// Query ----------------------------------------------------------------------
// Queries adds a matcher for URL query values.
// It accepts a sequence of key/value pairs. Values may define variables.
// For example:
//
// r := mux.NewRouter()
// r.Queries("foo", "bar", "id", "{id:[0-9]+}")
//
// The above route will only match if the URL contains the defined queries
// values, e.g.: ?foo=bar&id=42.
//
// If the value is an empty string, it will match any value if the key is set.
//
// Variables can define an optional regexp pattern to be matched:
//
// - {name} matches anything until the next slash.
//
// - {name:pattern} matches the given regexp pattern.
func (r *Route) Queries(pairs ...string) *Route {
length := len(pairs)
if length%2 != 0 {
r.err = fmt.Errorf(
"mux: number of parameters must be multiple of 2, got %v", pairs)
return nil
}
for i := 0; i < length; i += 2 {
if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
return r
}
}
return r
}
// Schemes --------------------------------------------------------------------
// schemeMatcher matches the request against URL schemes.
type schemeMatcher []string
func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
scheme := r.URL.Scheme
// https://golang.org/pkg/net/http/#Request
// "For [most] server requests, fields other than Path and RawQuery will be
// empty."
// Since we're an HTTP muxer, the scheme is going to be either http or https,
// so we can set it based on the TLS termination state.
if scheme == "" {
if r.TLS == nil {
scheme = "http"
} else {
scheme = "https"
}
}
return matchInArray(m, scheme)
}
// Schemes adds a matcher for URL schemes.
// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
// If the request's URL has a scheme set, it will be matched against.
// Generally, the URL scheme will only be set if a previous handler set it,
// such as the ProxyHeaders handler from gorilla/handlers.
// If unset, the scheme will be determined based on the request's TLS
// termination state.
// The first argument to Schemes will be used when constructing a route URL.
func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
if len(schemes) > 0 {
r.buildScheme = schemes[0]
}
return r.addMatcher(schemeMatcher(schemes))
}
// BuildVarsFunc --------------------------------------------------------------
// BuildVarsFunc is the function signature used by custom build variable
// functions (which can modify route variables before a route's URL is built).
type BuildVarsFunc func(map[string]string) map[string]string
// BuildVarsFunc adds a custom function to be used to modify build variables
// before a route's URL is built.
func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
if r.buildVarsFunc != nil {
// compose the old and new functions
old := r.buildVarsFunc
r.buildVarsFunc = func(m map[string]string) map[string]string {
return f(old(m))
}
} else {
r.buildVarsFunc = f
}
return r
}
// Subrouter ------------------------------------------------------------------
// Subrouter creates a subrouter for the route.
//
// It will test the inner routes only if the parent route matched. For example:
//
// r := mux.NewRouter()
// s := r.Host("www.example.com").Subrouter()
// s.HandleFunc("/products/", ProductsHandler)
// s.HandleFunc("/products/{key}", ProductHandler)
// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
//
// Here, the routes registered in the subrouter won't be tested if the host
// doesn't match.
func (r *Route) Subrouter() *Router {
// initialize a subrouter with a copy of the parent route's configuration
router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.addMatcher(router)
return router
}
// ----------------------------------------------------------------------------
// URL building
// ----------------------------------------------------------------------------
// URL builds a URL for the route.
//
// It accepts a sequence of key/value pairs for the route variables. For
// example, given this route:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Name("article")
//
// ...a URL for it can be built using:
//
// url, err := r.Get("article").URL("category", "technology", "id", "42")
//
// ...which will return a url.URL with the following path:
//
// "/articles/technology/42"
//
// This also works for host variables:
//
// r := mux.NewRouter()
// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
// Host("{subdomain}.domain.com").
// Name("article")
//
// // url.String() will be "http://news.domain.com/articles/technology/42"
// url, err := r.Get("article").URL("subdomain", "news",
// "category", "technology",
// "id", "42")
//
// The scheme of the resulting url will be the first argument that was passed to Schemes:
//
// // url.String() will be "https://example.com"
// r := mux.NewRouter()
// url, err := r.Host("example.com")
// .Schemes("https", "http").URL()
//
// All variables defined in the route are required, and their values must
// conform to the corresponding patterns.
func (r *Route) URL(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
var scheme, host, path string
queries := make([]string, 0, len(r.regexp.queries))
if r.regexp.host != nil {
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
scheme = "http"
if r.buildScheme != "" {
scheme = r.buildScheme
}
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(values); err != nil {
return nil, err
}
}
for _, q := range r.regexp.queries {
var query string
if query, err = q.url(values); err != nil {
return nil, err
}
queries = append(queries, query)
}
return &url.URL{
Scheme: scheme,
Host: host,
Path: path,
RawQuery: strings.Join(queries, "&"),
}, nil
}
// URLHost builds the host part of the URL for a route. See Route.URL().
//
// The route must have a host defined.
func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.host == nil {
return nil, errors.New("mux: route doesn't have a host")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
host, err := r.regexp.host.url(values)
if err != nil {
return nil, err
}
u := &url.URL{
Scheme: "http",
Host: host,
}
if r.buildScheme != "" {
u.Scheme = r.buildScheme
}
return u, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
//
// The route must have a path defined.
func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.path == nil {
return nil, errors.New("mux: route doesn't have a path")
}
values, err := r.prepareVars(pairs...)
if err != nil {
return nil, err
}
path, err := r.regexp.path.url(values)
if err != nil {
return nil, err
}
return &url.URL{
Path: path,
}, nil
}
// GetPathTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route doesn't have a path")
}
return r.regexp.path.template, nil
}
// GetPathRegexp returns the expanded regular expression used to match route path.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a path.
func (r *Route) GetPathRegexp() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.path == nil {
return "", errors.New("mux: route does not have a path")
}
return r.regexp.path.regexp.String(), nil
}
// GetQueriesRegexp returns the expanded regular expressions used to match the
// route queries.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not have queries.
func (r *Route) GetQueriesRegexp() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
queries := make([]string, 0, len(r.regexp.queries))
for _, query := range r.regexp.queries {
queries = append(queries, query.regexp.String())
}
return queries, nil
}
// GetQueriesTemplates returns the templates used to build the
// query matching.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define queries.
func (r *Route) GetQueriesTemplates() ([]string, error) {
if r.err != nil {
return nil, r.err
}
if r.regexp.queries == nil {
return nil, errors.New("mux: route doesn't have queries")
}
queries := make([]string, 0, len(r.regexp.queries))
for _, query := range r.regexp.queries {
queries = append(queries, query.template)
}
return queries, nil
}
// GetMethods returns the methods the route matches against
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not have methods.
func (r *Route) GetMethods() ([]string, error) {
if r.err != nil {
return nil, r.err
}
for _, m := range r.matchers {
if methods, ok := m.(methodMatcher); ok {
return []string(methods), nil
}
}
return nil, errors.New("mux: route doesn't have methods")
}
// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
// against third-party services.
// An error will be returned if the route does not define a host.
func (r *Route) GetHostTemplate() (string, error) {
if r.err != nil {
return "", r.err
}
if r.regexp.host == nil {
return "", errors.New("mux: route doesn't have a host")
}
return r.regexp.host.template, nil
}
// prepareVars converts the route variable pairs into a map. If the route has a
// BuildVarsFunc, it is invoked.
func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
m, err := mapFromPairsToString(pairs...)
if err != nil {
return nil, err
}
return r.buildVars(m), nil
}
func (r *Route) buildVars(m map[string]string) map[string]string {
if r.buildVarsFunc != nil {
m = r.buildVarsFunc(m)
}
return m
}

19
vendor/github.com/gorilla/mux/test_helpers.go generated vendored Normal file

@ -0,0 +1,19 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mux
import "net/http"
// SetURLVars sets the URL variables for the given request, to be accessed via
// mux.Vars for testing route behaviour. Arguments are not modified; a shallow
// copy is returned.
//
// This API should only be used for testing purposes; it provides a way to
// inject variables into the request context. Alternatively, URL variables
// can be set by making a route that captures the required variables,
// starting a server and sending the request to that server.
func SetURLVars(r *http.Request, val map[string]string) *http.Request {
return requestWithVars(r, val)
}
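// Illustrative test sketch (not part of the upstream file): exercising a
// handler that reads route variables without registering a route or starting
// a server. ArticleHandler is a hypothetical handler under test.
//
//	req := httptest.NewRequest(http.MethodGet, "/articles/tech/42", nil)
//	req = SetURLVars(req, map[string]string{"category": "tech", "id": "42"})
//	ArticleHandler(httptest.NewRecorder(), req)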

19
vendor/github.com/peterbourgon/diskv/v3/LICENSE generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2011-2012 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

191
vendor/github.com/peterbourgon/diskv/v3/README.md generated vendored Normal file

@ -0,0 +1,191 @@
# What is diskv?
Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.
[![Build Status][1]][2]
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest
# Installing
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,
```bash
$ go get github.com/peterbourgon/diskv/v3
```
[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install
# Usage
```go
package main
import (
"fmt"
"github.com/peterbourgon/diskv/v3"
)
func main() {
// Simplest transform function: put all the data files into the base dir.
flatTransform := func(s string) []string { return []string{} }
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
d := diskv.New(diskv.Options{
BasePath: "my-data-dir",
Transform: flatTransform,
CacheSizeMax: 1024 * 1024,
})
// Write three bytes to the key "alpha".
key := "alpha"
d.Write(key, []byte{'1', '2', '3'})
// Read the value back out of the store.
value, _ := d.Read(key)
fmt.Printf("%v\n", value)
// Erase the key+value from the store (and the disk).
d.Erase(key)
}
```
More complex examples can be found in the "examples" subdirectory.
# Theory
## Basic idea
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,
```go
func SimpleTransform(key string) []string {
return []string{}
}
```
will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then
```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
```
This will be addressed in an upcoming version of diskv.
Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.
## Advanced path transformation
If you need more control over the file name written to disk or if you want to support
slashes in your key name or special characters in the keys, you can use the
AdvancedTransform property. You must supply a function that returns
a special PathKey structure, which is a breakdown of a path and a file name. Strings
returned must be clean of any slashes or special characters:
```go
func AdvancedTransformExample(key string) *diskv.PathKey {
path := strings.Split(key, "/")
last := len(path) - 1
return &diskv.PathKey{
Path: path[:last],
FileName: path[last] + ".txt",
}
}
// If you provide an AdvancedTransform, you must also provide its
// inverse:
func InverseTransformExample(pathKey *diskv.PathKey) (key string) {
if !strings.HasSuffix(pathKey.FileName, ".txt") {
panic("Invalid file found in storage folder!")
}
// Rejoin the directory components and the file name (minus the extension)
// to recover the original key.
name := strings.TrimSuffix(pathKey.FileName, ".txt")
return strings.Join(append(pathKey.Path, name), "/")
}
func main() {
d := diskv.New(diskv.Options{
BasePath: "my-data-dir",
AdvancedTransform: AdvancedTransformExample,
InverseTransform: InverseTransformExample,
CacheSizeMax: 1024 * 1024,
})
// Write some text to the key "alpha/beta/gamma".
key := "alpha/beta/gamma"
d.WriteString(key, "¡Hola!") // will be stored in "<basedir>/alpha/beta/gamma.txt"
fmt.Println(d.ReadString("alpha/beta/gamma"))
}
```
## Adding a cache
An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with an RWMutex to provide safe concurrent access.
## Adding order
diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.
[7]: https://github.com/google/btree
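As a hedged sketch (assuming the `Index`/`IndexLess` options and the bundled `BTreeIndex` type; the keys are illustrative), an ordered store could be configured and queried like this:
```go
d := diskv.New(diskv.Options{
	BasePath:     "my-data-dir",
	Transform:    func(s string) []string { return []string{} },
	CacheSizeMax: 1024 * 1024,
	Index:        &diskv.BTreeIndex{},                     // keep an ordered view of the keys
	IndexLess:    func(a, b string) bool { return a < b }, // lexicographic ordering
})

d.WriteString("alpha", "1")
d.WriteString("beta", "2")

// Keys returns up to n keys in order, starting after the given key
// ("" means "from the beginning").
fmt.Println(d.Index.Keys("", 10))
```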
## Adding compression
Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
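For example, a minimal sketch (assuming the `Compression` option field, using the gzip constructor defined in `compression.go` below):
```go
d := diskv.New(diskv.Options{
	BasePath:     "my-data-dir",
	Transform:    func(s string) []string { return []string{} },
	CacheSizeMax: 1024 * 1024,
	Compression:  diskv.NewGzipCompression(), // Writes are compressed, Reads are decompressed
})
```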
## Streaming
diskv also provides ReadStream and WriteStream methods, so that very large values
can be handled efficiently.
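A rough sketch of both calls (directory and file names are hypothetical): WriteStream copies from any io.Reader, and ReadStream returns an io.ReadCloser; passing direct=true skips the in-memory cache:
```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/peterbourgon/diskv/v3"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "stream-example"}) // illustrative directory name

	// Stream a large file into the store without reading it all into memory.
	src, err := os.Open("big-input.dat") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()
	if err := d.WriteStream("big-key", src, false); err != nil { // sync=false: no explicit fsync
		log.Fatal(err)
	}

	// Stream it back out; direct=true bypasses the in-memory cache.
	rc, err := d.ReadStream("big-key", true)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("read", n, "bytes")
}
```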
# Future plans
* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of
# Credits and contributions
Original idea, design and implementation: [Peter Bourgon](https://github.com/peterbourgon)
Other collaborations: [Javier Peletier](https://github.com/jpeletier) ([Epic Labs](https://www.epiclabs.io))

64
vendor/github.com/peterbourgon/diskv/v3/compression.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package diskv
import (
"compress/flate"
"compress/gzip"
"compress/zlib"
"io"
)
// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
Writer(dst io.Writer) (io.WriteCloser, error)
Reader(src io.Reader) (io.ReadCloser, error)
}
// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
return NewGzipCompressionLevel(flate.DefaultCompression)
}
// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
return &genericCompression{
wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
}
}
// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
return NewZlibCompressionLevel(flate.DefaultCompression)
}
// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
return NewZlibCompressionLevelDict(level, nil)
}
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
return &genericCompression{
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
}
}
type genericCompression struct {
wf func(w io.Writer) (io.WriteCloser, error)
rf func(r io.Reader) (io.ReadCloser, error)
}
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
return g.wf(dst)
}
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
return g.rf(src)
}

729
vendor/github.com/peterbourgon/diskv/v3/diskv.go generated vendored Normal file
View File

@ -0,0 +1,729 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
const (
defaultBasePath = "diskv"
defaultFilePerm os.FileMode = 0666
defaultPathPerm os.FileMode = 0777
)
// PathKey represents a string key that has been transformed to
// a directory and file name where the content will eventually
// be stored
type PathKey struct {
Path []string
FileName string
originalKey string
}
var (
defaultAdvancedTransform = func(s string) *PathKey { return &PathKey{Path: []string{}, FileName: s} }
defaultInverseTransform = func(pathKey *PathKey) string { return pathKey.FileName }
errCanceled = errors.New("canceled")
errEmptyKey = errors.New("empty key")
errBadKey = errors.New("bad key")
errImportDirectory = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string
// AdvancedTransformFunction transforms a key into a PathKey.
//
// A PathKey contains a slice of strings, where each element in the slice
// represents a directory in the file path where the key's entry will eventually
// be stored, as well as the filename.
//
// For example, if AdvancedTransformFunc transforms "abcdef/file.txt" to the
// PathKey {Path: ["ab", "cde", "f"], FileName: "file.txt"}, the final location
// of the data file will be <basedir>/ab/cde/f/file.txt.
//
// You must provide an InverseTransformFunction if you use an
// AdvancedTransformFunction.
type AdvancedTransformFunction func(s string) *PathKey
// InverseTransformFunction takes a PathKey and converts it back to a Diskv key.
// In effect, it's the opposite of an AdvancedTransformFunction.
type InverseTransformFunction func(pathKey *PathKey) string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
BasePath string
Transform TransformFunction
AdvancedTransform AdvancedTransformFunction
InverseTransform InverseTransformFunction
CacheSizeMax uint64 // bytes
PathPerm os.FileMode
FilePerm os.FileMode
// If TempDir is set, it will enable filesystem atomic writes by
// writing temporary files to that location before being moved
// to BasePath.
// Note that TempDir MUST be on the same device/partition as
// BasePath.
TempDir string
Index Index
IndexLess LessFunction
Compression Compression
}
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
Options
mu sync.RWMutex
cache map[string][]byte
cacheSize uint64
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
if o.BasePath == "" {
o.BasePath = defaultBasePath
}
if o.AdvancedTransform == nil {
if o.Transform == nil {
o.AdvancedTransform = defaultAdvancedTransform
} else {
o.AdvancedTransform = convertToAdvancedTransform(o.Transform)
}
if o.InverseTransform == nil {
o.InverseTransform = defaultInverseTransform
}
} else {
if o.InverseTransform == nil {
panic("You must provide an InverseTransform function in advanced mode")
}
}
if o.PathPerm == 0 {
o.PathPerm = defaultPathPerm
}
if o.FilePerm == 0 {
o.FilePerm = defaultFilePerm
}
d := &Diskv{
Options: o,
cache: map[string][]byte{},
cacheSize: 0,
}
if d.Index != nil && d.IndexLess != nil {
d.Index.Initialize(d.IndexLess, d.Keys(nil))
}
return d
}
// convertToAdvancedTransform takes a classic Transform function and
// converts it to the new AdvancedTransform
func convertToAdvancedTransform(oldFunc func(s string) []string) AdvancedTransformFunction {
return func(s string) *PathKey {
return &PathKey{Path: oldFunc(s), FileName: s}
}
}
// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
return d.WriteStream(key, bytes.NewReader(val), false)
}
// WriteString writes a string key-value pair to disk
func (d *Diskv) WriteString(key string, val string) error {
return d.Write(key, []byte(val))
}
func (d *Diskv) transform(key string) (pathKey *PathKey) {
pathKey = d.AdvancedTransform(key)
pathKey.originalKey = key
return pathKey
}
// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
if len(key) <= 0 {
return errEmptyKey
}
pathKey := d.transform(key)
// Ensure keys cannot evaluate to paths that would not exist
for _, pathPart := range pathKey.Path {
if strings.ContainsRune(pathPart, os.PathSeparator) {
return errBadKey
}
}
if strings.ContainsRune(pathKey.FileName, os.PathSeparator) {
return errBadKey
}
d.mu.Lock()
defer d.mu.Unlock()
return d.writeStreamWithLock(pathKey, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(pathKey *PathKey) (*os.File, error) {
if d.TempDir != "" {
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
return nil, fmt.Errorf("temp mkdir: %s", err)
}
f, err := ioutil.TempFile(d.TempDir, "")
if err != nil {
return nil, fmt.Errorf("temp file: %s", err)
}
if err := os.Chmod(f.Name(), d.FilePerm); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return nil, fmt.Errorf("chmod: %s", err)
}
return f, nil
}
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
f, err := os.OpenFile(d.completeFilename(pathKey), mode, d.FilePerm)
if err != nil {
return nil, fmt.Errorf("open file: %s", err)
}
return f, nil
}
// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(pathKey *PathKey, r io.Reader, sync bool) error {
if err := d.ensurePathWithLock(pathKey); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
f, err := d.createKeyFileWithLock(pathKey)
if err != nil {
return fmt.Errorf("create key file: %s", err)
}
wc := io.WriteCloser(&nopWriteCloser{f})
if d.Compression != nil {
wc, err = d.Compression.Writer(f)
if err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression writer: %s", err)
}
}
if _, err := io.Copy(wc, r); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("i/o copy: %s", err)
}
if err := wc.Close(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression close: %s", err)
}
if sync {
if err := f.Sync(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("file sync: %s", err)
}
}
if err := f.Close(); err != nil {
return fmt.Errorf("file close: %s", err)
}
fullPath := d.completeFilename(pathKey)
if f.Name() != fullPath {
if err := os.Rename(f.Name(), fullPath); err != nil {
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("rename: %s", err)
}
}
if d.Index != nil {
d.Index.Insert(pathKey.originalKey)
}
d.bustCacheWithLock(pathKey.originalKey) // cache only on read
return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
if dstKey == "" {
return errEmptyKey
}
if fi, err := os.Stat(srcFilename); err != nil {
return err
} else if fi.IsDir() {
return errImportDirectory
}
dstPathKey := d.transform(dstKey)
d.mu.Lock()
defer d.mu.Unlock()
if err := d.ensurePathWithLock(dstPathKey); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
if move {
if err := syscall.Rename(srcFilename, d.completeFilename(dstPathKey)); err == nil {
d.bustCacheWithLock(dstPathKey.originalKey)
return nil
} else if err != syscall.EXDEV {
	// Rename failed for a reason other than crossing devices, so give up.
	// (EXDEV means source and destination are on different devices; in
	// that case we fall through and copy the file below.)
	return err
}
}
f, err := os.Open(srcFilename)
if err != nil {
return err
}
defer f.Close()
err = d.writeStreamWithLock(dstPathKey, f, false)
if err == nil && move {
err = os.Remove(srcFilename)
}
return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
rc, err := d.ReadStream(key, false)
if err != nil {
return []byte{}, err
}
defer rc.Close()
return ioutil.ReadAll(rc)
}
// ReadString reads the key and returns a string value
// In case of error, an empty string is returned
func (d *Diskv) ReadString(key string) string {
value, _ := d.Read(key)
return string(value)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
pathKey := d.transform(key)
d.mu.RLock()
defer d.mu.RUnlock()
if val, ok := d.cache[key]; ok {
if !direct {
buf := bytes.NewReader(val)
if d.Compression != nil {
return d.Compression.Reader(buf)
}
return ioutil.NopCloser(buf), nil
}
go func() {
d.mu.Lock()
defer d.mu.Unlock()
d.uncacheWithLock(key, uint64(len(val)))
}()
}
return d.readWithRLock(pathKey)
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(pathKey *PathKey) (io.ReadCloser, error) {
filename := d.completeFilename(pathKey)
fi, err := os.Stat(filename)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, os.ErrNotExist
}
f, err := os.Open(filename)
if err != nil {
return nil, err
}
var r io.Reader
if d.CacheSizeMax > 0 {
r = newSiphon(f, d, pathKey.originalKey)
} else {
r = &closingReader{f}
}
var rc = io.ReadCloser(ioutil.NopCloser(r))
if d.Compression != nil {
rc, err = d.Compression.Reader(r)
if err != nil {
return nil, err
}
}
return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
rc io.ReadCloser
}
func (cr closingReader) Read(p []byte) (int, error) {
n, err := cr.rc.Read(p)
if err == io.EOF {
if closeErr := cr.rc.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
}
return n, err
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
f *os.File
d *Diskv
key string
buf *bytes.Buffer
}
// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
return &siphon{
f: f,
d: d,
key: key,
buf: &bytes.Buffer{},
}
}
// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
n, err := s.f.Read(p)
if err == nil {
return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
}
if err == io.EOF {
s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
if closeErr := s.f.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
return n, err
}
return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
pathKey := d.transform(key)
d.mu.Lock()
defer d.mu.Unlock()
d.bustCacheWithLock(key)
// erase from index
if d.Index != nil {
d.Index.Delete(key)
}
// erase from disk
filename := d.completeFilename(pathKey)
if s, err := os.Stat(filename); err == nil {
if s.IsDir() {
return errBadKey
}
if err = os.Remove(filename); err != nil {
return err
}
} else {
// Return err as-is so caller can do os.IsNotExist(err).
return err
}
// clean up and return
d.pruneDirsWithLock(key)
return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
d.mu.Lock()
defer d.mu.Unlock()
d.cache = make(map[string][]byte)
d.cacheSize = 0
if d.TempDir != "" {
os.RemoveAll(d.TempDir) // errors ignored
}
return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
pathKey := d.transform(key)
d.mu.Lock()
defer d.mu.Unlock()
if _, ok := d.cache[key]; ok {
return true
}
filename := d.completeFilename(pathKey)
s, err := os.Stat(filename)
if err != nil {
return false
}
if s.IsDir() {
return false
}
return true
}
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
return d.KeysPrefix("", cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
var prepath string
if prefix == "" {
prepath = d.BasePath
} else {
prefixKey := d.transform(prefix)
prepath = d.pathFor(prefixKey)
}
c := make(chan string)
go func() {
filepath.Walk(prepath, d.walker(c, prefix, cancel))
close(c)
}()
return c
}
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func (d *Diskv) walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
relPath, _ := filepath.Rel(d.BasePath, path)
dir, file := filepath.Split(relPath)
pathSplit := strings.Split(dir, string(filepath.Separator))
pathSplit = pathSplit[:len(pathSplit)-1]
pathKey := &PathKey{
Path: pathSplit,
FileName: file,
}
key := d.InverseTransform(pathKey)
if info.IsDir() || !strings.HasPrefix(key, prefix) {
return nil // "pass"
}
select {
case c <- key:
case <-cancel:
return errCanceled
}
return nil
}
}
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(pathKey *PathKey) string {
return filepath.Join(d.BasePath, filepath.Join(pathKey.Path...))
}
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(pathKey *PathKey) error {
return os.MkdirAll(d.pathFor(pathKey), d.PathPerm)
}
// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(pathKey *PathKey) string {
return filepath.Join(d.pathFor(pathKey), pathKey.FileName)
}
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
// If the key already exists, delete it.
d.bustCacheWithLock(key)
valueSize := uint64(len(val))
if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
return fmt.Errorf("%s; not caching", err)
}
// be very strict about memory guarantees
if (d.cacheSize + valueSize) > d.CacheSizeMax {
panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
}
d.cache[key] = val
d.cacheSize += valueSize
return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.cacheWithLock(key, val)
}
func (d *Diskv) bustCacheWithLock(key string) {
if val, ok := d.cache[key]; ok {
d.uncacheWithLock(key, uint64(len(val)))
}
}
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
d.cacheSize -= sz
delete(d.cache, key)
}
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
pathlist := d.transform(key).Path
for i := range pathlist {
dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
// thanks to Steven Blenkinsop for this snippet
switch fi, err := os.Stat(dir); true {
case err != nil:
return err
case !fi.IsDir():
panic(fmt.Sprintf("corrupt dirstate at %s", dir))
}
nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
if err != nil {
return err
} else if len(nlinks) > 0 {
return nil // has subdirs -- do not prune
}
if err = os.Remove(dir); err != nil {
return err
}
}
return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
if valueSize > d.CacheSizeMax {
return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
}
safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
for key, val := range d.cache {
if safe() {
break
}
d.uncacheWithLock(key, uint64(len(val)))
}
if !safe() {
panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
}
return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }

115
vendor/github.com/peterbourgon/diskv/v3/index.go generated vendored Normal file
View File

@ -0,0 +1,115 @@
package diskv
import (
"sync"
"github.com/google/btree"
)
// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
Initialize(less LessFunction, keys <-chan string)
Insert(key string)
Delete(key string)
Keys(from string, n int) []string
}
// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool
// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
s string
l LessFunction
}
// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
return s.l(s.s, i.(btreeString).s)
}
// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
sync.RWMutex
LessFunction
*btree.BTree
}
// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
i.Lock()
defer i.Unlock()
i.LessFunction = less
i.BTree = rebuild(less, keys)
}
// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}
// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
i.RLock()
defer i.RUnlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
if i.BTree.Len() <= 0 {
return []string{}
}
btreeFrom := btreeString{s: from, l: i.LessFunction}
skipFirst := true
if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
// no such key, so fabricate an always-smallest item
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
skipFirst = false
}
keys := []string{}
iterator := func(i btree.Item) bool {
keys = append(keys, i.(btreeString).s)
return len(keys) < n
}
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
if skipFirst && len(keys) > 0 {
keys = keys[1:]
}
return keys
}
// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
tree := btree.New(2)
for key := range keys {
tree.ReplaceOrInsert(btreeString{s: key, l: less})
}
return tree
}

2
vendor/github.com/writeas/activityserve/.gitignore generated vendored Normal file
View File

@ -0,0 +1,2 @@
*.snip
storage

22
vendor/github.com/writeas/activityserve/LICENSE generated vendored Normal file
View File

@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2020 Write.as and authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

57
vendor/github.com/writeas/activityserve/TODO generated vendored Normal file
View File

@ -0,0 +1,57 @@
[✔] Follow users
[✔] Announcements
[✔] Federate the post to our followers (hardcoded for now)
[✔] Handle more than one local actors
[✔] Handle the /actor endpoint
[✔] Create configuration file
[✔] Implement database backend
[✔] Create a file with the actors we have, their following
and their followers.
[✔] `MakeActor` should create a file with that actor.
[✔] Implement `LoadActor`
[✔] `actor.Follow` should write the new following to file
[✔] Handle being followed
[✔] When followed, the handler should write the new follower to file
[✔] Make sure we send our boosts to all our followers
[x] Write incoming activities to disk (do we have to?)
[✔] Write all the announcements (boosts) to the database to
their correct actors
[✔] Check if we are already following users
[✔] On GetOutbox read the database and present a list of the
last posts.
[✔] Make OS-independent (mostly directory separators)
[✔] Create outbox.json programmatically
[✔] Make storage configurable (search for "storage" in project)
[✔] Check if we're boosting only stuff from actors we follow, not whatever comes
through in our inbox
[✔] Boost not only articles but other things too
[✔] Sanitize input, never allow slashes or dots
[✔] Add summary to actors.json
[✔] Check local actor names for characters illegal for filenames and ban them
(Done in pherephone, not activityserve)
[✔] Create debug flag
[✔] Write to following only upon accept
(waiting to actually get an accept so that I can test this)
[✔] Implement webfinger
[✔] Make sure masto finds signature
[✔] Implement Unfollow
[✔] Implement accept (accept when other follow us)
(done but can't test it pending http signatures)
Works in pleroma/pixelfed not working on masto
(nothing works on masto)
[ ] Implement nodeinfo and statistics
[✔] Accept even if already follows us
[✔] Handle paging
[✔] Test paging
[✔] Handle http signatures
[ ] Verify http signatures
[✔] Refactor, comment and clean up
[✔] Split to pherephone and activityServe
[ ] Decide what's to be done with actors removed from `actors.json`.
[ ] Remove them?
[ ] Leave them read-only?
[✔] Leave them as is?
[✔] Handle followers and following URIs
[ ] Do I care about the inbox?
[✔] Expose configuration to apps
[✔] Do not boost replies (configurable)

816
vendor/github.com/writeas/activityserve/actor.go generated vendored Normal file
View File

@ -0,0 +1,816 @@
package activityserve
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/gologme/log"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"github.com/dchest/uniuri"
"github.com/go-fed/httpsig"
)
// Actor represents a local actor we can act on
// behalf of.
type Actor struct {
Name, summary, actorType, iri string
followersIRI string
nuIri *url.URL
followers, following, rejected map[string]interface{}
requested map[string]interface{}
posts map[int]map[string]interface{}
publicKey crypto.PublicKey
privateKey crypto.PrivateKey
publicKeyPem string
privateKeyPem string
publicKeyID string
OnFollow func(map[string]interface{})
OnReceiveContent func(map[string]interface{})
}
// ActorToSave is a stripped down actor representation
// with exported properties in order for json to be
// able to marshal it.
// see https://stackoverflow.com/questions/26327391/json-marshalstruct-returns
type ActorToSave struct {
Name, Summary, ActorType, IRI, PublicKey, PrivateKey string
Followers, Following, Rejected, Requested map[string]interface{}
}
// MakeActor creates and returns a new local actor we can act
// on behalf of. It also creates its files on disk
func MakeActor(name, summary, actorType string) (Actor, error) {
followers := make(map[string]interface{})
following := make(map[string]interface{})
rejected := make(map[string]interface{})
requested := make(map[string]interface{})
followersIRI := baseURL + name + "/followers"
publicKeyID := baseURL + name + "#main-key"
iri := baseURL + name
nuIri, err := url.Parse(iri)
if err != nil {
log.Info("Something went wrong when parsing the local actor uri into net/url")
return Actor{}, err
}
actor := Actor{
Name: name,
summary: summary,
actorType: actorType,
iri: iri,
nuIri: nuIri,
followers: followers,
following: following,
rejected: rejected,
requested: requested,
followersIRI: followersIRI,
publicKeyID: publicKeyID,
}
// set auto accept by default (this could be a configuration value)
actor.OnFollow = func(activity map[string]interface{}) { actor.Accept(activity) }
actor.OnReceiveContent = func(activity map[string]interface{}) {}
// create actor's keypair
rng := rand.Reader
privateKey, err := rsa.GenerateKey(rng, 2048)
publicKey := privateKey.PublicKey
actor.publicKey = publicKey
actor.privateKey = privateKey
// marshal the crypto to pem
privateKeyDer := x509.MarshalPKCS1PrivateKey(privateKey)
privateKeyBlock := pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: privateKeyDer,
}
actor.privateKeyPem = string(pem.EncodeToMemory(&privateKeyBlock))
publicKeyDer, err := x509.MarshalPKIXPublicKey(&publicKey)
if err != nil {
log.Info("Can't marshal public key")
return Actor{}, err
}
publicKeyBlock := pem.Block{
Type: "PUBLIC KEY",
Headers: nil,
Bytes: publicKeyDer,
}
actor.publicKeyPem = string(pem.EncodeToMemory(&publicKeyBlock))
err = actor.save()
if err != nil {
return actor, err
}
return actor, nil
}
// GetOutboxIRI returns the outbox iri in net/url format
func (a *Actor) GetOutboxIRI() *url.URL {
iri := a.iri + "/outbox"
nuiri, _ := url.Parse(iri)
return nuiri
}
// LoadActor searches the filesystem and creates an Actor
// from the data in <name>.json
// This does not preserve events so use with caution
func LoadActor(name string) (Actor, error) {
// make sure our users can't read our hard drive
if strings.ContainsAny(name, "./ ") {
log.Info("Illegal characters in actor name")
return Actor{}, errors.New("Illegal characters in actor name")
}
jsonFile := storage + slash + "actors" + slash + name + slash + name + ".json"
fileHandle, err := os.Open(jsonFile)
if os.IsNotExist(err) {
log.Info(name)
log.Info("We don't have this kind of actor stored")
return Actor{}, err
}
byteValue, err := ioutil.ReadAll(fileHandle)
if err != nil {
log.Info("Error reading actor file")
return Actor{}, err
}
jsonData := make(map[string]interface{})
json.Unmarshal(byteValue, &jsonData)
nuIri, err := url.Parse(jsonData["IRI"].(string))
if err != nil {
log.Info("Something went wrong when parsing the local actor uri into net/url")
return Actor{}, err
}
publicKeyDecoded, rest := pem.Decode([]byte(jsonData["PublicKey"].(string)))
if publicKeyDecoded == nil {
log.Info(rest)
panic("failed to parse PEM block containing the public key")
}
publicKey, err := x509.ParsePKIXPublicKey(publicKeyDecoded.Bytes)
if err != nil {
log.Info("Can't parse public keys")
log.Info(err)
return Actor{}, err
}
privateKeyDecoded, rest := pem.Decode([]byte(jsonData["PrivateKey"].(string)))
if privateKeyDecoded == nil {
log.Info(rest)
panic("failed to parse PEM block containing the private key")
}
privateKey, err := x509.ParsePKCS1PrivateKey(privateKeyDecoded.Bytes)
if err != nil {
log.Info("Can't parse private keys")
log.Info(err)
return Actor{}, err
}
actor := Actor{
Name: name,
summary: jsonData["Summary"].(string),
actorType: jsonData["ActorType"].(string),
iri: jsonData["IRI"].(string),
nuIri: nuIri,
followers: jsonData["Followers"].(map[string]interface{}),
following: jsonData["Following"].(map[string]interface{}),
rejected: jsonData["Rejected"].(map[string]interface{}),
requested: jsonData["Requested"].(map[string]interface{}),
publicKey: publicKey,
privateKey: privateKey,
publicKeyPem: jsonData["PublicKey"].(string),
privateKeyPem: jsonData["PrivateKey"].(string),
followersIRI: baseURL + name + "/followers",
publicKeyID: baseURL + name + "#main-key",
}
actor.OnFollow = func(activity map[string]interface{}) { actor.Accept(activity) }
actor.OnReceiveContent = func(activity map[string]interface{}) {}
return actor, nil
}
// GetActor attempts to LoadActor and if it doesn't exist
// creates one
func GetActor(name, summary, actorType string) (Actor, error) {
actor, err := LoadActor(name)
if err != nil {
log.Info("Actor doesn't exist, creating...")
actor, err = MakeActor(name, summary, actorType)
if err != nil {
log.Info("Can't create actor!")
return Actor{}, err
}
}
// if the info provided for the actor is different
// from what the actor has, edit the actor
save := false
if summary != actor.summary {
actor.summary = summary
save = true
}
if actorType != actor.actorType {
actor.actorType = actorType
save = true
}
// if anything changed write it to disk
if save {
actor.save()
}
return actor, nil
}
// func LoadActorFromIRI(iri string) a Actor{
// TODO, this should parse the iri and load the right actor
// }
// save the actor to file
func (a *Actor) save() error {
// check if we already have a directory to save actors
// and if not, create it
dir := storage + slash + "actors" + slash + a.Name + slash + "items"
if _, err := os.Stat(dir); os.IsNotExist(err) {
os.MkdirAll(dir, 0755)
}
actorToSave := ActorToSave{
Name: a.Name,
Summary: a.summary,
ActorType: a.actorType,
IRI: a.iri,
Followers: a.followers,
Following: a.following,
Rejected: a.rejected,
Requested: a.requested,
PublicKey: a.publicKeyPem,
PrivateKey: a.privateKeyPem,
}
actorJSON, err := json.MarshalIndent(actorToSave, "", "\t")
if err != nil {
log.Info("error Marshalling actor json")
return err
}
// log.Info(actorToSave)
// log.Info(string(actorJSON))
err = ioutil.WriteFile(storage+slash+"actors"+slash+a.Name+slash+a.Name+".json", actorJSON, 0644)
if err != nil {
log.Printf("WriteFileJson ERROR: %+v", err)
return err
}
return nil
}
func (a *Actor) whoAmI() string {
self := make(map[string]interface{})
self["@context"] = context()
self["type"] = a.actorType
self["id"] = baseURL + a.Name
self["name"] = a.Name
self["preferredUsername"] = a.Name
self["summary"] = a.summary
self["inbox"] = baseURL + a.Name + "/inbox"
self["outbox"] = baseURL + a.Name + "/outbox"
self["followers"] = baseURL + a.Name + "/peers/followers"
self["following"] = baseURL + a.Name + "/peers/following"
self["publicKey"] = map[string]string{
"id": baseURL + a.Name + "#main-key",
"owner": baseURL + a.Name,
"publicKeyPem": a.publicKeyPem,
}
selfString, _ := json.Marshal(self)
return string(selfString)
}
func (a *Actor) newItemID() (hash string, url string) {
hash = uniuri.New()
return hash, baseURL + a.Name + "/item/" + hash
}
func (a *Actor) newID() (hash string, url string) {
hash = uniuri.New()
return hash, baseURL + a.Name + "/" + hash
}
// TODO Reply(content string, inReplyTo string)
// ReplyNote sends a note to a specific actor in reply to
// a post
//TODO
// DM sends a direct message to a user
// TODO
// CreateNote posts an activityPub note to our followers
//
func (a *Actor) CreateNote(content, inReplyTo string) {
// for now I will just write this to the outbox
hash, id := a.newItemID()
create := make(map[string]interface{})
note := make(map[string]interface{})
create["@context"] = context()
create["actor"] = baseURL + a.Name
create["cc"] = a.followersIRI
create["id"] = id
create["object"] = note
note["attributedTo"] = baseURL + a.Name
note["cc"] = a.followersIRI
note["content"] = content
if inReplyTo != "" {
note["inReplyTo"] = inReplyTo
}
note["id"] = id
note["published"] = time.Now().Format(time.RFC3339)
note["url"] = create["id"]
note["type"] = "Note"
note["to"] = "https://www.w3.org/ns/activitystreams#Public"
create["published"] = note["published"]
create["type"] = "Create"
go a.sendToFollowers(create)
err := a.saveItem(hash, create)
if err != nil {
log.Info("Could not save note to disk")
}
err = a.appendToOutbox(id)
if err != nil {
log.Info("Could not append Note to outbox.txt")
}
}
// saveItem saves an activity to disk under the actor and with the id as
// filename
func (a *Actor) saveItem(hash string, content map[string]interface{}) error {
JSON, _ := json.MarshalIndent(content, "", "\t")
dir := storage + slash + "actors" + slash + a.Name + slash + "items"
err := ioutil.WriteFile(dir+slash+hash+".json", JSON, 0644)
if err != nil {
log.Printf("WriteFileJson ERROR: %+v", err)
return err
}
return nil
}
func (a *Actor) loadItem(hash string) (item map[string]interface{}, err error) {
dir := storage + slash + "actors" + slash + a.Name + slash + "items"
jsonFile := dir + slash + hash + ".json"
fileHandle, err := os.Open(jsonFile)
if os.IsNotExist(err) {
log.Info("We don't have this item stored")
return
}
byteValue, err := ioutil.ReadAll(fileHandle)
if err != nil {
log.Info("Error reading item file")
return
}
json.Unmarshal(byteValue, &item)
return
}
// send is here for backward compatibility and maybe extra pre-processing
// not always required
func (a *Actor) send(content map[string]interface{}, to *url.URL) (err error) {
return a.signedHTTPPost(content, to.String())
}
// getPeers gets followers or following depending on `who`
func (a *Actor) getPeers(page int, who string) (response []byte, err error) {
// if there's no page parameter mastodon displays an
// OrderedCollection with info of where to find orderedCollectionPages
// with the actual information. We are mirroring that behavior
var collection map[string]interface{}
if who == "followers" {
collection = a.followers
} else if who == "following" {
collection = a.following
} else {
return nil, errors.New("cannot find collection " + who)
}
themap := make(map[string]interface{})
themap["@context"] = context()
if page == 0 {
themap["first"] = baseURL + a.Name + "/peers/" + who + "?page=1"
themap["id"] = baseURL + a.Name + "/peers/" + who
themap["totalItems"] = strconv.Itoa(len(collection))
themap["type"] = "OrderedCollection"
} else if page == 1 { // implement pagination
themap["id"] = baseURL + a.Name + who + "?page=" + strconv.Itoa(page)
items := make([]string, 0, len(collection))
for k := range collection {
items = append(items, k)
}
themap["orderedItems"] = items
themap["partOf"] = baseURL + a.Name + "/peers/" + who
themap["totalItems"] = len(collection)
themap["type"] = "OrderedCollectionPage"
}
response, _ = json.Marshal(themap)
return
}
// GetFollowers returns a list of people that follow us
func (a *Actor) GetFollowers(page int) (response []byte, err error) {
return a.getPeers(page, "followers")
}
// GetFollowing returns a list of people that we follow
func (a *Actor) GetFollowing(page int) (response []byte, err error) {
return a.getPeers(page, "following")
}
// signedHTTPPost performs an HTTP post on behalf of Actor with the
// request-target, date, host and digest headers signed
// with the actor's private key.
func (a *Actor) signedHTTPPost(content map[string]interface{}, to string) (err error) {
b, err := json.Marshal(content)
if err != nil {
log.Info("Can't marshal JSON")
log.Info(err)
return
}
postSigner, _, _ := httpsig.NewSigner([]httpsig.Algorithm{httpsig.RSA_SHA256}, "SHA-256", []string{"(request-target)", "date", "host", "digest"}, httpsig.Signature)
byteCopy := make([]byte, len(b))
copy(byteCopy, b)
buf := bytes.NewBuffer(byteCopy)
req, err := http.NewRequest("POST", to, buf)
if err != nil {
log.Info(err)
return
}
// I prefer to deal with strings and just parse to net/url if and when
// needed, even if here we do one extra round trip
iri, err := url.Parse(to)
if err != nil {
log.Error("cannot parse url for POST, check your syntax")
return err
}
req.Header.Add("Accept-Charset", "utf-8")
req.Header.Add("Date", time.Now().UTC().Format("Mon, 02 Jan 2006 15:04:05")+" GMT")
req.Header.Add("User-Agent", userAgent+" "+version)
req.Header.Add("Host", iri.Host)
req.Header.Add("Accept", "application/activity+json; charset=utf-8")
req.Header.Add("Content-Type", "application/activity+json; charset=utf-8")
err = postSigner.SignRequest(a.privateKey, a.publicKeyID, req, byteCopy)
if err != nil {
log.Info(err)
return
}
resp, err := client.Do(req)
if err != nil {
log.Info(err)
return
}
defer resp.Body.Close()
if !isSuccess(resp.StatusCode) {
responseData, _ := ioutil.ReadAll(resp.Body)
err = fmt.Errorf("POST request to %s failed (%d): %s\nResponse: %s \nRequest: %s \nHeaders: %s", to, resp.StatusCode, resp.Status, FormatJSON(responseData), FormatJSON(byteCopy), FormatHeaders(req.Header))
log.Info(err)
return
}
responseData, _ := ioutil.ReadAll(resp.Body)
log.Errorf("POST request to %s succeeded (%d): %s \nResponse: %s \nRequest: %s \nHeaders: %s", to, resp.StatusCode, resp.Status, FormatJSON(responseData), FormatJSON(byteCopy), FormatHeaders(req.Header))
return
}
func (a *Actor) signedHTTPGet(address string) (string, error) {
req, err := http.NewRequest("GET", address, nil)
if err != nil {
log.Error("cannot create new http.request")
return "", err
}
iri, err := url.Parse(address)
if err != nil {
log.Error("cannot parse url for GET, check your syntax")
return "", err
}
req.Header.Add("Accept-Charset", "utf-8")
req.Header.Add("Date", time.Now().UTC().Format("Mon, 02 Jan 2006 15:04:05")+" GMT")
req.Header.Add("User-Agent", fmt.Sprintf("%s %s %s", userAgent, libName, version))
req.Header.Add("host", iri.Host)
req.Header.Add("digest", "")
req.Header.Add("Accept", "application/activity+json; profile=\"https://www.w3.org/ns/activitystreams\"")
// set up the http signer
signer, _, _ := httpsig.NewSigner([]httpsig.Algorithm{httpsig.RSA_SHA256}, "SHA-256", []string{"(request-target)", "date", "host", "digest"}, httpsig.Signature)
err = signer.SignRequest(a.privateKey, a.publicKeyID, req, nil)
if err != nil {
log.Error("Can't sign the request")
return "", err
}
resp, err := client.Do(req)
if err != nil {
log.Error("Cannot perform the GET request")
log.Error(err)
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
responseData, _ := ioutil.ReadAll(resp.Body)
return "", fmt.Errorf("GET request to %s failed (%d): %s \n%s", iri.String(), resp.StatusCode, resp.Status, FormatJSON(responseData))
}
responseData, _ := ioutil.ReadAll(resp.Body)
fmt.Println("GET request succeeded:", iri.String(), req.Header, resp.StatusCode, resp.Status, "\n", FormatJSON(responseData))
responseText := string(responseData)
return responseText, nil
}
// NewFollower records a new follower to the actor file
func (a *Actor) NewFollower(iri string, inbox string) error {
a.followers[iri] = inbox
return a.save()
}
// appendToOutbox adds a new line with the id of the activity
// to outbox.txt
func (a *Actor) appendToOutbox(iri string) (err error) {
// create outbox file if it doesn't exist
var outbox *os.File
outboxFilePath := storage + slash + "actors" + slash + a.Name + slash + "outbox.txt"
outbox, err = os.OpenFile(outboxFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
log.Info("Cannot create or open outbox file")
log.Info(err)
return err
}
defer outbox.Close()
outbox.Write([]byte(iri + "\n"))
return nil
}
// batchSend sends a batch of http posts to a list of recipients
func (a *Actor) batchSend(activity map[string]interface{}, recipients []string) (err error) {
for _, v := range recipients {
err := a.signedHTTPPost(activity, v)
if err != nil {
log.Info("Failed to deliver message to " + v)
}
}
return
}
// sendToFollowers sends a batch of http posts to each one of the followers
func (a *Actor) sendToFollowers(activity map[string]interface{}) (err error) {
recipients := make([]string, len(a.followers))
i := 0
for _, inbox := range a.followers {
recipients[i] = inbox.(string)
i++
}
a.batchSend(activity, recipients)
return
}
// Follow a remote user by their iri
func (a *Actor) Follow(user string) (err error) {
remote, err := NewRemoteActor(user)
if err != nil {
log.Info("Can't contact " + user + " to get their inbox")
return
}
follow := make(map[string]interface{})
hash, id := a.newItemID()
follow["@context"] = context()
follow["actor"] = a.iri
follow["id"] = id
follow["object"] = user
follow["type"] = "Follow"
// if we are not already following them
if _, ok := a.following[user]; !ok {
// if we have not been rejected previously
if _, ok := a.rejected[user]; !ok {
go func() {
err := a.signedHTTPPost(follow, remote.inbox)
if err != nil {
log.Info("Couldn't follow " + user)
log.Info(err)
return
}
// save the activity
a.saveItem(hash, follow)
a.requested[user] = hash
a.save()
// we are going to save the request here
// and save the follow only on accept so look at
// the http handler for the accept code
}()
}
}
return nil
}
// Unfollow the user declared by the IRI in `user`.
// This recreates the original Follow activity, wraps it in an
// Undo activity, and sets its id to the id of the original Follow
// activity that was accepted when initially following that user
// (this is read from the `actor.following` map).
func (a *Actor) Unfollow(user string) {
// if we have a request to follow this user cancel it
cancelRequest := false
if _, ok := a.requested[user]; ok {
log.Info("Cancelling follow request")
cancelRequest = true
// then continue to send the unfollow to the recipient
// to inform them that the request is cancelled.
} else if _, ok := a.following[user]; !ok {
log.Info("We are not following this user, ignoring...")
return
}
log.Info("Unfollowing " + user)
var hash string
// find the id of the original follow
if cancelRequest {
hash = a.requested[user].(string)
} else {
hash = a.following[user].(string)
}
// create an undo activiy
undo := make(map[string]interface{})
undo["@context"] = context()
undo["actor"] = a.iri
undo["id"] = baseURL + "item/" + hash + "/undo"
undo["type"] = "Undo"
follow := make(map[string]interface{})
follow["@context"] = context()
follow["actor"] = a.iri
follow["id"] = baseURL + "item/" + hash
follow["object"] = user
follow["type"] = "Follow"
// add the properties to the undo activity
undo["object"] = follow
// get the remote user's inbox
remoteUser, err := NewRemoteActor(user)
if err != nil {
log.Info("Failed to contact remote actor")
return
}
PrettyPrint(undo)
go func() {
err := a.signedHTTPPost(undo, remoteUser.inbox)
if err != nil {
log.Info("Couldn't unfollow " + user)
log.Info(err)
return
}
// if there was no error then delete the follow
// from the list
if cancelRequest {
delete(a.requested, user)
} else {
delete(a.following, user)
}
a.save()
}()
}
// Announce this activity to our followers
func (a *Actor) Announce(url string) {
// our announcements are public. Public activities have a "to" addressed to the URL below
toURL := []string{"https://www.w3.org/ns/activitystreams#Public"}
hash, id := a.newItemID()
announce := make(map[string]interface{})
announce["@context"] = context()
announce["id"] = id
announce["type"] = "Announce"
announce["object"] = url
announce["actor"] = a.iri
announce["to"] = toURL
// cc this to all our followers one by one
// I've seen activities that just include the url of the
// collection, but for now this works.
// It seems that sharedInbox will be deprecated
// so this is probably a better idea anyway (#APConf)
announce["cc"] = a.followersSlice()
// add a timestamp
announce["published"] = time.Now().Format(time.RFC3339)
a.appendToOutbox(announce["id"].(string))
a.saveItem(hash, announce)
a.sendToFollowers(announce)
}
func (a *Actor) followersSlice() []string {
followersSlice := make([]string, 0)
followersSlice = append(followersSlice, a.followersIRI)
for k := range a.followers {
followersSlice = append(followersSlice, k)
}
return followersSlice
}
// Accept a follow request
func (a *Actor) Accept(follow map[string]interface{}) {
// it's a follow, write it down
newFollower := follow["actor"].(string)
// check we aren't following ourselves
if newFollower == follow["object"] {
log.Info("You can't follow yourself")
return
}
follower, err := NewRemoteActor(follow["actor"].(string))
// check if this user is already following us
if _, ok := a.followers[newFollower]; ok {
log.Info("You're already following us, yay!")
// do nothing, they're already following us
} else {
a.NewFollower(newFollower, follower.inbox)
}
// send accept anyway even if they are following us already
// this is very verbose. I would prefer creating a map by hand
// remove @context from the inner activity
delete(follow, "@context")
accept := make(map[string]interface{})
accept["@context"] = "https://www.w3.org/ns/activitystreams"
accept["to"] = follow["actor"]
_, accept["id"] = a.newID()
accept["actor"] = a.iri
accept["object"] = follow
accept["type"] = "Accept"
if err != nil {
log.Info("Couldn't retrieve remote actor info, maybe server is down?")
log.Info(err)
}
// Maybe we need to save this accept?
go a.signedHTTPPost(accept, follower.inbox)
}
// Followers returns the list of followers
func (a *Actor) Followers() map[string]string {
f := make(map[string]string)
for follower, inbox := range a.followers {
f[follower] = inbox.(string)
}
return f
}
// Following returns the list of actors we follow
func (a *Actor) Following() map[string]string {
f := make(map[string]string)
for followee, hash := range a.following {
f[followee] = hash.(string)
}
return f
}

352
vendor/github.com/writeas/activityserve/http.go generated vendored Normal file
View File

@ -0,0 +1,352 @@
package activityserve
import (
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"github.com/gologme/log"
"github.com/gorilla/mux"
"github.com/writefreely/go-nodeinfo"
"encoding/json"
)
// ServeSingleActor just simplifies the call from main so
// that onboarding is as easy as possible
func ServeSingleActor(actor Actor) {
Serve(map[string]Actor{actor.Name: actor})
}
// Serve starts an http server with all the required handlers
func Serve(actors map[string]Actor) {
var webfingerHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/jrd+json; charset=utf-8")
account := r.URL.Query().Get("resource") // should be something like acct:user@example.com
account = strings.Replace(account, "acct:", "", 1) // remove acct:
server := strings.Split(baseURL, "://")[1] // remove protocol from baseURL. Should get example.com
server = strings.TrimSuffix(server, "/")              // remove any trailing slash. Should get example.com
account = strings.Replace(account, "@"+server, "", 1) // remove server from handle. Should get user
actor, err := LoadActor(account)
// error out if this actor does not exist
if err != nil {
log.Info("No such actor")
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "404 - actor not found")
return
}
// response := `{"subject":"acct:` + actor.name + `@` + server + `","aliases":["` + baseURL + actor.name + `","` + baseURL + actor.name + `"],"links":[{"href":"` + baseURL + `","type":"text/html","rel":"https://webfinger.net/rel/profile-page"},{"href":"` + baseURL + actor.name + `","type":"application/activity+json","rel":"self"}]}`
responseMap := make(map[string]interface{})
responseMap["subject"] = "acct:" + actor.Name + "@" + server
// links is a json array with a single element
var links [1]map[string]string
link1 := make(map[string]string)
link1["rel"] = "self"
link1["type"] = "application/activity+json"
link1["href"] = baseURL + actor.Name
links[0] = link1
responseMap["links"] = links
response, err := json.Marshal(responseMap)
if err != nil {
log.Error("problem creating the webfinger response json")
}
PrettyPrintJSON(response)
w.Write([]byte(response))
}
var actorHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/activity+json; charset=utf-8")
log.Info("Remote server " + r.RemoteAddr + " just fetched our /actor endpoint")
username := mux.Vars(r)["actor"]
log.Info(username)
if username == ".well-known" || username == "favicon.ico" {
log.Info("well-known, skipping...")
return
}
actor, err := LoadActor(username)
// error out if this actor does not exist (or there are dots or slashes in its name)
if err != nil {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "404 - page not found")
log.Info("Can't create local actor")
return
}
fmt.Fprintf(w, actor.whoAmI())
// Show some debugging information
printer.Info("")
body, _ := ioutil.ReadAll(r.Body)
PrettyPrintJSON(body)
log.Info(FormatHeaders(r.Header))
printer.Info("")
}
var outboxHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/activity+json; charset=utf-8")
pageStr := r.URL.Query().Get("page") // get the page from the query string as string
username := mux.Vars(r)["actor"] // get the needed actor from the muxer (url variable {actor} below)
actor, err := LoadActor(username) // load the actor from disk
if err != nil { // either actor requested has illegal characters or
log.Info("Can't load local actor") // we don't have such actor
fmt.Fprintf(w, "404 - page not found")
w.WriteHeader(http.StatusNotFound)
return
}
postsPerPage := 100
var response []byte
filename := storage + slash + "actors" + slash + actor.Name + slash + "outbox.txt"
totalLines, err := lineCounter(filename)
if err != nil {
log.Info("Can't read outbox.txt")
log.Info(err)
return
}
if pageStr == "" {
//TODO fix total items
response = []byte(`{
"@context" : "https://www.w3.org/ns/activitystreams",
"first" : "` + baseURL + actor.Name + `/outbox?page=1",
"id" : "` + baseURL + actor.Name + `/outbox",
"last" : "` + baseURL + actor.Name + `/outbox?page=` + strconv.Itoa(totalLines/postsPerPage+1) + `",
"totalItems" : ` + strconv.Itoa(totalLines) + `,
"type" : "OrderedCollection"
}`)
} else {
page, err := strconv.Atoi(pageStr) // get page number from query string
if err != nil {
log.Info("Page number not a number, assuming 1")
page = 1
}
lines, err := ReadLines(filename, (page-1)*postsPerPage, page*(postsPerPage+1)-1)
if err != nil {
log.Info("Can't read outbox file")
log.Info(err)
return
}
responseMap := make(map[string]interface{})
responseMap["@context"] = context()
responseMap["id"] = baseURL + actor.Name + "/outbox?page=" + pageStr
if page*postsPerPage < totalLines {
responseMap["next"] = baseURL + actor.Name + "/outbox?page=" + strconv.Itoa(page+1)
}
if page > 1 {
responseMap["prev"] = baseURL + actor.Name + "/outbox?page=" + strconv.Itoa(page-1)
}
responseMap["partOf"] = baseURL + actor.Name + "/outbox"
responseMap["type"] = "OrderedCollectionPage"
orderedItems := make([]interface{}, 0, postsPerPage)
for _, item := range lines {
// split the line
parts := strings.Split(item, "/")
// keep the hash
hash := parts[len(parts)-1]
// build the filename
filename := storage + slash + "actors" + slash + actor.Name + slash + "items" + slash + hash + ".json"
// open the file
activityJSON, err := ioutil.ReadFile(filename)
if err != nil {
log.Error("can't read activity")
log.Info(filename)
return
}
var temp map[string]interface{}
// put it into a map
json.Unmarshal(activityJSON, &temp)
// append to orderedItems
orderedItems = append(orderedItems, temp)
}
responseMap["orderedItems"] = orderedItems
response, err = json.Marshal(responseMap)
if err != nil {
log.Info("can't marshal map to json")
log.Info(err)
return
}
}
w.Write(response)
}
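// inboxHandler processes incoming activities addressed to a local actor:
// Follow, Accept, Reject and Create; anything else is ignored.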
var inboxHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
panic(err)
}
activity := make(map[string]interface{})
err = json.Unmarshal(b, &activity)
if err != nil {
log.Error("Probably this request didn't have (valid) JSON inside it")
return
}
// TODO check if it's actually an activity
// check if case is going to be an issue
switch activity["type"] {
case "Follow":
// load the object as actor
actor, err := LoadActor(mux.Vars(r)["actor"]) // load the actor from disk
if err != nil {
log.Error("No such actor")
return
}
actor.OnFollow(activity)
case "Accept":
acceptor := activity["actor"].(string)
actor, err := LoadActor(mux.Vars(r)["actor"]) // load the actor from disk
if err != nil {
log.Error("No such actor")
return
}
// From here down this could be moved to Actor (TBD)
follow := activity["object"].(map[string]interface{})
id := follow["id"].(string)
// check if the object of the follow is us
if follow["actor"].(string) != baseURL+actor.Name {
log.Info("This is not for us, ignoring")
return
}
// try to get the hash only
hash := strings.Replace(id, baseURL+actor.Name+"/item/", "", 1)
// if there are still slashes in the result this means the
// above didn't work
if strings.ContainsAny(hash, "/") {
// log.Info(follow)
log.Info("The id of this follow is probably wrong")
// we could return here but pixelfed returns
// the id as http://domain.tld/actor instead of
// http://domain.tld/actor/item/hash so this chokes
// return
}
// Have we already requested this follow or are we following anybody that
// sprays accepts?
// pixelfed doesn't return the original follow thus the id is wrong so we
// need to just check if we requested this actor
if _, ok := actor.requested[acceptor]; !ok {
log.Info("We never requested this follow from " + acceptor +", ignoring the Accept")
return
}
// if pixelfed fixes https://github.com/pixelfed/pixelfed/issues/1710 we should uncomment
// hash is the _ from above
// if hash != id {
// log.Info("Id mismatch between Follow request and Accept")
// return
// }
actor.following[acceptor] = hash
PrettyPrint(activity)
delete(actor.requested, acceptor)
actor.save()
case "Reject":
rejector := activity["actor"].(string)
actor, err := LoadActor(mux.Vars(r)["actor"]) // load the actor from disk
if err != nil {
log.Error("No such actor")
return
}
// write the actor to the list of rejected follows so that
// we won't try following them again
actor.rejected[rejector] = ""
actor.save()
case "Create":
actor, ok := actors[mux.Vars(r)["actor"]] // load the actor from memory
if !ok {
// log.Error(actors)
log.Error("No such actor: " + mux.Vars(r)["actor"])
return
}
log.Info("Received the following activity from: " + r.UserAgent())
PrettyPrintJSON(b)
actor.OnReceiveContent(activity)
default:
}
}
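// peersHandler serves a local actor's followers or following collection.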
var peersHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/activity+json; charset=utf-8")
username := mux.Vars(r)["actor"]
collection := mux.Vars(r)["peers"]
if collection != "followers" && collection != "following" {
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("404 - No such collection"))
return
}
actor, err := LoadActor(username)
// error out if this actor does not exist
if err != nil {
log.Info("Can't create local actor")
return
}
var page int
pageS := r.URL.Query().Get("page")
if pageS == "" {
page = 0
} else {
page, _ = strconv.Atoi(pageS)
}
response, _ := actor.getPeers(page, collection)
w.Write(response)
}
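// postHandler serves a single stored item, looked up by hash, as ActivityPub JSON.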
var postHandler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("content-type", "application/activity+json; charset=utf-8")
username := mux.Vars(r)["actor"]
hash := mux.Vars(r)["hash"]
actor, err := LoadActor(username)
// error out if this actor does not exist
if err != nil {
log.Info("Can't create local actor")
return
}
post, err := actor.loadItem(hash)
if err != nil {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintf(w, "404 - post not found")
return
}
postJSON, err := json.Marshal(post)
if err != nil {
log.Info("failed to marshal json from item " + hash + " text")
return
}
w.Write(postJSON)
}
// Add the handlers to a HTTP server
gorilla := mux.NewRouter()
niCfg := nodeInfoConfig(baseURL)
ni := nodeinfo.NewService(*niCfg, nodeInfoResolver{len(actors)})
gorilla.HandleFunc(nodeinfo.NodeInfoPath, http.HandlerFunc(ni.NodeInfoDiscover))
gorilla.HandleFunc(niCfg.InfoURL, http.HandlerFunc(ni.NodeInfo))
gorilla.HandleFunc("/.well-known/webfinger", webfingerHandler)
gorilla.HandleFunc("/{actor}/peers/{peers}", peersHandler)
gorilla.HandleFunc("/{actor}/outbox", outboxHandler)
gorilla.HandleFunc("/{actor}/outbox/", outboxHandler)
gorilla.HandleFunc("/{actor}/inbox", inboxHandler)
gorilla.HandleFunc("/{actor}/inbox/", inboxHandler)
gorilla.HandleFunc("/{actor}/", actorHandler)
gorilla.HandleFunc("/{actor}", actorHandler)
gorilla.HandleFunc("/{actor}/item/{hash}", postHandler)
http.Handle("/", gorilla)
log.Fatal(http.ListenAndServe(":8081", nil))
}

52
vendor/github.com/writeas/activityserve/nodeinfo.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
package activityserve
import (
"strings"
"github.com/writefreely/go-nodeinfo"
)
type nodeInfoResolver struct {
actors int
}
func nodeInfoConfig(baseURL string) *nodeinfo.Config {
name := "Pherephone"
desc := "An ActivityPub repeater."
return &nodeinfo.Config{
BaseURL: baseURL,
InfoURL: "/api/nodeinfo",
Metadata: nodeinfo.Metadata{
NodeName: name,
NodeDescription: desc,
Software: nodeinfo.SoftwareMeta{
HomePage: "https://pherephone.org",
GitHub: "https://github.com/writeas/pherephone",
},
},
Protocols: []nodeinfo.NodeProtocol{
nodeinfo.ProtocolActivityPub,
},
Services: nodeinfo.Services{
Inbound: []nodeinfo.NodeService{},
Outbound: []nodeinfo.NodeService{},
},
Software: nodeinfo.SoftwareInfo{
Name: strings.ToLower(libName),
Version: version,
},
}
}
func (r nodeInfoResolver) IsOpenRegistration() (bool, error) {
return false, nil
}
func (r nodeInfoResolver) Usage() (nodeinfo.Usage, error) {
return nodeinfo.Usage{
Users: nodeinfo.UsageUsers{
Total: r.actors,
},
}, nil
}

11
vendor/github.com/writeas/activityserve/readme.md generated vendored Normal file
View File

@ -0,0 +1,11 @@
# ActivityServe
## A very light ActivityPub library in go
This library was built to support the very few functions that [pherephone](https://github.com/writeas/pherephone) requires. It might never be feature-complete, but it's a very good starting point for your ActivityPub journey. Take a look at [activityserve-example](https://github.com/writeas/activityserve-example) for a simple main file that uses **activityserve** to post a "Hello, world" message.
For now it supports following and unfollowing users, accepting follows, and announcing (boosting) other posts, and that is pretty much it.
The library is still a moving target and the API is not guaranteed to be stable.
You can override the auto-accept upon follow by setting the `actor.OnFollow` to a custom function.
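A minimal sketch of such an override, given an `Actor` value created or loaded through the library (the inbox dispatcher hands the raw `Follow` activity to the callback as a `map[string]interface{}`):
```go
// `actor` is assumed to be an activityserve Actor obtained elsewhere
actor.OnFollow = func(follow map[string]interface{}) {
	// here we only log the requester instead of accepting automatically
	log.Println("follow request from", follow["actor"])
}
```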

108
vendor/github.com/writeas/activityserve/remoteActor.go generated vendored Normal file
View File

@ -0,0 +1,108 @@
package activityserve
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"github.com/gologme/log"
)
// RemoteActor is a type that holds an actor
// that we want to interact with
type RemoteActor struct {
iri, outbox, inbox, sharedInbox string
info map[string]interface{}
}
// NewRemoteActor returns a remoteActor which holds
// all the info required for an actor we want to
// interact with (not essentially sitting in our instance)
func NewRemoteActor(iri string) (RemoteActor, error) {
info, err := get(iri)
if err != nil {
log.Info("Couldn't get remote actor information")
log.Info(err)
return RemoteActor{}, err
}
outbox, _ := info["outbox"].(string)
inbox, _ := info["inbox"].(string)
var endpoints map[string]interface{}
var sharedInbox string
if info["endpoints"] != nil {
endpoints = info["endpoints"].(map[string]interface{})
if val, ok := endpoints["sharedInbox"]; ok {
sharedInbox = val.(string)
}
}
return RemoteActor{
iri: iri,
outbox: outbox,
inbox: inbox,
sharedInbox: sharedInbox,
}, err
}
func (ra RemoteActor) getLatestPosts(number int) (map[string]interface{}, error) {
return get(ra.outbox)
}
func get(iri string) (info map[string]interface{}, err error) {
buf := new(bytes.Buffer)
req, err := http.NewRequest("GET", iri, buf)
if err != nil {
log.Info(err)
return
}
req.Header.Add("Accept", "application/activity+json")
req.Header.Add("User-Agent", userAgent+" "+version)
req.Header.Add("Accept-Charset", "utf-8")
resp, err := client.Do(req)
if err != nil {
log.Info("Cannot perform the request")
log.Info(err)
return
}
responseData, _ := ioutil.ReadAll(resp.Body)
if !isSuccess(resp.StatusCode) {
err = fmt.Errorf("GET request to %s failed (%d): %s\nResponse: %s \nHeaders: %s", iri, resp.StatusCode, resp.Status, FormatJSON(responseData), FormatHeaders(req.Header))
log.Info(err)
return
}
var e interface{}
err = json.Unmarshal(responseData, &e)
if err != nil {
log.Info("something went wrong when unmarshalling the json")
log.Info(err)
return
}
info = e.(map[string]interface{})
return
}
// GetInbox returns the inbox url of the actor
func (ra RemoteActor) GetInbox() string {
return ra.inbox
}
// GetSharedInbox returns the shared inbox URL of the actor, or the regular inbox if no shared inbox is set
func (ra RemoteActor) GetSharedInbox() string {
if ra.sharedInbox == "" {
return ra.inbox
}
return ra.sharedInbox
}

85
vendor/github.com/writeas/activityserve/setup.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
package activityserve
import (
"fmt"
"net/http"
"os"
"github.com/gologme/log"
"gopkg.in/ini.v1"
)
var slash = string(os.PathSeparator)
var baseURL = "http://example.com/"
var storage = "storage"
var userAgent = "activityserve"
var printer *log.Logger
const libName = "activityserve"
const version = "0.99"
var client = http.Client{}
// Setup sets our environment up
func Setup(configurationFile string, debug bool) *ini.File {
// read configuration file (config.ini)
if configurationFile == "" {
configurationFile = "config.ini"
}
cfg, err := ini.Load(configurationFile)
if err != nil {
fmt.Printf("Failed to read configuration file: %v\n", err)
os.Exit(1)
}
// Load base url from configuration file
baseURL = cfg.Section("general").Key("baseURL").String()
// check if it ends with a / and append one if not
if baseURL[len(baseURL)-1:] != "/" {
baseURL += "/"
}
// print it for our users
fmt.Println()
fmt.Println("Domain Name:", baseURL)
// Load storage location (only local filesystem supported for now) from config
storage = cfg.Section("general").Key("storage").String()
cwd, err := os.Getwd()
fmt.Println("Storage Location:", cwd+slash+storage)
fmt.Println()
SetupStorage(storage)
// Load user agent
userAgent = cfg.Section("general").Key("userAgent").String()
// I prefer the full file path in the log output (Llongfile) so that I can
// click it in the terminal and open it in the editor above
log.SetFlags(log.Llongfile)
// log.SetFlags(log.LstdFlags | log.Lshortfile)
log.EnableLevel("warn")
// create a logger with levels but without prefixes for easier to read
// debug output
printer = log.New(os.Stdout, " ", 0)
if debug {
fmt.Println()
fmt.Println("debug mode on")
log.EnableLevel("info")
printer.EnableLevel("info")
}
return cfg
}
// SetupStorage creates storage
func SetupStorage(storage string) {
// prepare storage for foreign activities (activities we store that don't
// belong to us)
foreignDir := storage + slash + "foreign"
if _, err := os.Stat(foreignDir); os.IsNotExist(err) {
os.MkdirAll(foreignDir, 0755)
}
}

86
vendor/github.com/writeas/activityserve/snips.md generated vendored Normal file
View File

@ -0,0 +1,86 @@
## When we follow someone from pherephone 1.00
``` json
{
"@context": "https://www.w3.org/ns/activitystreams",
"actor": "https://floorb.qwazix.com/myAwesomeList1",
"id": "https://floorb.qwazix.com/myAwesomeList1/Xm9UHyJXyFYduqXz",
"object": "https://cybre.space/users/qwazix",
"to": "https://cybre.space/users/qwazix",
"type": "Follow"
}
```
``` yaml
Accept: application/activity+json
Accept-Charset: utf-8
Date: Tue, 10 Sep 2019 05:31:22 GMT
Digest: SHA-256=uL1LvGU4+gSDm8Qci6XibZODTaNCsXWXWgkMWAqBvG8=
Host: cybre.space
Signature: keyId="https://floorb.qwazix.com/myAwesomeList1#main-key",algorithm="rsa-sha256",headers="(request-target) date host digest",signature="c6oipeXu/2zqX3qZF1x7KLNTYifcyqwwDySoslAowjpYlKWO3qAZMU1A//trYm23AtnItXkH2mY3tPq8X7fy9P1+CMFmiTzV01MGwwwJLDtEXKoq8W7L7lWuQhDD5rjiZqWyei4T13FW7MOCRbAtC4kZqkHrp5Z3l8HhPvmgUV5VOuSGWrtbmCN3hlAEHVugQTMPC6UjlaHva6Qm/SNlFmpUdG7WmUUPJIZ6a/ysBk4cLkF1+Hb03grXKexLHAU4bPIRcjwFpUl06yp8fZ8CCLhNhIsBACiizV85D3votmdxAollE5JXSwBp4f6jrZbgiJEusFoxiVKKqZRHRESQBQ=="
```
## Pherephone 1 Accept Activity
``` yaml
Accept: application/activity+json
Accept-Charset: utf-8
Date: Tue, 10 Sep 2019 07:28:49 GMT
Digest: SHA-256=GTy9bhYjOnbeCJzAzpqI/HEw/5p81NnoPLJkVAiZ4K0=
Host: cybre.space
Signature: keyId="https://floorb.qwazix.com/activityserve_test_actor_1#main-key",algorithm="rsa-sha256",headers="(request-target) date host digest",signature="jAeTEy9v1t+bCwQJB2R4Cscu/fGu5i4luHXlzJcJVyRbsHGqxbNEOxlk/G0S5BGbX3Kuoerq2oMpkFV5kCWPlpAmfhz38NKIrWhjnEUpFOfiG+ZJBpQsb3VQp7M3RGPZ9K4hmV6BSzkC8npsFGPI/HkAaj9u/txW5Cp4v6dMOYteoRLcKc3UVPK9j4hCbjq6SPhpwfM+StARSDnUFfpDe4YYQiVnO2WoINPUr4xvELmCYdBclSBCKcG66g8sBpnx4McjIlu0VISeBxzIHZYOONPteLY2uZW3Axi9JIAq88Y2Ecw4vV6Ctp7KcmD7M3kAJLqao2p/XZNZ3ExsTGfrXA=="
User-Agent: activityserve 0.0
```
``` json
{
"@context": "https://www.w3.org/ns/activitystreams",
"actor": "https://floorb.qwazix.com/myAwesomeList1",
"id": "https://floorb.qwazix.com/myAwesomeList1/SABRE7xlDAjtDcZb",
"object": {
"actor": "https://cybre.space/users/qwazix",
"id": "https://cybre.space/3e7336af-4bcd-4f77-aa69-6a145be824aa",
"object": "https://floorb.qwazix.com/myAwesomeList1",
"type": "Follow"
},
"to": "https://cybre.space/users/qwazix",
"type": "Accept"
}
```
## Pherephone 2 Accept Activity
``` yaml
Accept: application/activity+json
Accept-Charset: utf-8
Date: Tue, 10 Sep 2019 07:32:08 GMT
Digest: SHA-256=yKzA6srSMx0b5GXn9DyflXVdqWd6ADBGt5hO9t/yc44=
Host: cybre.space
Signature: keyId="https://floorb.qwazix.com/myAwesomeList1#main-key",algorithm="rsa-sha256",headers="(request-target) date host digest",signature="WERXWDRFS7aGiIoz+HSujtuv9XNFBPxHkJSsCPu7PNIUDoAB2jdwW3rZc5jbrSLxi9Aqhr2BiBV/VYELQ8gITPzzIYH5sizPcPyLyARPUw37t6zA3HinahpfBKXhf73q9u+CYE/7DMKQ2Pvv2lQPaZ8hl27R2KJmcc3Jhmn5nxrQ+kxAtn6qYpNT/BqLWlXKx5rpYM2r+mHjFyYRYsjlAmi+RQNDEmv/uwn+XuNKzEtrL8Oq7mM13Lsid0a3gJi/t0b/luoyRyvi3fHUM/b1epfVogG/FulsZ0A92310v8MbastceQjjUzTzjKHILl7qNewkqtlzn2ARm3cZlAprSg=="
User-Agent: pherephone (go-fed/activity v1.0.0)
```
``` json
{
"@context": "https://www.w3.org/ns/activitystreams",
"actor": "https://floorb.qwazix.com/activityserve_test_actor_1",
"id": "https://floorb.qwazix.com/activityserve_test_actor_1/4wJ9DrBab4eIE3Bt",
"object": {
"actor": "https://cybre.space/users/qwazix",
"id": "https://cybre.space/9123da78-21a5-44bc-bce5-4039a4072e4c",
"object": "https://floorb.qwazix.com/activityserve_test_actor_1",
"type": "Follow"
},
"to": "https://cybre.space/users/qwazix",
"type": "Accept"
}
```

105
vendor/github.com/writeas/activityserve/util.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
package activityserve
import (
"bufio"
"io"
"net/http"
"os"
// "net/url"
"bytes"
"encoding/json"
// "time"
// "fmt"
"github.com/gologme/log"
// "github.com/go-fed/httpsig"
)
func isSuccess(code int) bool {
return code == http.StatusOK ||
code == http.StatusCreated ||
code == http.StatusAccepted ||
code == http.StatusNoContent
}
// PrettyPrint prints a map as indented JSON to the log
func PrettyPrint(themap map[string]interface{}) {
b, err := json.MarshalIndent(themap, "", " ")
if err != nil {
log.Info("error:", err)
}
log.Print(string(b))
}
// PrettyPrintJSON pretty-prints a JSON byte slice to the log
func PrettyPrintJSON(theJSON []byte) {
dst := new(bytes.Buffer)
json.Indent(dst, theJSON, "", "\t")
log.Info(dst)
}
// FormatJSON formats json with tabs and
// returns the new string
func FormatJSON(theJSON []byte) string {
dst := new(bytes.Buffer)
json.Indent(dst, theJSON, "", "\t")
return dst.String()
}
// FormatHeaders to string for printing
func FormatHeaders(header http.Header) string {
buf := new(bytes.Buffer)
header.Write(buf)
return buf.String()
}
func context() [1]string {
return [1]string{"https://www.w3.org/ns/activitystreams"}
}
// ReadLines reads specific lines from a file and returns them as
// an array of strings
func ReadLines(filename string, from, to int) (lines []string, err error) {
lines = make([]string, 0, to-from)
reader, err := os.Open(filename)
if err != nil {
log.Info("could not read file")
log.Info(err)
return
}
defer reader.Close()
sc := bufio.NewScanner(reader)
line := 0
for sc.Scan() {
line++
if line >= from && line <= to {
lines = append(lines, sc.Text())
}
}
return lines, nil
}
func lineCounter(filename string) (int, error) {
r, err := os.Open(filename)
if err != nil {
log.Info("could not read file")
log.Info(err)
return 0, err
}
defer r.Close()
buf := make([]byte, 32*1024)
count := 0
lineSep := []byte{'\n'}
for {
c, err := r.Read(buf)
count += bytes.Count(buf[:c], lineSep)
switch {
case err == io.EOF:
return count, nil
case err != nil:
return count, err
}
}
}

1
vendor/github.com/writeas/go-webfinger/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
*.swp

21
vendor/github.com/writeas/go-webfinger/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Sheena Artrip
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

58
vendor/github.com/writeas/go-webfinger/README.md generated vendored Normal file
View File

@ -0,0 +1,58 @@
[![GoDoc](https://godoc.org/code.as/writeas/go-webfinger?status.svg)](https://godoc.org/code.as/writeas/go-webfinger)
# go-webfinger
go-webfinger is a golang webfinger server implementation. See [v1.0](https://github.com/writeas/go-webfinger/releases/tag/1.0) for the latest stable version, and our [Code.as repo](https://code.as/writeas/go-webfinger) for the Write.as-specific implementation.
Past v1.0, this fork was made especially for federation support on [Write.as](https://write.as), which includes users across write.as, \*.writeas.com, and custom domains we host. The `master` branch contains changes specific to our implementation, and will change without notification.
## Usage
`webfinger.Service` is implemented as a net/http handler, which means
usage is simply registering the object with your http service.
Using the webfinger service as the main ServeHTTP:
```go
myResolver = ...
wf := webfinger.Default(myResolver{})
wf.NotFoundHandler = // the rest of your app
http.ListenAndServe(":8080", wf)
```
Using the webfinger service as a route on an existing HTTP router:
```go
myResolver = ...
wf := webfinger.Default(myResolver{})
http.Handle(webfinger.WebFingerPath, http.HandlerFunc(wf.Webfinger))
http.ListenAndServe(":8080", nil)
```
## Defaults
The webfinger service is installed with a few defaults. Some of these
defaults ensure we stick closely to the webfinger specification (tls-only, CORS, Content-Type)
and other defaults are simply useful for development (no-cache).
The full list of defaults can be found in the godoc for `webfinger.Service`. They are exposed
as public variables which can be overridden.
`PreHandlers` is the map of preflight HTTP handlers to run. You can add your own via `wf.PreHandlers["my-custom-name"] = ...`; however,
execution order is not guaranteed.
### TLS-Only
Handler which routes to the TLS version of the page. Disable via `wf.NoTLSHandler = nil`.
### No-Cache
A PreFlight handler which sets no-cache headers on anything under `/.well-known/webfinger`. Disable or override via `wf.PreHandlers[webfinger.NoCacheMiddleware]`
### Content Type as application/jrd+json
A PreFlight handler which sets the Content-Type to `application/jrd+json`. Disable or override via `wf.PreHandlers[webfinger.ContentTypeMiddleware]`.
### CORS
A PreFlight handler which adds the CORS headers. Disable or override via `wf.PreHandlers[webfinger.CorsMiddleware]`.
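For example, a custom pre-handler can be registered alongside the defaults (the `"request-log"` name here is arbitrary, and remember that execution order is not guaranteed):
```go
wf := webfinger.Default(myResolver{})
wf.PreHandlers["request-log"] = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	log.Println("webfinger lookup for", r.URL.Query().Get("resource"))
})
http.Handle(webfinger.WebFingerPath, http.HandlerFunc(wf.Webfinger))
```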

30
vendor/github.com/writeas/go-webfinger/account.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package webfinger
import (
"errors"
"strings"
)
type account struct {
Name string
Hostname string
}
func (a *account) ParseString(str string) (err error) {
items := strings.Split(str, "@")
if strings.HasPrefix(str, "acct:") {
a.Name = items[0][5:]
} else {
a.Name = items[0]
}
if len(items) < 2 {
//TODO: this might not be required
err = errors.New("No domain on account")
return
}
a.Hostname = strings.Split(items[1], "/")[0]
return
}

23
vendor/github.com/writeas/go-webfinger/doc.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Package webfinger is a server implementation of the webfinger specification. This
// is a general-case package which provides the HTTP handlers and interfaces
// for adding webfinger support for your system and resources.
//
// The simplest way to use this is to call webfinger.Default() and
// then register the object as an HTTP handler:
//
// myResolver = ...
// wf := webfinger.Default(myResolver{})
// wf.NotFoundHandler = // the rest of your app
// http.ListenAndServe(":8080", wf)
//
// However, you can also register the specific webfinger handler to a path. This should
// work on any router that supports net/http.
//
// myResolver = ...
// wf := webfinger.Default(myResolver{})
// http.Handle(webfinger.WebFingerPath, http.HandlerFunc(wf.Webfinger))
// http.ListenAndServe(":8080", nil)
//
// In either case, the handlers attached to the webfinger service get invoked as
// needed.
package webfinger

31
vendor/github.com/writeas/go-webfinger/error.go generated vendored Normal file
View File

@ -0,0 +1,31 @@
package webfinger
import (
"context"
"net/http"
)
// ErrorKeyType is the type for the context error key
type ErrorKeyType int
// ErrorKey is the key for the context error
var ErrorKey ErrorKeyType
// ErrorFromContext gets the error from the context
func ErrorFromContext(ctx context.Context) error {
v, ok := ctx.Value(ErrorKey).(error)
if !ok {
return nil
}
return v
}
func addError(r *http.Request, err error) *http.Request {
if err == nil {
return r
}
ctx := r.Context()
ctx = context.WithValue(ctx, ErrorKey, err)
r = r.WithContext(ctx)
return r
}

113
vendor/github.com/writeas/go-webfinger/http.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
package webfinger
import (
"encoding/json"
"errors"
"net/http"
)
// WebFingerPath defines the default path of the webfinger handler.
const WebFingerPath = "/.well-known/webfinger"
func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
//TODO: support host-meta as a path
path := r.URL.Path
switch path {
case WebFingerPath:
s.Webfinger(w, r)
default:
s.NotFoundHandler.ServeHTTP(w, r)
}
}
// Webfinger is the webfinger handler
func (s *Service) Webfinger(w http.ResponseWriter, r *http.Request) {
s.runPrehandlers(w, r)
if r.TLS == nil && s.NoTLSHandler != nil {
s.NoTLSHandler.ServeHTTP(w, r)
return
}
//NOTE: should this run before or after the pre-run handlers?
if r.Method != "GET" {
s.MethodNotSupportedHandler.ServeHTTP(w, r)
return
}
if len(r.URL.Query()["resource"]) != 1 {
s.MalformedRequestHandler.ServeHTTP(w, addError(r, errors.New("Malformed resource parameter")))
return
}
resource := r.URL.Query().Get("resource")
var a account
if err := a.ParseString(resource); err != nil {
s.MalformedRequestHandler.ServeHTTP(w, addError(r, err))
return
}
relStrings := r.URL.Query()["rel"]
var rels []Rel
for _, r := range relStrings {
rels = append(rels, Rel(r))
}
rsc, err := s.Resolver.FindUser(a.Name, a.Hostname, r.Host, rels)
if err != nil {
if !s.Resolver.IsNotFoundError(err) {
s.ErrorHandler.ServeHTTP(w, addError(r, err))
return
}
rsc, err = s.Resolver.DummyUser(a.Name, a.Hostname, rels)
if err != nil && !s.Resolver.IsNotFoundError(err) {
s.ErrorHandler.ServeHTTP(w, addError(r, err))
return
} else if s.Resolver.IsNotFoundError(err) {
s.NotFoundHandler.ServeHTTP(w, r)
return
}
}
if err := json.NewEncoder(w).Encode(&rsc); err != nil {
s.ErrorHandler.ServeHTTP(w, addError(r, err))
return
}
}
func (s *Service) runPrehandlers(w http.ResponseWriter, r *http.Request) {
if s.PreHandlers == nil {
return
}
for _, val := range s.PreHandlers {
if val != nil {
val.ServeHTTP(w, r)
}
}
}
func (s *Service) defaultErrorHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusInternalServerError)
}
func (s *Service) defaultNotFoundHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
}
func (s *Service) defaultMethodNotSupportedHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
}
func (s *Service) defaultMalformedRequestHandler(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusBadRequest)
}
func (s *Service) defaultNoTLSHandler(w http.ResponseWriter, r *http.Request) {
u := *r.URL
u.Scheme = "https"
w.Header().Set("Location", u.String())
w.WriteHeader(http.StatusSeeOther)
}

13
vendor/github.com/writeas/go-webfinger/link.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package webfinger
// A Link is a series of user details
type Link struct {
HRef string `json:"href"`
Type string `json:"type,omitempty"`
Rel string `json:"rel"`
Properties map[string]*string `json:"properties,omitempty"`
Titles map[string]string `json:"titles,omitempty"`
}
// Rel allows referencing a subset of the users details
type Rel string

21
vendor/github.com/writeas/go-webfinger/middleware.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package webfinger
import "net/http"
// Middleware constant keys
const (
NoCacheMiddleware string = "NoCache"
CorsMiddleware string = "Cors"
ContentTypeMiddleware string = "Content-Type"
)
// noCache sets the headers to disable caching
func noCache(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Pragma", "no-cache")
}
// jrdSetup sets the content-type
func jrdSetup(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/jrd+json")
}

21
vendor/github.com/writeas/go-webfinger/resolver.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package webfinger
// The Resolver is how the webfinger service looks up a user or resource. The resolver
// must be provided by the developer using this package, as each webfinger
// service may be exposing a different set of users, resources, or services.
type Resolver interface {
// FindUser finds the user given the username and hostname.
FindUser(username string, hostname, requestHost string, r []Rel) (*Resource, error)
// DummyUser allows us to return a dummy user to avoid user-enumeration via webfinger 404s. This
// can be done in the webfinger code itself but then it would be obvious which users are real
// and which are not real via differences in how the implementation works vs how
// the general webfinger code works. This does not match the webfinger specification
// but is an extra precaution. Returning a NotFound error here will
// keep the webfinger 404 behavior.
DummyUser(username string, hostname string, r []Rel) (*Resource, error)
// IsNotFoundError returns true if the given error is a not found error.
IsNotFoundError(err error) bool
}

9
vendor/github.com/writeas/go-webfinger/resource.go generated vendored Normal file
View File

@ -0,0 +1,9 @@
package webfinger
// A Resource is the top-level JRD resource object.
type Resource struct {
Subject string `json:"subject,omitempty"`
Aliases []string `json:"aliases,omitempty"`
Properties map[string]string `json:"properties,omitempty"`
Links []Link `json:"links"`
}

65
vendor/github.com/writeas/go-webfinger/service.go generated vendored Normal file
View File

@ -0,0 +1,65 @@
package webfinger
import (
"net/http"
"github.com/captncraig/cors"
)
// Service is the webfinger service containing the required
// HTTP handlers and defaults for webfinger implementations.
type Service struct {
// PreHandlers are invoked at the start of each HTTP method, used to
// setup things like CORS, caching, etc. You can delete or replace
// a handler by setting service.PreHandlers[name] = nil
PreHandlers map[string]http.Handler
// NotFoundHandler is the handler to invoke when a URL is not matched. It does NOT
// handle the case of a non-existing users unless your Resolver.DummyUser returns
// an error that matches Resolver.IsNotFoundError(err) == true.
NotFoundHandler http.Handler
// MethodNotSupportedHandler is the handler invoked when an unsupported
// method is called on the webfinger HTTP service.
MethodNotSupportedHandler http.Handler
// MalformedRequestHandler is the handler invoked if the request routes
// but is malformed in some way. The default behavior is to return 400 BadRequest,
// per the webfinger specification
MalformedRequestHandler http.Handler
// NoTLSHandler is the handler invoked if the request is not
// a TLS request. The default behavior is to redirect to the TLS
// version of the URL, per the webfinger specification. Setting
// this to nil will allow nonTLS connections, but that is not advised.
NoTLSHandler http.Handler
// ErrorHandler is the handler invoked when an error is called. The request
// context contains the error in the webfinger.ErrorKey and can be fetched
// via webfinger.ErrorFromContext(ctx)
ErrorHandler http.Handler
// Resolver is the interface for resolving user details
Resolver Resolver
}
// Default creates a new service with the default registered handlers
func Default(ur Resolver) *Service {
var c = cors.Default()
s := &Service{}
s.Resolver = ur
s.ErrorHandler = http.HandlerFunc(s.defaultErrorHandler)
s.NotFoundHandler = http.HandlerFunc(s.defaultNotFoundHandler)
s.MethodNotSupportedHandler = http.HandlerFunc(s.defaultMethodNotSupportedHandler)
s.MalformedRequestHandler = http.HandlerFunc(s.defaultMalformedRequestHandler)
s.NoTLSHandler = http.HandlerFunc(s.defaultNoTLSHandler)
s.PreHandlers = make(map[string]http.Handler)
s.PreHandlers[NoCacheMiddleware] = http.HandlerFunc(noCache)
s.PreHandlers[CorsMiddleware] = http.HandlerFunc(c.HandleRequest)
s.PreHandlers[ContentTypeMiddleware] = http.HandlerFunc(jrdSetup)
return s
}

1
vendor/github.com/writefreely/go-nodeinfo/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
*.swp

21
vendor/github.com/writefreely/go-nodeinfo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2018 Write.as
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

70
vendor/github.com/writefreely/go-nodeinfo/README.md generated vendored Normal file
View File

@ -0,0 +1,70 @@
# go-nodeinfo
[![GoDoc](https://godoc.org/github.com/writefreely/go-nodeinfo?status.svg)](https://godoc.org/github.com/writefreely/go-nodeinfo)
[![Discuss on our forum](https://img.shields.io/discourse/https/discuss.write.as/users.svg?label=forum)](https://discuss.write.as/c/development)
go-nodeinfo is an implementation of [NodeInfo](https://github.com/jhass/nodeinfo), a standard metadata format for federated social networks, in Go (golang).
This fork contains extra metadata specifically for WriteFreely.
## Usage
`nodeinfo.Service` integrates with your existing `net/http` server.
```go
package main
import (
"github.com/writefreely/go-nodeinfo"
"net/http"
)
func main() {
cfg := nodeinfo.Config{
BaseURL: "http://localhost:8080",
InfoURL: "/api/nodeinfo",
Metadata: nodeinfo.Metadata{
NodeName: "Agora",
NodeDescription: "A federated something-something.",
Private: false,
},
Protocols: []nodeinfo.NodeProtocol{
nodeinfo.ProtocolActivityPub,
},
Services: nodeinfo.Services{
Inbound: []nodeinfo.NodeService{},
Outbound: []nodeinfo.NodeService{
nodeinfo.ServiceTwitter,
nodeinfo.ServiceTumblr,
},
},
Software: nodeinfo.SoftwareInfo{
Name: "Agora",
Version: "1.0",
},
}
ni := nodeinfo.NewService(cfg, nodeInfoResolver{})
http.Handle(nodeinfo.NodeInfoPath, http.HandlerFunc(ni.NodeInfoDiscover))
http.Handle(cfg.InfoURL, http.HandlerFunc(ni.NodeInfo))
http.ListenAndServe(":8080", nil)
}
type nodeInfoResolver struct{}
func (r nodeInfoResolver) IsOpenRegistration() (bool, error) {
return true, nil
}
func (r nodeInfoResolver) Usage() (nodeinfo.Usage, error) {
u := nodeinfo.Usage{
Users: nodeinfo.UsageUsers{
Total: 1,
},
LocalPosts: 1,
}
return u, nil
}
```
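The two handlers can also be registered on another router. A sketch with gorilla/mux, reusing `cfg` and `nodeInfoResolver` from the example above (import of `github.com/gorilla/mux` omitted):
```go
r := mux.NewRouter()
ni := nodeinfo.NewService(cfg, nodeInfoResolver{})
r.HandleFunc(nodeinfo.NodeInfoPath, ni.NodeInfoDiscover)
r.HandleFunc(cfg.InfoURL, ni.NodeInfo)
http.ListenAndServe(":8080", r)
```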

98
vendor/github.com/writefreely/go-nodeinfo/nodeinfo.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package nodeinfo
type (
NodeProtocol string
NodeService string
)
const (
// Protocols that can be supported by this node.
ProtocolActivityPub NodeProtocol = "activitypub"
ProtocolBuddyCloud = "buddycloud"
ProtocolDFRN = "dfrn"
ProtocolDisaspora = "diaspora"
ProtocolLibertree = "libertree"
ProtocolOStatus = "ostatus"
ProtocolPumpIO = "pumpio"
ProtocolTent = "tent"
ProtocolXMPP = "xmpp"
ProtocolZot = "zot"
// Services that can be supported (inbound or outbound) by this node's API.
ServiceAtom NodeService = "atom1.0"
ServiceGNUSocial = "gnusocial"
ServiceIMAP = "imap"
ServicePnut = "pnut"
ServicePOP3 = "pop3"
ServicePumpIO = "pumpio"
ServiceRSS = "rss2.0"
ServiceTwitter = "twitter"
ServiceTumblr = "tumblr"
)
type Config struct {
BaseURL string
InfoURL string
Metadata Metadata
Protocols []NodeProtocol
Services Services
Software SoftwareInfo
}
type (
// NodeInfo includes all required node info.
NodeInfo struct {
Metadata Metadata `json:"metadata"`
OpenRegistrations bool `json:"openRegistrations"`
Protocols []NodeProtocol `json:"protocols"`
Services Services `json:"services"`
Software SoftwareInfo `json:"software"`
Usage Usage `json:"usage"`
Version string `json:"version"`
}
// Metadata for nodeinfo. Properties are based on what Pleroma uses.
//
// From the spec: Free form key value pairs for software specific values.
// Clients should not rely on any specific key present.
Metadata struct {
NodeName string `json:"nodeName,omitempty"`
NodeDescription string `json:"nodeDescription,omitempty"`
Private bool `json:"private,omitempty"`
Software SoftwareMeta `json:"software,omitempty"`
MaxBlogs int `json:"maxBlogs,omitempty"`
PublicReader bool `json:"publicReader"`
Invites bool `json:"invites"`
}
Services struct {
Inbound []NodeService `json:"inbound"`
Outbound []NodeService `json:"outbound"`
}
SoftwareInfo struct {
// Name (canonical) of this server software.
Name string `json:"name"`
// Version of this server software.
Version string `json:"version"`
}
SoftwareMeta struct {
HomePage string `json:"homepage"`
GitHub string `json:"github"`
Follow string `json:"follow"`
}
// Usage is usage statistics for this server.
Usage struct {
Users UsageUsers `json:"users"`
LocalPosts int `json:"localPosts,omitempty"`
LocalComments int `json:"localComments,omitempty"`
}
UsageUsers struct {
Total int `json:"total,omitempty"`
ActiveHalfYear int `json:"activeHalfyear,omitempty"`
ActiveMonth int `json:"activeMonth,omitempty"`
}
)

View File

@ -0,0 +1,8 @@
package nodeinfo
type Resolver interface {
// IsOpenRegistration returns whether or not registration is open on this node.
IsOpenRegistration() (bool, error)
// Usage returns usage stats for this node.
Usage() (Usage, error)
}

64
vendor/github.com/writefreely/go-nodeinfo/routes.go generated vendored Normal file
View File

@ -0,0 +1,64 @@
package nodeinfo
import (
"encoding/json"
"github.com/writeas/go-webfinger"
"log"
"net/http"
)
// NodeInfoPath defines the default path of the nodeinfo handler.
const NodeInfoPath = "/.well-known/nodeinfo"
type discoverInfo struct {
Links []webfinger.Link `json:"links"`
}
func (s *Service) NodeInfoDiscover(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
i := discoverInfo{
Links: []webfinger.Link{
{
Rel: profile,
HRef: s.InfoURL,
},
},
}
body, err := json.Marshal(i)
if err != nil {
log.Printf("Unable to marshal nodeinfo discovery: %v", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
_, err = w.Write(body)
if err != nil {
log.Printf("Unable to write body: %v", err)
return
}
}
func (s *Service) NodeInfo(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; profile="+profile+"#")
i := s.BuildInfo()
body, err := json.Marshal(i)
if err != nil {
log.Printf("Unable to marshal nodeinfo: %v", err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
_, err = w.Write(body)
if err != nil {
log.Printf("Unable to write body: %v", err)
return
}
}

34
vendor/github.com/writefreely/go-nodeinfo/service.go generated vendored Normal file
View File

@ -0,0 +1,34 @@
package nodeinfo
const (
profileVer = "2.0"
profile = "http://nodeinfo.diaspora.software/ns/schema/" + profileVer
)
type Service struct {
InfoURL string
Info NodeInfo
resolver Resolver
}
func NewService(cfg Config, r Resolver) *Service {
return &Service{
InfoURL: cfg.BaseURL + cfg.InfoURL,
Info: NodeInfo{
Metadata: cfg.Metadata,
Protocols: cfg.Protocols,
Services: cfg.Services,
Software: cfg.Software,
},
resolver: r,
}
}
func (s Service) BuildInfo() NodeInfo {
ni := s.Info
ni.OpenRegistrations, _ = s.resolver.IsOpenRegistration()
ni.Usage, _ = s.resolver.Usage()
ni.Version = profileVer
return ni
}

3
vendor/golang.org/x/crypto/AUTHORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at https://tip.golang.org/AUTHORS.

3
vendor/golang.org/x/crypto/CONTRIBUTORS generated vendored Normal file
View File

@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at https://tip.golang.org/CONTRIBUTORS.

27
vendor/golang.org/x/crypto/LICENSE generated vendored Normal file
View File

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/crypto/PATENTS generated vendored Normal file
View File

@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

289
vendor/golang.org/x/crypto/blake2b/blake2b.go generated vendored Normal file
View File

@ -0,0 +1,289 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693
// and the extendable output function (XOF) BLAKE2Xb.
//
// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf
// and for BLAKE2Xb see https://blake2.net/blake2x.pdf
//
// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512).
// If you need a secret-key MAC (message authentication code), use the New512
// function with a non-nil key.
//
// BLAKE2X is a construction to compute hash values larger than 64 bytes. It
// can produce hash values between 0 and 4 GiB.
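//
// A minimal usage sketch (data and key are arbitrary byte slices):
//
//	digest := blake2b.Sum512(data) // plain BLAKE2b-512 hash
//	mac, _ := blake2b.New512(key)  // keyed: BLAKE2b as a MAC
//	mac.Write(data)
//	tag := mac.Sum(nil)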
package blake2b
import (
"encoding/binary"
"errors"
"hash"
)
const (
// The blocksize of BLAKE2b in bytes.
BlockSize = 128
// The hash size of BLAKE2b-512 in bytes.
Size = 64
// The hash size of BLAKE2b-384 in bytes.
Size384 = 48
// The hash size of BLAKE2b-256 in bytes.
Size256 = 32
)
var (
useAVX2 bool
useAVX bool
useSSE4 bool
)
var (
errKeySize = errors.New("blake2b: invalid key size")
errHashSize = errors.New("blake2b: invalid hash size")
)
var iv = [8]uint64{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
}
// Sum512 returns the BLAKE2b-512 checksum of the data.
func Sum512(data []byte) [Size]byte {
var sum [Size]byte
checkSum(&sum, Size, data)
return sum
}
// Sum384 returns the BLAKE2b-384 checksum of the data.
func Sum384(data []byte) [Size384]byte {
var sum [Size]byte
var sum384 [Size384]byte
checkSum(&sum, Size384, data)
copy(sum384[:], sum[:Size384])
return sum384
}
// Sum256 returns the BLAKE2b-256 checksum of the data.
func Sum256(data []byte) [Size256]byte {
var sum [Size]byte
var sum256 [Size256]byte
checkSum(&sum, Size256, data)
copy(sum256[:], sum[:Size256])
return sum256
}
// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) }
// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 64 bytes long.
func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) }
// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length.
// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long.
// The hash size can be a value between 1 and 64 but it is highly recommended to use
// values equal or greater than:
// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long).
// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long).
// When the key is nil, the returned hash.Hash implements BinaryMarshaler
// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) }
func newDigest(hashSize int, key []byte) (*digest, error) {
if hashSize < 1 || hashSize > Size {
return nil, errHashSize
}
if len(key) > Size {
return nil, errKeySize
}
d := &digest{
size: hashSize,
keyLen: len(key),
}
copy(d.key[:], key)
d.Reset()
return d, nil
}
func checkSum(sum *[Size]byte, hashSize int, data []byte) {
h := iv
h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24)
var c [2]uint64
if length := len(data); length > BlockSize {
n := length &^ (BlockSize - 1)
if length == n {
n -= BlockSize
}
hashBlocks(&h, &c, 0, data[:n])
data = data[n:]
}
var block [BlockSize]byte
offset := copy(block[:], data)
remaining := uint64(BlockSize - offset)
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
for i, v := range h[:(hashSize+7)/8] {
binary.LittleEndian.PutUint64(sum[8*i:], v)
}
}
type digest struct {
h [8]uint64
c [2]uint64
size int
block [BlockSize]byte
offset int
key [BlockSize]byte
keyLen int
}
const (
magic = "b2b"
marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1
)
func (d *digest) MarshalBinary() ([]byte, error) {
if d.keyLen != 0 {
return nil, errors.New("crypto/blake2b: cannot marshal MACs")
}
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
for i := 0; i < 8; i++ {
b = appendUint64(b, d.h[i])
}
b = appendUint64(b, d.c[0])
b = appendUint64(b, d.c[1])
// Maximum value for size is 64
b = append(b, byte(d.size))
b = append(b, d.block[:]...)
b = append(b, byte(d.offset))
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("crypto/blake2b: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/blake2b: invalid hash state size")
}
b = b[len(magic):]
for i := 0; i < 8; i++ {
b, d.h[i] = consumeUint64(b)
}
b, d.c[0] = consumeUint64(b)
b, d.c[1] = consumeUint64(b)
d.size = int(b[0])
b = b[1:]
copy(d.block[:], b[:BlockSize])
b = b[BlockSize:]
d.offset = int(b[0])
return nil
}
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Size() int { return d.size }
func (d *digest) Reset() {
d.h = iv
d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24)
d.offset, d.c[0], d.c[1] = 0, 0, 0
if d.keyLen > 0 {
d.block = d.key
d.offset = BlockSize
}
}
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
if d.offset > 0 {
remaining := BlockSize - d.offset
if n <= remaining {
d.offset += copy(d.block[d.offset:], p)
return
}
copy(d.block[d.offset:], p[:remaining])
hashBlocks(&d.h, &d.c, 0, d.block[:])
d.offset = 0
p = p[remaining:]
}
if length := len(p); length > BlockSize {
nn := length &^ (BlockSize - 1)
if length == nn {
nn -= BlockSize
}
hashBlocks(&d.h, &d.c, 0, p[:nn])
p = p[nn:]
}
if len(p) > 0 {
d.offset += copy(d.block[:], p)
}
return
}
func (d *digest) Sum(sum []byte) []byte {
var hash [Size]byte
d.finalize(&hash)
return append(sum, hash[:d.size]...)
}
func (d *digest) finalize(hash *[Size]byte) {
var block [BlockSize]byte
copy(block[:], d.block[:d.offset])
remaining := uint64(BlockSize - d.offset)
c := d.c
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
h := d.h
hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:])
for i, v := range h {
binary.LittleEndian.PutUint64(hash[8*i:], v)
}
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.BigEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func appendUint32(b []byte, x uint32) []byte {
var a [4]byte
binary.BigEndian.PutUint32(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := binary.BigEndian.Uint64(b)
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
x := binary.BigEndian.Uint32(b)
return b[4:], x
}

View File

@ -0,0 +1,37 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
package blake2b
import "golang.org/x/sys/cpu"
func init() {
useAVX2 = cpu.X86.HasAVX2
useAVX = cpu.X86.HasAVX
useSSE4 = cpu.X86.HasSSE41
}
//go:noescape
func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
//go:noescape
func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
switch {
case useAVX2:
hashBlocksAVX2(h, c, flag, blocks)
case useAVX:
hashBlocksAVX(h, c, flag, blocks)
case useSSE4:
hashBlocksSSE4(h, c, flag, blocks)
default:
hashBlocksGeneric(h, c, flag, blocks)
}
}

750
vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s generated vendored Normal file
View File

@ -0,0 +1,750 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.7,amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403
DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32
DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302
DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32
DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16
DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16
#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39
#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93
#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e
#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93
#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39
#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \
VPADDQ m0, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m1, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y1_Y1; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y3_Y3; \
VPADDQ m2, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFD $-79, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPSHUFB c40, Y1, Y1; \
VPADDQ m3, Y0, Y0; \
VPADDQ Y1, Y0, Y0; \
VPXOR Y0, Y3, Y3; \
VPSHUFB c48, Y3, Y3; \
VPADDQ Y3, Y2, Y2; \
VPXOR Y2, Y1, Y1; \
VPADDQ Y1, Y1, t; \
VPSRLQ $63, Y1, Y1; \
VPXOR t, Y1, Y1; \
VPERMQ_0x39_Y3_Y3; \
VPERMQ_0x4E_Y2_Y2; \
VPERMQ_0x93_Y1_Y1
#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E
#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26
#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E
#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36
#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E
#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n
#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n
#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n
#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n
#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n
#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01
#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01
#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01
#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01
#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01
#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01
#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01
#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8
#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01
// load msg: Y12 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y12, Y12
// load msg: Y13 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \
VMOVQ_SI_X13(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X13(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y13, Y13
// load msg: Y14 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \
VMOVQ_SI_X14(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X14(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y14, Y14
// load msg: Y15 = (i0, i1, i2, i3)
// i0, i1, i2, i3 must not be 0
#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \
VMOVQ_SI_X15(i0*8); \
VMOVQ_SI_X11(i2*8); \
VPINSRQ_1_SI_X15(i1*8); \
VPINSRQ_1_SI_X11(i3*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X11(6*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \
LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \
LOAD_MSG_AVX2_Y15(9, 11, 13, 15)
#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \
LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \
LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \
VMOVQ_SI_X11(11*8); \
VPSHUFD $0x4E, 0*8(SI), X14; \
VPINSRQ_1_SI_X11(5*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(12, 2, 7, 3)
#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \
VMOVQ_SI_X11(5*8); \
VMOVDQU 11*8(SI), X12; \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y12, Y12; \
VMOVQ_SI_X13(8*8); \
VMOVQ_SI_X11(2*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X11(13*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \
LOAD_MSG_AVX2_Y15(14, 6, 1, 4)
#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \
LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \
LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \
LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \
VMOVQ_SI_X15(6*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X15(10*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \
LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X11(4*8); \
VPINSRQ_1_SI_X13(7*8); \
VPINSRQ_1_SI_X11(15*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \
LOAD_MSG_AVX2_Y15(1, 12, 8, 13)
#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X11_0; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X11(8*8); \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \
LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \
LOAD_MSG_AVX2_Y15(13, 5, 14, 9)
#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \
LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \
LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \
VMOVQ_SI_X14_0; \
VPSHUFD $0x4E, 8*8(SI), X11; \
VPINSRQ_1_SI_X14(6*8); \
VINSERTI128 $1, X11, Y14, Y14; \
LOAD_MSG_AVX2_Y15(7, 3, 2, 11)
#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \
LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \
LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \
LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \
VMOVQ_SI_X15_0; \
VMOVQ_SI_X11(6*8); \
VPINSRQ_1_SI_X15(4*8); \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \
VMOVQ_SI_X12(6*8); \
VMOVQ_SI_X11(11*8); \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y12, Y12; \
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \
VMOVQ_SI_X11(1*8); \
VMOVDQU 12*8(SI), X14; \
VPINSRQ_1_SI_X11(10*8); \
VINSERTI128 $1, X11, Y14, Y14; \
VMOVQ_SI_X15(2*8); \
VMOVDQU 4*8(SI), X11; \
VPINSRQ_1_SI_X15(7*8); \
VINSERTI128 $1, X11, Y15, Y15
#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \
LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \
VMOVQ_SI_X13(2*8); \
VPSHUFD $0x4E, 5*8(SI), X11; \
VPINSRQ_1_SI_X13(4*8); \
VINSERTI128 $1, X11, Y13, Y13; \
LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \
VMOVQ_SI_X15(11*8); \
VMOVQ_SI_X11(12*8); \
VPINSRQ_1_SI_X15(14*8); \
VPINSRQ_1_SI_X11_0; \
VINSERTI128 $1, X11, Y15, Y15
// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, DX
MOVQ SP, R9
ADDQ $31, R9
ANDQ $~31, R9
MOVQ R9, SP
MOVQ CX, 16(SP)
XORQ CX, CX
MOVQ CX, 24(SP)
VMOVDQU ·AVX2_c40<>(SB), Y4
VMOVDQU ·AVX2_c48<>(SB), Y5
VMOVDQU 0(AX), Y8
VMOVDQU 32(AX), Y9
VMOVDQU ·AVX2_iv0<>(SB), Y6
VMOVDQU ·AVX2_iv1<>(SB), Y7
MOVQ 0(BX), R8
MOVQ 8(BX), R9
MOVQ R9, 8(SP)
loop:
ADDQ $128, R8
MOVQ R8, 0(SP)
CMPQ R8, $128
JGE noinc
INCQ R9
MOVQ R9, 8(SP)
noinc:
VMOVDQA Y8, Y0
VMOVDQA Y9, Y1
VMOVDQA Y6, Y2
VPXOR 0(SP), Y7, Y3
LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15()
VMOVDQA Y12, 32(SP)
VMOVDQA Y13, 64(SP)
VMOVDQA Y14, 96(SP)
VMOVDQA Y15, 128(SP)
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3()
VMOVDQA Y12, 160(SP)
VMOVDQA Y13, 192(SP)
VMOVDQA Y14, 224(SP)
VMOVDQA Y15, 256(SP)
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0()
ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5)
ROUND_AVX2(32(SP), 64(SP), 96(SP), 128(SP), Y10, Y4, Y5)
ROUND_AVX2(160(SP), 192(SP), 224(SP), 256(SP), Y10, Y4, Y5)
VPXOR Y0, Y8, Y8
VPXOR Y1, Y9, Y9
VPXOR Y2, Y8, Y8
VPXOR Y3, Y9, Y9
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
VMOVDQU Y8, 0(AX)
VMOVDQU Y9, 32(AX)
VZEROUPPER
MOVQ DX, SP
RET
#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA
#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB
#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF
#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD
#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE
#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF
#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7
#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7
#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF
#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF
#define SHUFFLE_AVX() \
VMOVDQA X6, X13; \
VMOVDQA X2, X14; \
VMOVDQA X4, X6; \
VPUNPCKLQDQ_X13_X13_X15; \
VMOVDQA X5, X4; \
VMOVDQA X6, X5; \
VPUNPCKHQDQ_X15_X7_X6; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X13_X7; \
VPUNPCKLQDQ_X3_X3_X15; \
VPUNPCKHQDQ_X15_X2_X2; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X3_X3
#define SHUFFLE_AVX_INV() \
VMOVDQA X2, X13; \
VMOVDQA X4, X14; \
VPUNPCKLQDQ_X2_X2_X15; \
VMOVDQA X5, X4; \
VPUNPCKHQDQ_X15_X3_X2; \
VMOVDQA X14, X5; \
VPUNPCKLQDQ_X3_X3_X15; \
VMOVDQA X6, X14; \
VPUNPCKHQDQ_X15_X13_X3; \
VPUNPCKLQDQ_X7_X7_X15; \
VPUNPCKHQDQ_X15_X6_X6; \
VPUNPCKLQDQ_X14_X14_X15; \
VPUNPCKHQDQ_X15_X7_X7
#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
VPADDQ m0, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m1, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFD $-79, v6, v6; \
VPSHUFD $-79, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPSHUFB c40, v2, v2; \
VPSHUFB c40, v3, v3; \
VPADDQ m2, v0, v0; \
VPADDQ v2, v0, v0; \
VPADDQ m3, v1, v1; \
VPADDQ v3, v1, v1; \
VPXOR v0, v6, v6; \
VPXOR v1, v7, v7; \
VPSHUFB c48, v6, v6; \
VPSHUFB c48, v7, v7; \
VPADDQ v6, v4, v4; \
VPADDQ v7, v5, v5; \
VPXOR v4, v2, v2; \
VPXOR v5, v3, v3; \
VPADDQ v2, v2, t0; \
VPSRLQ $63, v2, v2; \
VPXOR t0, v2, v2; \
VPADDQ v3, v3, t0; \
VPSRLQ $63, v3, v3; \
VPXOR t0, v3, v3
// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7)
// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0
#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \
VMOVQ_SI_X12(i0*8); \
VMOVQ_SI_X13(i2*8); \
VMOVQ_SI_X14(i4*8); \
VMOVQ_SI_X15(i6*8); \
VPINSRQ_1_SI_X12(i1*8); \
VPINSRQ_1_SI_X13(i3*8); \
VPINSRQ_1_SI_X14(i5*8); \
VPINSRQ_1_SI_X15(i7*8)
// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7)
#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \
VMOVQ_SI_X12_0; \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(1*8); \
VMOVQ_SI_X15(5*8); \
VPINSRQ_1_SI_X12(2*8); \
VPINSRQ_1_SI_X13(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(7*8)
// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3)
#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \
VPSHUFD $0x4E, 0*8(SI), X12; \
VMOVQ_SI_X13(11*8); \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(7*8); \
VPINSRQ_1_SI_X13(5*8); \
VPINSRQ_1_SI_X14(2*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13)
#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \
VMOVDQU 11*8(SI), X12; \
VMOVQ_SI_X13(5*8); \
VMOVQ_SI_X14(8*8); \
VMOVQ_SI_X15(2*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14_0; \
VPINSRQ_1_SI_X15(13*8)
// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8)
#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13(4*8); \
VMOVQ_SI_X14(6*8); \
VMOVQ_SI_X15_0; \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(15*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15)
#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \
VMOVQ_SI_X12(9*8); \
VMOVQ_SI_X13(2*8); \
VMOVQ_SI_X14_0; \
VMOVQ_SI_X15(4*8); \
VPINSRQ_1_SI_X12(5*8); \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VPINSRQ_1_SI_X15(15*8)
// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3)
#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \
VMOVQ_SI_X12(2*8); \
VMOVQ_SI_X13_0; \
VMOVQ_SI_X14(12*8); \
VMOVQ_SI_X15(11*8); \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X13(8*8); \
VPINSRQ_1_SI_X14(10*8); \
VPINSRQ_1_SI_X15(3*8)
// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11)
#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \
MOVQ 0*8(SI), X12; \
VPSHUFD $0x4E, 8*8(SI), X13; \
MOVQ 7*8(SI), X14; \
MOVQ 2*8(SI), X15; \
VPINSRQ_1_SI_X12(6*8); \
VPINSRQ_1_SI_X14(3*8); \
VPINSRQ_1_SI_X15(11*8)
// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8)
#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \
MOVQ 6*8(SI), X12; \
MOVQ 11*8(SI), X13; \
MOVQ 15*8(SI), X14; \
MOVQ 3*8(SI), X15; \
VPINSRQ_1_SI_X12(14*8); \
VPINSRQ_1_SI_X13_0; \
VPINSRQ_1_SI_X14(9*8); \
VPINSRQ_1_SI_X15(8*8)
// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10)
#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \
MOVQ 5*8(SI), X12; \
MOVQ 8*8(SI), X13; \
MOVQ 0*8(SI), X14; \
MOVQ 6*8(SI), X15; \
VPINSRQ_1_SI_X12(15*8); \
VPINSRQ_1_SI_X13(2*8); \
VPINSRQ_1_SI_X14(4*8); \
VPINSRQ_1_SI_X15(10*8)
// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5)
#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \
VMOVDQU 12*8(SI), X12; \
MOVQ 1*8(SI), X13; \
MOVQ 2*8(SI), X14; \
VPINSRQ_1_SI_X13(10*8); \
VPINSRQ_1_SI_X14(7*8); \
VMOVDQU 4*8(SI), X15
// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0)
#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \
MOVQ 15*8(SI), X12; \
MOVQ 3*8(SI), X13; \
MOVQ 11*8(SI), X14; \
MOVQ 12*8(SI), X15; \
VPINSRQ_1_SI_X12(9*8); \
VPINSRQ_1_SI_X13(13*8); \
VPINSRQ_1_SI_X14(14*8); \
VPINSRQ_1_SI_X15_0
// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, BP
MOVQ SP, R9
ADDQ $15, R9
ANDQ $~15, R9
MOVQ R9, SP
VMOVDQU ·AVX_c40<>(SB), X0
VMOVDQU ·AVX_c48<>(SB), X1
VMOVDQA X0, X8
VMOVDQA X1, X9
VMOVDQU ·AVX_iv3<>(SB), X0
VMOVDQA X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·AVX_iv3 ^ (CX || 0)
VMOVDQU 0(AX), X10
VMOVDQU 16(AX), X11
VMOVDQU 32(AX), X2
VMOVDQU 48(AX), X3
MOVQ 0(BX), R8
MOVQ 8(BX), R9
loop:
ADDQ $128, R8
CMPQ R8, $128
JGE noinc
INCQ R9
noinc:
VMOVQ_R8_X15
VPINSRQ_1_R9_X15
VMOVDQA X10, X0
VMOVDQA X11, X1
VMOVDQU ·AVX_iv0<>(SB), X4
VMOVDQU ·AVX_iv1<>(SB), X5
VMOVDQU ·AVX_iv2<>(SB), X6
VPXOR X15, X6, X6
VMOVDQA 0(SP), X7
LOAD_MSG_AVX_0_2_4_6_1_3_5_7()
VMOVDQA X12, 16(SP)
VMOVDQA X13, 32(SP)
VMOVDQA X14, 48(SP)
VMOVDQA X15, 64(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15)
VMOVDQA X12, 80(SP)
VMOVDQA X13, 96(SP)
VMOVDQA X14, 112(SP)
VMOVDQA X15, 128(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6)
VMOVDQA X12, 144(SP)
VMOVDQA X13, 160(SP)
VMOVDQA X14, 176(SP)
VMOVDQA X15, 192(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_1_0_11_5_12_2_7_3()
VMOVDQA X12, 208(SP)
VMOVDQA X13, 224(SP)
VMOVDQA X14, 240(SP)
VMOVDQA X15, 256(SP)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_11_12_5_15_8_0_2_13()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_2_5_4_15_6_10_0_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_9_5_2_10_0_7_4_15()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_2_6_0_8_12_10_11_3()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_0_6_9_8_7_3_2_11()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_5_15_8_2_0_4_6_10()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX_6_14_11_0_15_9_3_8()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_12_13_1_10_2_7_4_5()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX()
LOAD_MSG_AVX_15_9_3_13_11_14_12_0()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9)
SHUFFLE_AVX_INV()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X15, X8, X9)
SHUFFLE_AVX_INV()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X15, X8, X9)
SHUFFLE_AVX()
HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X15, X8, X9)
SHUFFLE_AVX_INV()
VMOVDQU 32(AX), X14
VMOVDQU 48(AX), X15
VPXOR X0, X10, X10
VPXOR X1, X11, X11
VPXOR X2, X14, X14
VPXOR X3, X15, X15
VPXOR X4, X10, X10
VPXOR X5, X11, X11
VPXOR X6, X14, X2
VPXOR X7, X15, X3
VMOVDQU X2, 32(AX)
VMOVDQU X3, 48(AX)
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
VMOVDQU X10, 0(AX)
VMOVDQU X11, 16(AX)
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
VZEROUPPER
MOVQ BP, SP
RET

24
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.7,amd64,!gccgo,!appengine
package blake2b
import "golang.org/x/sys/cpu"
func init() {
useSSE4 = cpu.X86.HasSSE41
}
//go:noescape
func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
if useSSE4 {
hashBlocksSSE4(h, c, flag, blocks)
} else {
hashBlocksGeneric(h, c, flag, blocks)
}
}

281
vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s generated vendored Normal file
View File

@ -0,0 +1,281 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
#include "textflag.h"
DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v6, t1; \
PUNPCKLQDQ v6, t2; \
PUNPCKHQDQ v7, v6; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ v7, t2; \
MOVO t1, v7; \
MOVO v2, t1; \
PUNPCKHQDQ t2, v7; \
PUNPCKLQDQ v3, t2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v3
#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \
MOVO v4, t1; \
MOVO v5, v4; \
MOVO t1, v5; \
MOVO v2, t1; \
PUNPCKLQDQ v2, t2; \
PUNPCKHQDQ v3, v2; \
PUNPCKHQDQ t2, v2; \
PUNPCKLQDQ v3, t2; \
MOVO t1, v3; \
MOVO v6, t1; \
PUNPCKHQDQ t2, v3; \
PUNPCKLQDQ v7, t2; \
PUNPCKHQDQ t2, v6; \
PUNPCKLQDQ t1, t2; \
PUNPCKHQDQ t2, v7
#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \
PADDQ m0, v0; \
PADDQ m1, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFD $0xB1, v6, v6; \
PSHUFD $0xB1, v7, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
PSHUFB c40, v2; \
PSHUFB c40, v3; \
PADDQ m2, v0; \
PADDQ m3, v1; \
PADDQ v2, v0; \
PADDQ v3, v1; \
PXOR v0, v6; \
PXOR v1, v7; \
PSHUFB c48, v6; \
PSHUFB c48, v7; \
PADDQ v6, v4; \
PADDQ v7, v5; \
PXOR v4, v2; \
PXOR v5, v3; \
MOVOU v2, t0; \
PADDQ v2, t0; \
PSRLQ $63, v2; \
PXOR t0, v2; \
MOVOU v3, t0; \
PADDQ v3, t0; \
PSRLQ $63, v3; \
PXOR t0, v3
#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \
MOVQ i0*8(src), m0; \
PINSRQ $1, i1*8(src), m0; \
MOVQ i2*8(src), m1; \
PINSRQ $1, i3*8(src), m1; \
MOVQ i4*8(src), m2; \
PINSRQ $1, i5*8(src), m2; \
MOVQ i6*8(src), m3; \
PINSRQ $1, i7*8(src), m3
// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
MOVQ h+0(FP), AX
MOVQ c+8(FP), BX
MOVQ flag+16(FP), CX
MOVQ blocks_base+24(FP), SI
MOVQ blocks_len+32(FP), DI
MOVQ SP, BP
MOVQ SP, R9
ADDQ $15, R9
ANDQ $~15, R9
MOVQ R9, SP
MOVOU ·iv3<>(SB), X0
MOVO X0, 0(SP)
XORQ CX, 0(SP) // 0(SP) = ·iv3 ^ (CX || 0)
MOVOU ·c40<>(SB), X13
MOVOU ·c48<>(SB), X14
MOVOU 0(AX), X12
MOVOU 16(AX), X15
MOVQ 0(BX), R8
MOVQ 8(BX), R9
loop:
ADDQ $128, R8
CMPQ R8, $128
JGE noinc
INCQ R9
noinc:
MOVQ R8, X8
PINSRQ $1, R9, X8
MOVO X12, X0
MOVO X15, X1
MOVOU 32(AX), X2
MOVOU 48(AX), X3
MOVOU ·iv0<>(SB), X4
MOVOU ·iv1<>(SB), X5
MOVOU ·iv2<>(SB), X6
PXOR X8, X6
MOVO 0(SP), X7
LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7)
MOVO X8, 16(SP)
MOVO X9, 32(SP)
MOVO X10, 48(SP)
MOVO X11, 64(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15)
MOVO X8, 80(SP)
MOVO X9, 96(SP)
MOVO X10, 112(SP)
MOVO X11, 128(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6)
MOVO X8, 144(SP)
MOVO X9, 160(SP)
MOVO X10, 176(SP)
MOVO X11, 192(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3)
MOVO X8, 208(SP)
MOVO X9, 224(SP)
MOVO X10, 240(SP)
MOVO X11, 256(SP)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(SP), 96(SP), 112(SP), 128(SP), X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(SP), 160(SP), 176(SP), 192(SP), X11, X13, X14)
SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9)
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(SP), 224(SP), 240(SP), 256(SP), X11, X13, X14)
SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9)
MOVOU 32(AX), X10
MOVOU 48(AX), X11
PXOR X0, X12
PXOR X1, X15
PXOR X2, X10
PXOR X3, X11
PXOR X4, X12
PXOR X5, X15
PXOR X6, X10
PXOR X7, X11
MOVOU X10, 32(AX)
MOVOU X11, 48(AX)
LEAQ 128(SI), SI
SUBQ $128, DI
JNE loop
MOVOU X12, 0(AX)
MOVOU X15, 16(AX)
MOVQ R8, 0(BX)
MOVQ R9, 8(BX)
MOVQ BP, SP
RET

179
vendor/golang.org/x/crypto/blake2b/blake2b_generic.go generated vendored Normal file
View File

@ -0,0 +1,179 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2b
import "encoding/binary"
// the precomputed values for BLAKE2b
// there are 12 16-byte arrays - one for each round
// the entries are calculated from the sigma constants.
var precomputed = [12][16]byte{
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
{11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
{7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
{9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
{2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
{12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
{13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
{6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
{10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second
}
func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
var m [16]uint64
c0, c1 := c[0], c[1]
for i := 0; i < len(blocks); {
c0 += BlockSize
if c0 < BlockSize {
c1++
}
v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
v12 ^= c0
v13 ^= c1
v14 ^= flag
for j := range m {
m[j] = binary.LittleEndian.Uint64(blocks[i:])
i += 8
}
for j := range precomputed {
s := &(precomputed[j])
v0 += m[s[0]]
v0 += v4
v12 ^= v0
v12 = v12<<(64-32) | v12>>32
v8 += v12
v4 ^= v8
v4 = v4<<(64-24) | v4>>24
v1 += m[s[1]]
v1 += v5
v13 ^= v1
v13 = v13<<(64-32) | v13>>32
v9 += v13
v5 ^= v9
v5 = v5<<(64-24) | v5>>24
v2 += m[s[2]]
v2 += v6
v14 ^= v2
v14 = v14<<(64-32) | v14>>32
v10 += v14
v6 ^= v10
v6 = v6<<(64-24) | v6>>24
v3 += m[s[3]]
v3 += v7
v15 ^= v3
v15 = v15<<(64-32) | v15>>32
v11 += v15
v7 ^= v11
v7 = v7<<(64-24) | v7>>24
v0 += m[s[4]]
v0 += v4
v12 ^= v0
v12 = v12<<(64-16) | v12>>16
v8 += v12
v4 ^= v8
v4 = v4<<(64-63) | v4>>63
v1 += m[s[5]]
v1 += v5
v13 ^= v1
v13 = v13<<(64-16) | v13>>16
v9 += v13
v5 ^= v9
v5 = v5<<(64-63) | v5>>63
v2 += m[s[6]]
v2 += v6
v14 ^= v2
v14 = v14<<(64-16) | v14>>16
v10 += v14
v6 ^= v10
v6 = v6<<(64-63) | v6>>63
v3 += m[s[7]]
v3 += v7
v15 ^= v3
v15 = v15<<(64-16) | v15>>16
v11 += v15
v7 ^= v11
v7 = v7<<(64-63) | v7>>63
v0 += m[s[8]]
v0 += v5
v15 ^= v0
v15 = v15<<(64-32) | v15>>32
v10 += v15
v5 ^= v10
v5 = v5<<(64-24) | v5>>24
v1 += m[s[9]]
v1 += v6
v12 ^= v1
v12 = v12<<(64-32) | v12>>32
v11 += v12
v6 ^= v11
v6 = v6<<(64-24) | v6>>24
v2 += m[s[10]]
v2 += v7
v13 ^= v2
v13 = v13<<(64-32) | v13>>32
v8 += v13
v7 ^= v8
v7 = v7<<(64-24) | v7>>24
v3 += m[s[11]]
v3 += v4
v14 ^= v3
v14 = v14<<(64-32) | v14>>32
v9 += v14
v4 ^= v9
v4 = v4<<(64-24) | v4>>24
v0 += m[s[12]]
v0 += v5
v15 ^= v0
v15 = v15<<(64-16) | v15>>16
v10 += v15
v5 ^= v10
v5 = v5<<(64-63) | v5>>63
v1 += m[s[13]]
v1 += v6
v12 ^= v1
v12 = v12<<(64-16) | v12>>16
v11 += v12
v6 ^= v11
v6 = v6<<(64-63) | v6>>63
v2 += m[s[14]]
v2 += v7
v13 ^= v2
v13 = v13<<(64-16) | v13>>16
v8 += v13
v7 ^= v8
v7 = v7<<(64-63) | v7>>63
v3 += m[s[15]]
v3 += v4
v14 ^= v3
v14 = v14<<(64-16) | v14>>16
v9 += v14
v4 ^= v9
v4 = v4<<(64-63) | v4>>63
}
h[0] ^= v0 ^ v8
h[1] ^= v1 ^ v9
h[2] ^= v2 ^ v10
h[3] ^= v3 ^ v11
h[4] ^= v4 ^ v12
h[5] ^= v5 ^ v13
h[6] ^= v6 ^ v14
h[7] ^= v7 ^ v15
}
c[0], c[1] = c0, c1
}
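The unrolled loop above applies the BLAKE2b G function eight times per round, using right-rotations of 32, 24, 16 and 63 bits. As an illustrative sketch (not part of the vendored file), a compact scalar equivalent of one G application, assuming only the standard math/bits package, could look like this:
// Illustrative sketch only: one BLAKE2b G application, equivalent to each
// unrolled group of statements in hashBlocksGeneric above.
package main
import (
	"fmt"
	"math/bits"
)
// g mixes two message words x, y into the working-vector entries a, b, c, d
// using the BLAKE2b rotation constants 32, 24, 16 and 63 (right rotations).
func g(a, b, c, d, x, y uint64) (uint64, uint64, uint64, uint64) {
	a += b + x
	d = bits.RotateLeft64(d^a, -32)
	c += d
	b = bits.RotateLeft64(b^c, -24)
	a += b + y
	d = bits.RotateLeft64(d^a, -16)
	c += d
	b = bits.RotateLeft64(b^c, -63)
	return a, b, c, d
}
func main() {
	a, b, c, d := g(1, 2, 3, 4, 5, 6)
	fmt.Println(a, b, c, d)
}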

11
vendor/golang.org/x/crypto/blake2b/blake2b_ref.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64 appengine gccgo
package blake2b
func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) {
hashBlocksGeneric(h, c, flag, blocks)
}

177
vendor/golang.org/x/crypto/blake2b/blake2x.go generated vendored Normal file
View File

@ -0,0 +1,177 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2b
import (
"encoding/binary"
"errors"
"io"
)
// XOF defines the interface to hash functions that
// support arbitrary-length output.
type XOF interface {
// Write absorbs more data into the hash's state. It panics if called
// after Read.
io.Writer
// Read reads more output from the hash. It returns io.EOF if the limit
// has been reached.
io.Reader
// Clone returns a copy of the XOF in its current state.
Clone() XOF
// Reset resets the XOF to its initial state.
Reset()
}
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
// the length of the output is not known in advance.
const OutputLengthUnknown = 0
// magicUnknownOutputLength is a magic value for the output size that indicates
// an unknown number of output bytes.
const magicUnknownOutputLength = (1 << 32) - 1
// maxOutputLength is the absolute maximum number of bytes to produce when the
// number of output bytes is unknown.
const maxOutputLength = (1 << 32) * 64
// NewXOF creates a new variable-output-length hash. The hash either produces a
// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes
// (size == OutputLengthUnknown). In the latter case, an absolute limit of
// 256GiB applies.
//
// A non-nil key turns the hash into a MAC. The key must be between
// zero and 32 bytes long.
func NewXOF(size uint32, key []byte) (XOF, error) {
if len(key) > Size {
return nil, errKeySize
}
if size == magicUnknownOutputLength {
// 2^32-1 indicates an unknown number of bytes and thus isn't a
// valid length.
return nil, errors.New("blake2b: XOF length too large")
}
if size == OutputLengthUnknown {
size = magicUnknownOutputLength
}
x := &xof{
d: digest{
size: Size,
keyLen: len(key),
},
length: size,
}
copy(x.d.key[:], key)
x.Reset()
return x, nil
}
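A minimal usage sketch for the XOF constructed above (not part of the vendored file), assuming the package is imported as golang.org/x/crypto/blake2b: request a fixed output length, absorb input with Write, then squeeze the output with Read.
// Illustrative sketch only: fixed-length BLAKE2Xb output via the XOF API.
package main
import (
	"fmt"
	"io"
	"golang.org/x/crypto/blake2b"
)
func main() {
	x, err := blake2b.NewXOF(64, nil) // 64 output bytes, nil key (no MAC)
	if err != nil {
		panic(err)
	}
	x.Write([]byte("hello world"))
	out := make([]byte, 64)
	if _, err := io.ReadFull(x, out); err != nil { // Read returns io.EOF once the limit is reached
		panic(err)
	}
	fmt.Printf("%x\n", out)
}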
type xof struct {
d digest
length uint32
remaining uint64
cfg, root, block [Size]byte
offset int
nodeOffset uint32
readMode bool
}
func (x *xof) Write(p []byte) (n int, err error) {
if x.readMode {
panic("blake2b: write to XOF after read")
}
return x.d.Write(p)
}
func (x *xof) Clone() XOF {
clone := *x
return &clone
}
func (x *xof) Reset() {
x.cfg[0] = byte(Size)
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length
x.cfg[17] = byte(Size) // inner hash size
x.d.Reset()
x.d.h[1] ^= uint64(x.length) << 32
x.remaining = uint64(x.length)
if x.remaining == magicUnknownOutputLength {
x.remaining = maxOutputLength
}
x.offset, x.nodeOffset = 0, 0
x.readMode = false
}
func (x *xof) Read(p []byte) (n int, err error) {
if !x.readMode {
x.d.finalize(&x.root)
x.readMode = true
}
if x.remaining == 0 {
return 0, io.EOF
}
n = len(p)
if uint64(n) > x.remaining {
n = int(x.remaining)
p = p[:n]
}
if x.offset > 0 {
blockRemaining := Size - x.offset
if n < blockRemaining {
x.offset += copy(p, x.block[x.offset:])
x.remaining -= uint64(n)
return
}
copy(p, x.block[x.offset:])
p = p[blockRemaining:]
x.offset = 0
x.remaining -= uint64(blockRemaining)
}
for len(p) >= Size {
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
copy(p, x.block[:])
p = p[Size:]
x.remaining -= uint64(Size)
}
if todo := len(p); todo > 0 {
if x.remaining < uint64(Size) {
x.cfg[0] = byte(x.remaining)
}
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
x.offset = copy(p, x.block[:todo])
x.remaining -= uint64(todo)
}
return
}
func (d *digest) initConfig(cfg *[Size]byte) {
d.offset, d.c[0], d.c[1] = 0, 0, 0
for i := range d.h {
d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:])
}
}

32
vendor/golang.org/x/crypto/blake2b/register.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9
package blake2b
import (
"crypto"
"hash"
)
func init() {
newHash256 := func() hash.Hash {
h, _ := New256(nil)
return h
}
newHash384 := func() hash.Hash {
h, _ := New384(nil)
return h
}
newHash512 := func() hash.Hash {
h, _ := New512(nil)
return h
}
crypto.RegisterHash(crypto.BLAKE2b_256, newHash256)
crypto.RegisterHash(crypto.BLAKE2b_384, newHash384)
crypto.RegisterHash(crypto.BLAKE2b_512, newHash512)
}
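As a hedged sketch (not part of the vendored file): once the init above has run, e.g. via a blank import of the package on go1.9+, the BLAKE2b hashes are reachable through the standard crypto registry.
// Illustrative sketch only: constructing BLAKE2b-256 through crypto.RegisterHash.
package main
import (
	"crypto"
	"fmt"
	_ "golang.org/x/crypto/blake2b" // side effect: registers BLAKE2b_256/384/512
)
func main() {
	h := crypto.BLAKE2b_256.New()
	h.Write([]byte("hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}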

244
vendor/golang.org/x/crypto/blake2s/blake2s.go generated vendored Normal file
View File

@ -0,0 +1,244 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package blake2s implements the BLAKE2s hash algorithm defined by RFC 7693
// and the extendable output function (XOF) BLAKE2Xs.
//
// For a detailed specification of BLAKE2s see https://blake2.net/blake2.pdf
// and for BLAKE2Xs see https://blake2.net/blake2x.pdf
//
// If you aren't sure which function you need, use BLAKE2s (Sum256 or New256).
// If you need a secret-key MAC (message authentication code), use the New256
// function with a non-nil key.
//
// BLAKE2X is a construction to compute hash values larger than 32 bytes. It
// can produce hash values between 0 and 65535 bytes.
package blake2s // import "golang.org/x/crypto/blake2s"
import (
"encoding/binary"
"errors"
"hash"
)
const (
// The blocksize of BLAKE2s in bytes.
BlockSize = 64
// The hash size of BLAKE2s-256 in bytes.
Size = 32
// The hash size of BLAKE2s-128 in bytes.
Size128 = 16
)
var errKeySize = errors.New("blake2s: invalid key size")
var iv = [8]uint32{
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
}
// Sum256 returns the BLAKE2s-256 checksum of the data.
func Sum256(data []byte) [Size]byte {
var sum [Size]byte
checkSum(&sum, Size, data)
return sum
}
// New256 returns a new hash.Hash computing the BLAKE2s-256 checksum. A non-nil
// key turns the hash into a MAC. The key must be between zero and 32 bytes long.
// When the key is nil, the returned hash.Hash implements BinaryMarshaler
// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash.
func New256(key []byte) (hash.Hash, error) { return newDigest(Size, key) }
// New128 returns a new hash.Hash computing the BLAKE2s-128 checksum given a
// non-empty key. Note that a 128-bit digest is too small to be secure as a
// cryptographic hash and should only be used as a MAC, thus the key argument
// is not optional.
func New128(key []byte) (hash.Hash, error) {
if len(key) == 0 {
return nil, errors.New("blake2s: a key is required for a 128-bit hash")
}
return newDigest(Size128, key)
}
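A minimal usage sketch (not part of the vendored file), assuming the package is imported as golang.org/x/crypto/blake2s: Sum256 for a one-shot digest, and New256 with a non-nil key for a MAC, as described in the doc comments above.
// Illustrative sketch only: plain hashing and keyed (MAC) mode with BLAKE2s-256.
package main
import (
	"fmt"
	"golang.org/x/crypto/blake2s"
)
func main() {
	// One-shot digest.
	sum := blake2s.Sum256([]byte("hello"))
	fmt.Printf("digest: %x\n", sum)
	// Keyed mode turns BLAKE2s into a MAC (key length up to 32 bytes).
	mac, err := blake2s.New256([]byte("0123456789abcdef"))
	if err != nil {
		panic(err)
	}
	mac.Write([]byte("message"))
	fmt.Printf("mac:    %x\n", mac.Sum(nil))
}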
func newDigest(hashSize int, key []byte) (*digest, error) {
if len(key) > Size {
return nil, errKeySize
}
d := &digest{
size: hashSize,
keyLen: len(key),
}
copy(d.key[:], key)
d.Reset()
return d, nil
}
func checkSum(sum *[Size]byte, hashSize int, data []byte) {
var (
h [8]uint32
c [2]uint32
)
h = iv
h[0] ^= uint32(hashSize) | (1 << 16) | (1 << 24)
if length := len(data); length > BlockSize {
n := length &^ (BlockSize - 1)
if length == n {
n -= BlockSize
}
hashBlocks(&h, &c, 0, data[:n])
data = data[n:]
}
var block [BlockSize]byte
offset := copy(block[:], data)
remaining := uint32(BlockSize - offset)
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
hashBlocks(&h, &c, 0xFFFFFFFF, block[:])
for i, v := range h {
binary.LittleEndian.PutUint32(sum[4*i:], v)
}
}
type digest struct {
h [8]uint32
c [2]uint32
size int
block [BlockSize]byte
offset int
key [BlockSize]byte
keyLen int
}
const (
magic = "b2s"
marshaledSize = len(magic) + 8*4 + 2*4 + 1 + BlockSize + 1
)
func (d *digest) MarshalBinary() ([]byte, error) {
if d.keyLen != 0 {
return nil, errors.New("crypto/blake2s: cannot marshal MACs")
}
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
for i := 0; i < 8; i++ {
b = appendUint32(b, d.h[i])
}
b = appendUint32(b, d.c[0])
b = appendUint32(b, d.c[1])
// Maximum value for size is 32
b = append(b, byte(d.size))
b = append(b, d.block[:]...)
b = append(b, byte(d.offset))
return b, nil
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("crypto/blake2s: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("crypto/blake2s: invalid hash state size")
}
b = b[len(magic):]
for i := 0; i < 8; i++ {
b, d.h[i] = consumeUint32(b)
}
b, d.c[0] = consumeUint32(b)
b, d.c[1] = consumeUint32(b)
d.size = int(b[0])
b = b[1:]
copy(d.block[:], b[:BlockSize])
b = b[BlockSize:]
d.offset = int(b[0])
return nil
}
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Size() int { return d.size }
func (d *digest) Reset() {
d.h = iv
d.h[0] ^= uint32(d.size) | (uint32(d.keyLen) << 8) | (1 << 16) | (1 << 24)
d.offset, d.c[0], d.c[1] = 0, 0, 0
if d.keyLen > 0 {
d.block = d.key
d.offset = BlockSize
}
}
func (d *digest) Write(p []byte) (n int, err error) {
n = len(p)
if d.offset > 0 {
remaining := BlockSize - d.offset
if n <= remaining {
d.offset += copy(d.block[d.offset:], p)
return
}
copy(d.block[d.offset:], p[:remaining])
hashBlocks(&d.h, &d.c, 0, d.block[:])
d.offset = 0
p = p[remaining:]
}
if length := len(p); length > BlockSize {
nn := length &^ (BlockSize - 1)
if length == nn {
nn -= BlockSize
}
hashBlocks(&d.h, &d.c, 0, p[:nn])
p = p[nn:]
}
d.offset += copy(d.block[:], p)
return
}
func (d *digest) Sum(sum []byte) []byte {
var hash [Size]byte
d.finalize(&hash)
return append(sum, hash[:d.size]...)
}
func (d *digest) finalize(hash *[Size]byte) {
var block [BlockSize]byte
h := d.h
c := d.c
copy(block[:], d.block[:d.offset])
remaining := uint32(BlockSize - d.offset)
if c[0] < remaining {
c[1]--
}
c[0] -= remaining
hashBlocks(&h, &c, 0xFFFFFFFF, block[:])
for i, v := range h {
binary.LittleEndian.PutUint32(hash[4*i:], v)
}
}
func appendUint32(b []byte, x uint32) []byte {
var a [4]byte
binary.BigEndian.PutUint32(a[:], x)
return append(b, a[:]...)
}
func consumeUint32(b []byte) ([]byte, uint32) {
x := binary.BigEndian.Uint32(b)
return b[4:], x
}

32
vendor/golang.org/x/crypto/blake2s/blake2s_386.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386,!gccgo,!appengine
package blake2s
import "golang.org/x/sys/cpu"
var (
useSSE4 = false
useSSSE3 = cpu.X86.HasSSSE3
useSSE2 = cpu.X86.HasSSE2
)
//go:noescape
func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
//go:noescape
func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) {
switch {
case useSSSE3:
hashBlocksSSSE3(h, c, flag, blocks)
case useSSE2:
hashBlocksSSE2(h, c, flag, blocks)
default:
hashBlocksGeneric(h, c, flag, blocks)
}
}

435
vendor/golang.org/x/crypto/blake2s/blake2s_386.s generated vendored Normal file
View File

@ -0,0 +1,435 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build 386,!gccgo,!appengine
#include "textflag.h"
DATA iv0<>+0x00(SB)/4, $0x6a09e667
DATA iv0<>+0x04(SB)/4, $0xbb67ae85
DATA iv0<>+0x08(SB)/4, $0x3c6ef372
DATA iv0<>+0x0c(SB)/4, $0xa54ff53a
GLOBL iv0<>(SB), (NOPTR+RODATA), $16
DATA iv1<>+0x00(SB)/4, $0x510e527f
DATA iv1<>+0x04(SB)/4, $0x9b05688c
DATA iv1<>+0x08(SB)/4, $0x1f83d9ab
DATA iv1<>+0x0c(SB)/4, $0x5be0cd19
GLOBL iv1<>(SB), (NOPTR+RODATA), $16
DATA rol16<>+0x00(SB)/8, $0x0504070601000302
DATA rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
GLOBL rol16<>(SB), (NOPTR+RODATA), $16
DATA rol8<>+0x00(SB)/8, $0x0407060500030201
DATA rol8<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
GLOBL rol8<>(SB), (NOPTR+RODATA), $16
DATA counter<>+0x00(SB)/8, $0x40
DATA counter<>+0x08(SB)/8, $0x0
GLOBL counter<>(SB), (NOPTR+RODATA), $16
#define ROTL_SSE2(n, t, v) \
MOVO v, t; \
PSLLL $n, t; \
PSRLL $(32-n), v; \
PXOR t, v
#define ROTL_SSSE3(c, v) \
PSHUFB c, v
#define ROUND_SSE2(v0, v1, v2, v3, m0, m1, m2, m3, t) \
PADDL m0, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(16, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m1, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(24, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v1, v1; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v3, v3; \
PADDL m2, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(16, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m3, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(24, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v3, v3; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v1, v1
#define ROUND_SSSE3(v0, v1, v2, v3, m0, m1, m2, m3, t, c16, c8) \
PADDL m0, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c16, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m1, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c8, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v1, v1; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v3, v3; \
PADDL m2, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c16, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m3, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c8, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v3, v3; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v1, v1
#define PRECOMPUTE(dst, off, src, t) \
MOVL 0*4(src), t; \
MOVL t, 0*4+off+0(dst); \
MOVL t, 9*4+off+64(dst); \
MOVL t, 5*4+off+128(dst); \
MOVL t, 14*4+off+192(dst); \
MOVL t, 4*4+off+256(dst); \
MOVL t, 2*4+off+320(dst); \
MOVL t, 8*4+off+384(dst); \
MOVL t, 12*4+off+448(dst); \
MOVL t, 3*4+off+512(dst); \
MOVL t, 15*4+off+576(dst); \
MOVL 1*4(src), t; \
MOVL t, 4*4+off+0(dst); \
MOVL t, 8*4+off+64(dst); \
MOVL t, 14*4+off+128(dst); \
MOVL t, 5*4+off+192(dst); \
MOVL t, 12*4+off+256(dst); \
MOVL t, 11*4+off+320(dst); \
MOVL t, 1*4+off+384(dst); \
MOVL t, 6*4+off+448(dst); \
MOVL t, 10*4+off+512(dst); \
MOVL t, 3*4+off+576(dst); \
MOVL 2*4(src), t; \
MOVL t, 1*4+off+0(dst); \
MOVL t, 13*4+off+64(dst); \
MOVL t, 6*4+off+128(dst); \
MOVL t, 8*4+off+192(dst); \
MOVL t, 2*4+off+256(dst); \
MOVL t, 0*4+off+320(dst); \
MOVL t, 14*4+off+384(dst); \
MOVL t, 11*4+off+448(dst); \
MOVL t, 12*4+off+512(dst); \
MOVL t, 4*4+off+576(dst); \
MOVL 3*4(src), t; \
MOVL t, 5*4+off+0(dst); \
MOVL t, 15*4+off+64(dst); \
MOVL t, 9*4+off+128(dst); \
MOVL t, 1*4+off+192(dst); \
MOVL t, 11*4+off+256(dst); \
MOVL t, 7*4+off+320(dst); \
MOVL t, 13*4+off+384(dst); \
MOVL t, 3*4+off+448(dst); \
MOVL t, 6*4+off+512(dst); \
MOVL t, 10*4+off+576(dst); \
MOVL 4*4(src), t; \
MOVL t, 2*4+off+0(dst); \
MOVL t, 1*4+off+64(dst); \
MOVL t, 15*4+off+128(dst); \
MOVL t, 10*4+off+192(dst); \
MOVL t, 6*4+off+256(dst); \
MOVL t, 8*4+off+320(dst); \
MOVL t, 3*4+off+384(dst); \
MOVL t, 13*4+off+448(dst); \
MOVL t, 14*4+off+512(dst); \
MOVL t, 5*4+off+576(dst); \
MOVL 5*4(src), t; \
MOVL t, 6*4+off+0(dst); \
MOVL t, 11*4+off+64(dst); \
MOVL t, 2*4+off+128(dst); \
MOVL t, 9*4+off+192(dst); \
MOVL t, 1*4+off+256(dst); \
MOVL t, 13*4+off+320(dst); \
MOVL t, 4*4+off+384(dst); \
MOVL t, 8*4+off+448(dst); \
MOVL t, 15*4+off+512(dst); \
MOVL t, 7*4+off+576(dst); \
MOVL 6*4(src), t; \
MOVL t, 3*4+off+0(dst); \
MOVL t, 7*4+off+64(dst); \
MOVL t, 13*4+off+128(dst); \
MOVL t, 12*4+off+192(dst); \
MOVL t, 10*4+off+256(dst); \
MOVL t, 1*4+off+320(dst); \
MOVL t, 9*4+off+384(dst); \
MOVL t, 14*4+off+448(dst); \
MOVL t, 0*4+off+512(dst); \
MOVL t, 6*4+off+576(dst); \
MOVL 7*4(src), t; \
MOVL t, 7*4+off+0(dst); \
MOVL t, 14*4+off+64(dst); \
MOVL t, 10*4+off+128(dst); \
MOVL t, 0*4+off+192(dst); \
MOVL t, 5*4+off+256(dst); \
MOVL t, 9*4+off+320(dst); \
MOVL t, 12*4+off+384(dst); \
MOVL t, 1*4+off+448(dst); \
MOVL t, 13*4+off+512(dst); \
MOVL t, 2*4+off+576(dst); \
MOVL 8*4(src), t; \
MOVL t, 8*4+off+0(dst); \
MOVL t, 5*4+off+64(dst); \
MOVL t, 4*4+off+128(dst); \
MOVL t, 15*4+off+192(dst); \
MOVL t, 14*4+off+256(dst); \
MOVL t, 3*4+off+320(dst); \
MOVL t, 11*4+off+384(dst); \
MOVL t, 10*4+off+448(dst); \
MOVL t, 7*4+off+512(dst); \
MOVL t, 1*4+off+576(dst); \
MOVL 9*4(src), t; \
MOVL t, 12*4+off+0(dst); \
MOVL t, 2*4+off+64(dst); \
MOVL t, 11*4+off+128(dst); \
MOVL t, 4*4+off+192(dst); \
MOVL t, 0*4+off+256(dst); \
MOVL t, 15*4+off+320(dst); \
MOVL t, 10*4+off+384(dst); \
MOVL t, 7*4+off+448(dst); \
MOVL t, 5*4+off+512(dst); \
MOVL t, 9*4+off+576(dst); \
MOVL 10*4(src), t; \
MOVL t, 9*4+off+0(dst); \
MOVL t, 4*4+off+64(dst); \
MOVL t, 8*4+off+128(dst); \
MOVL t, 13*4+off+192(dst); \
MOVL t, 3*4+off+256(dst); \
MOVL t, 5*4+off+320(dst); \
MOVL t, 7*4+off+384(dst); \
MOVL t, 15*4+off+448(dst); \
MOVL t, 11*4+off+512(dst); \
MOVL t, 0*4+off+576(dst); \
MOVL 11*4(src), t; \
MOVL t, 13*4+off+0(dst); \
MOVL t, 10*4+off+64(dst); \
MOVL t, 0*4+off+128(dst); \
MOVL t, 3*4+off+192(dst); \
MOVL t, 9*4+off+256(dst); \
MOVL t, 6*4+off+320(dst); \
MOVL t, 15*4+off+384(dst); \
MOVL t, 4*4+off+448(dst); \
MOVL t, 2*4+off+512(dst); \
MOVL t, 12*4+off+576(dst); \
MOVL 12*4(src), t; \
MOVL t, 10*4+off+0(dst); \
MOVL t, 12*4+off+64(dst); \
MOVL t, 1*4+off+128(dst); \
MOVL t, 6*4+off+192(dst); \
MOVL t, 13*4+off+256(dst); \
MOVL t, 4*4+off+320(dst); \
MOVL t, 0*4+off+384(dst); \
MOVL t, 2*4+off+448(dst); \
MOVL t, 8*4+off+512(dst); \
MOVL t, 14*4+off+576(dst); \
MOVL 13*4(src), t; \
MOVL t, 14*4+off+0(dst); \
MOVL t, 3*4+off+64(dst); \
MOVL t, 7*4+off+128(dst); \
MOVL t, 2*4+off+192(dst); \
MOVL t, 15*4+off+256(dst); \
MOVL t, 12*4+off+320(dst); \
MOVL t, 6*4+off+384(dst); \
MOVL t, 0*4+off+448(dst); \
MOVL t, 9*4+off+512(dst); \
MOVL t, 11*4+off+576(dst); \
MOVL 14*4(src), t; \
MOVL t, 11*4+off+0(dst); \
MOVL t, 0*4+off+64(dst); \
MOVL t, 12*4+off+128(dst); \
MOVL t, 7*4+off+192(dst); \
MOVL t, 8*4+off+256(dst); \
MOVL t, 14*4+off+320(dst); \
MOVL t, 2*4+off+384(dst); \
MOVL t, 5*4+off+448(dst); \
MOVL t, 1*4+off+512(dst); \
MOVL t, 13*4+off+576(dst); \
MOVL 15*4(src), t; \
MOVL t, 15*4+off+0(dst); \
MOVL t, 6*4+off+64(dst); \
MOVL t, 3*4+off+128(dst); \
MOVL t, 11*4+off+192(dst); \
MOVL t, 7*4+off+256(dst); \
MOVL t, 10*4+off+320(dst); \
MOVL t, 5*4+off+384(dst); \
MOVL t, 9*4+off+448(dst); \
MOVL t, 4*4+off+512(dst); \
MOVL t, 8*4+off+576(dst)
// func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
TEXT ·hashBlocksSSE2(SB), 0, $672-24 // frame = 656 + 16 byte alignment
MOVL h+0(FP), AX
MOVL c+4(FP), BX
MOVL flag+8(FP), CX
MOVL blocks_base+12(FP), SI
MOVL blocks_len+16(FP), DX
MOVL SP, BP
MOVL SP, DI
ADDL $15, DI
ANDL $~15, DI
MOVL DI, SP
MOVL CX, 8(SP)
MOVL 0(BX), CX
MOVL CX, 0(SP)
MOVL 4(BX), CX
MOVL CX, 4(SP)
XORL CX, CX
MOVL CX, 12(SP)
MOVOU 0(AX), X0
MOVOU 16(AX), X1
MOVOU counter<>(SB), X2
loop:
MOVO X0, X4
MOVO X1, X5
MOVOU iv0<>(SB), X6
MOVOU iv1<>(SB), X7
MOVO 0(SP), X3
PADDQ X2, X3
PXOR X3, X7
MOVO X3, 0(SP)
PRECOMPUTE(SP, 16, SI, CX)
ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3)
ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3)
PXOR X4, X0
PXOR X5, X1
PXOR X6, X0
PXOR X7, X1
LEAL 64(SI), SI
SUBL $64, DX
JNE loop
MOVL 0(SP), CX
MOVL CX, 0(BX)
MOVL 4(SP), CX
MOVL CX, 4(BX)
MOVOU X0, 0(AX)
MOVOU X1, 16(AX)
MOVL BP, SP
RET
// func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
TEXT ·hashBlocksSSSE3(SB), 0, $704-24 // frame = 688 + 16 byte alignment
MOVL h+0(FP), AX
MOVL c+4(FP), BX
MOVL flag+8(FP), CX
MOVL blocks_base+12(FP), SI
MOVL blocks_len+16(FP), DX
MOVL SP, BP
MOVL SP, DI
ADDL $15, DI
ANDL $~15, DI
MOVL DI, SP
MOVL CX, 8(SP)
MOVL 0(BX), CX
MOVL CX, 0(SP)
MOVL 4(BX), CX
MOVL CX, 4(SP)
XORL CX, CX
MOVL CX, 12(SP)
MOVOU 0(AX), X0
MOVOU 16(AX), X1
MOVOU counter<>(SB), X2
loop:
MOVO X0, 656(SP)
MOVO X1, 672(SP)
MOVO X0, X4
MOVO X1, X5
MOVOU iv0<>(SB), X6
MOVOU iv1<>(SB), X7
MOVO 0(SP), X3
PADDQ X2, X3
PXOR X3, X7
MOVO X3, 0(SP)
MOVOU rol16<>(SB), X0
MOVOU rol8<>(SB), X1
PRECOMPUTE(SP, 16, SI, CX)
ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X3, X0, X1)
ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X3, X0, X1)
MOVO 656(SP), X0
MOVO 672(SP), X1
PXOR X4, X0
PXOR X5, X1
PXOR X6, X0
PXOR X7, X1
LEAL 64(SI), SI
SUBL $64, DX
JNE loop
MOVL 0(SP), CX
MOVL CX, 0(BX)
MOVL 4(SP), CX
MOVL CX, 4(BX)
MOVOU X0, 0(AX)
MOVOU X1, 16(AX)
MOVL BP, SP
RET

37
vendor/golang.org/x/crypto/blake2s/blake2s_amd64.go generated vendored Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
package blake2s
import "golang.org/x/sys/cpu"
var (
useSSE4 = cpu.X86.HasSSE41
useSSSE3 = cpu.X86.HasSSSE3
useSSE2 = cpu.X86.HasSSE2
)
//go:noescape
func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
//go:noescape
func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
//go:noescape
func hashBlocksSSE4(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) {
switch {
case useSSE4:
hashBlocksSSE4(h, c, flag, blocks)
case useSSSE3:
hashBlocksSSSE3(h, c, flag, blocks)
case useSSE2:
hashBlocksSSE2(h, c, flag, blocks)
default:
hashBlocksGeneric(h, c, flag, blocks)
}
}

438
vendor/golang.org/x/crypto/blake2s/blake2s_amd64.s generated vendored Normal file
View File

@ -0,0 +1,438 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build amd64,!gccgo,!appengine
#include "textflag.h"
DATA iv0<>+0x00(SB)/4, $0x6a09e667
DATA iv0<>+0x04(SB)/4, $0xbb67ae85
DATA iv0<>+0x08(SB)/4, $0x3c6ef372
DATA iv0<>+0x0c(SB)/4, $0xa54ff53a
GLOBL iv0<>(SB), (NOPTR+RODATA), $16
DATA iv1<>+0x00(SB)/4, $0x510e527f
DATA iv1<>+0x04(SB)/4, $0x9b05688c
DATA iv1<>+0x08(SB)/4, $0x1f83d9ab
DATA iv1<>+0x0c(SB)/4, $0x5be0cd19
GLOBL iv1<>(SB), (NOPTR+RODATA), $16
DATA rol16<>+0x00(SB)/8, $0x0504070601000302
DATA rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
GLOBL rol16<>(SB), (NOPTR+RODATA), $16
DATA rol8<>+0x00(SB)/8, $0x0407060500030201
DATA rol8<>+0x08(SB)/8, $0x0C0F0E0D080B0A09
GLOBL rol8<>(SB), (NOPTR+RODATA), $16
DATA counter<>+0x00(SB)/8, $0x40
DATA counter<>+0x08(SB)/8, $0x0
GLOBL counter<>(SB), (NOPTR+RODATA), $16
#define ROTL_SSE2(n, t, v) \
MOVO v, t; \
PSLLL $n, t; \
PSRLL $(32-n), v; \
PXOR t, v
#define ROTL_SSSE3(c, v) \
PSHUFB c, v
#define ROUND_SSE2(v0, v1, v2, v3, m0, m1, m2, m3, t) \
PADDL m0, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(16, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m1, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(24, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v1, v1; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v3, v3; \
PADDL m2, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(16, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m3, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSE2(24, t, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v3, v3; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v1, v1
#define ROUND_SSSE3(v0, v1, v2, v3, m0, m1, m2, m3, t, c16, c8) \
PADDL m0, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c16, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m1, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c8, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v1, v1; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v3, v3; \
PADDL m2, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c16, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(20, t, v1); \
PADDL m3, v0; \
PADDL v1, v0; \
PXOR v0, v3; \
ROTL_SSSE3(c8, v3); \
PADDL v3, v2; \
PXOR v2, v1; \
ROTL_SSE2(25, t, v1); \
PSHUFL $0x39, v3, v3; \
PSHUFL $0x4E, v2, v2; \
PSHUFL $0x93, v1, v1
#define LOAD_MSG_SSE4(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11, i12, i13, i14, i15) \
MOVL i0*4(src), m0; \
PINSRD $1, i1*4(src), m0; \
PINSRD $2, i2*4(src), m0; \
PINSRD $3, i3*4(src), m0; \
MOVL i4*4(src), m1; \
PINSRD $1, i5*4(src), m1; \
PINSRD $2, i6*4(src), m1; \
PINSRD $3, i7*4(src), m1; \
MOVL i8*4(src), m2; \
PINSRD $1, i9*4(src), m2; \
PINSRD $2, i10*4(src), m2; \
PINSRD $3, i11*4(src), m2; \
MOVL i12*4(src), m3; \
PINSRD $1, i13*4(src), m3; \
PINSRD $2, i14*4(src), m3; \
PINSRD $3, i15*4(src), m3
#define PRECOMPUTE_MSG(dst, off, src, R8, R9, R10, R11, R12, R13, R14, R15) \
MOVQ 0*4(src), R8; \
MOVQ 2*4(src), R9; \
MOVQ 4*4(src), R10; \
MOVQ 6*4(src), R11; \
MOVQ 8*4(src), R12; \
MOVQ 10*4(src), R13; \
MOVQ 12*4(src), R14; \
MOVQ 14*4(src), R15; \
\
MOVL R8, 0*4+off+0(dst); \
MOVL R8, 9*4+off+64(dst); \
MOVL R8, 5*4+off+128(dst); \
MOVL R8, 14*4+off+192(dst); \
MOVL R8, 4*4+off+256(dst); \
MOVL R8, 2*4+off+320(dst); \
MOVL R8, 8*4+off+384(dst); \
MOVL R8, 12*4+off+448(dst); \
MOVL R8, 3*4+off+512(dst); \
MOVL R8, 15*4+off+576(dst); \
SHRQ $32, R8; \
MOVL R8, 4*4+off+0(dst); \
MOVL R8, 8*4+off+64(dst); \
MOVL R8, 14*4+off+128(dst); \
MOVL R8, 5*4+off+192(dst); \
MOVL R8, 12*4+off+256(dst); \
MOVL R8, 11*4+off+320(dst); \
MOVL R8, 1*4+off+384(dst); \
MOVL R8, 6*4+off+448(dst); \
MOVL R8, 10*4+off+512(dst); \
MOVL R8, 3*4+off+576(dst); \
\
MOVL R9, 1*4+off+0(dst); \
MOVL R9, 13*4+off+64(dst); \
MOVL R9, 6*4+off+128(dst); \
MOVL R9, 8*4+off+192(dst); \
MOVL R9, 2*4+off+256(dst); \
MOVL R9, 0*4+off+320(dst); \
MOVL R9, 14*4+off+384(dst); \
MOVL R9, 11*4+off+448(dst); \
MOVL R9, 12*4+off+512(dst); \
MOVL R9, 4*4+off+576(dst); \
SHRQ $32, R9; \
MOVL R9, 5*4+off+0(dst); \
MOVL R9, 15*4+off+64(dst); \
MOVL R9, 9*4+off+128(dst); \
MOVL R9, 1*4+off+192(dst); \
MOVL R9, 11*4+off+256(dst); \
MOVL R9, 7*4+off+320(dst); \
MOVL R9, 13*4+off+384(dst); \
MOVL R9, 3*4+off+448(dst); \
MOVL R9, 6*4+off+512(dst); \
MOVL R9, 10*4+off+576(dst); \
\
MOVL R10, 2*4+off+0(dst); \
MOVL R10, 1*4+off+64(dst); \
MOVL R10, 15*4+off+128(dst); \
MOVL R10, 10*4+off+192(dst); \
MOVL R10, 6*4+off+256(dst); \
MOVL R10, 8*4+off+320(dst); \
MOVL R10, 3*4+off+384(dst); \
MOVL R10, 13*4+off+448(dst); \
MOVL R10, 14*4+off+512(dst); \
MOVL R10, 5*4+off+576(dst); \
SHRQ $32, R10; \
MOVL R10, 6*4+off+0(dst); \
MOVL R10, 11*4+off+64(dst); \
MOVL R10, 2*4+off+128(dst); \
MOVL R10, 9*4+off+192(dst); \
MOVL R10, 1*4+off+256(dst); \
MOVL R10, 13*4+off+320(dst); \
MOVL R10, 4*4+off+384(dst); \
MOVL R10, 8*4+off+448(dst); \
MOVL R10, 15*4+off+512(dst); \
MOVL R10, 7*4+off+576(dst); \
\
MOVL R11, 3*4+off+0(dst); \
MOVL R11, 7*4+off+64(dst); \
MOVL R11, 13*4+off+128(dst); \
MOVL R11, 12*4+off+192(dst); \
MOVL R11, 10*4+off+256(dst); \
MOVL R11, 1*4+off+320(dst); \
MOVL R11, 9*4+off+384(dst); \
MOVL R11, 14*4+off+448(dst); \
MOVL R11, 0*4+off+512(dst); \
MOVL R11, 6*4+off+576(dst); \
SHRQ $32, R11; \
MOVL R11, 7*4+off+0(dst); \
MOVL R11, 14*4+off+64(dst); \
MOVL R11, 10*4+off+128(dst); \
MOVL R11, 0*4+off+192(dst); \
MOVL R11, 5*4+off+256(dst); \
MOVL R11, 9*4+off+320(dst); \
MOVL R11, 12*4+off+384(dst); \
MOVL R11, 1*4+off+448(dst); \
MOVL R11, 13*4+off+512(dst); \
MOVL R11, 2*4+off+576(dst); \
\
MOVL R12, 8*4+off+0(dst); \
MOVL R12, 5*4+off+64(dst); \
MOVL R12, 4*4+off+128(dst); \
MOVL R12, 15*4+off+192(dst); \
MOVL R12, 14*4+off+256(dst); \
MOVL R12, 3*4+off+320(dst); \
MOVL R12, 11*4+off+384(dst); \
MOVL R12, 10*4+off+448(dst); \
MOVL R12, 7*4+off+512(dst); \
MOVL R12, 1*4+off+576(dst); \
SHRQ $32, R12; \
MOVL R12, 12*4+off+0(dst); \
MOVL R12, 2*4+off+64(dst); \
MOVL R12, 11*4+off+128(dst); \
MOVL R12, 4*4+off+192(dst); \
MOVL R12, 0*4+off+256(dst); \
MOVL R12, 15*4+off+320(dst); \
MOVL R12, 10*4+off+384(dst); \
MOVL R12, 7*4+off+448(dst); \
MOVL R12, 5*4+off+512(dst); \
MOVL R12, 9*4+off+576(dst); \
\
MOVL R13, 9*4+off+0(dst); \
MOVL R13, 4*4+off+64(dst); \
MOVL R13, 8*4+off+128(dst); \
MOVL R13, 13*4+off+192(dst); \
MOVL R13, 3*4+off+256(dst); \
MOVL R13, 5*4+off+320(dst); \
MOVL R13, 7*4+off+384(dst); \
MOVL R13, 15*4+off+448(dst); \
MOVL R13, 11*4+off+512(dst); \
MOVL R13, 0*4+off+576(dst); \
SHRQ $32, R13; \
MOVL R13, 13*4+off+0(dst); \
MOVL R13, 10*4+off+64(dst); \
MOVL R13, 0*4+off+128(dst); \
MOVL R13, 3*4+off+192(dst); \
MOVL R13, 9*4+off+256(dst); \
MOVL R13, 6*4+off+320(dst); \
MOVL R13, 15*4+off+384(dst); \
MOVL R13, 4*4+off+448(dst); \
MOVL R13, 2*4+off+512(dst); \
MOVL R13, 12*4+off+576(dst); \
\
MOVL R14, 10*4+off+0(dst); \
MOVL R14, 12*4+off+64(dst); \
MOVL R14, 1*4+off+128(dst); \
MOVL R14, 6*4+off+192(dst); \
MOVL R14, 13*4+off+256(dst); \
MOVL R14, 4*4+off+320(dst); \
MOVL R14, 0*4+off+384(dst); \
MOVL R14, 2*4+off+448(dst); \
MOVL R14, 8*4+off+512(dst); \
MOVL R14, 14*4+off+576(dst); \
SHRQ $32, R14; \
MOVL R14, 14*4+off+0(dst); \
MOVL R14, 3*4+off+64(dst); \
MOVL R14, 7*4+off+128(dst); \
MOVL R14, 2*4+off+192(dst); \
MOVL R14, 15*4+off+256(dst); \
MOVL R14, 12*4+off+320(dst); \
MOVL R14, 6*4+off+384(dst); \
MOVL R14, 0*4+off+448(dst); \
MOVL R14, 9*4+off+512(dst); \
MOVL R14, 11*4+off+576(dst); \
\
MOVL R15, 11*4+off+0(dst); \
MOVL R15, 0*4+off+64(dst); \
MOVL R15, 12*4+off+128(dst); \
MOVL R15, 7*4+off+192(dst); \
MOVL R15, 8*4+off+256(dst); \
MOVL R15, 14*4+off+320(dst); \
MOVL R15, 2*4+off+384(dst); \
MOVL R15, 5*4+off+448(dst); \
MOVL R15, 1*4+off+512(dst); \
MOVL R15, 13*4+off+576(dst); \
SHRQ $32, R15; \
MOVL R15, 15*4+off+0(dst); \
MOVL R15, 6*4+off+64(dst); \
MOVL R15, 3*4+off+128(dst); \
MOVL R15, 11*4+off+192(dst); \
MOVL R15, 7*4+off+256(dst); \
MOVL R15, 10*4+off+320(dst); \
MOVL R15, 5*4+off+384(dst); \
MOVL R15, 9*4+off+448(dst); \
MOVL R15, 4*4+off+512(dst); \
MOVL R15, 8*4+off+576(dst)
#define BLAKE2s_SSE2() \
PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \
ROUND_SSE2(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8); \
ROUND_SSE2(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X8)
#define BLAKE2s_SSSE3() \
PRECOMPUTE_MSG(SP, 16, SI, R8, R9, R10, R11, R12, R13, R14, R15); \
ROUND_SSSE3(X4, X5, X6, X7, 16(SP), 32(SP), 48(SP), 64(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+64(SP), 32+64(SP), 48+64(SP), 64+64(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+128(SP), 32+128(SP), 48+128(SP), 64+128(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+192(SP), 32+192(SP), 48+192(SP), 64+192(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+256(SP), 32+256(SP), 48+256(SP), 64+256(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+320(SP), 32+320(SP), 48+320(SP), 64+320(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+384(SP), 32+384(SP), 48+384(SP), 64+384(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+448(SP), 32+448(SP), 48+448(SP), 64+448(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+512(SP), 32+512(SP), 48+512(SP), 64+512(SP), X8, X13, X14); \
ROUND_SSSE3(X4, X5, X6, X7, 16+576(SP), 32+576(SP), 48+576(SP), 64+576(SP), X8, X13, X14)
#define BLAKE2s_SSE4() \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14); \
LOAD_MSG_SSE4(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0); \
ROUND_SSSE3(X4, X5, X6, X7, X8, X9, X10, X11, X8, X13, X14)
#define HASH_BLOCKS(h, c, flag, blocks_base, blocks_len, BLAKE2s_FUNC) \
MOVQ h, AX; \
MOVQ c, BX; \
MOVL flag, CX; \
MOVQ blocks_base, SI; \
MOVQ blocks_len, DX; \
\
MOVQ SP, BP; \
MOVQ SP, R9; \
ADDQ $15, R9; \
ANDQ $~15, R9; \
MOVQ R9, SP; \
\
MOVQ 0(BX), R9; \
MOVQ R9, 0(SP); \
XORQ R9, R9; \
MOVQ R9, 8(SP); \
MOVL CX, 8(SP); \
\
MOVOU 0(AX), X0; \
MOVOU 16(AX), X1; \
MOVOU iv0<>(SB), X2; \
MOVOU iv1<>(SB), X3; \
\
MOVOU counter<>(SB), X12; \
MOVOU rol16<>(SB), X13; \
MOVOU rol8<>(SB), X14; \
MOVO 0(SP), X15; \
\
loop: \
MOVO X0, X4; \
MOVO X1, X5; \
MOVO X2, X6; \
MOVO X3, X7; \
\
PADDQ X12, X15; \
PXOR X15, X7; \
\
BLAKE2s_FUNC(); \
\
PXOR X4, X0; \
PXOR X5, X1; \
PXOR X6, X0; \
PXOR X7, X1; \
\
LEAQ 64(SI), SI; \
SUBQ $64, DX; \
JNE loop; \
\
MOVO X15, 0(SP); \
MOVQ 0(SP), R9; \
MOVQ R9, 0(BX); \
\
MOVOU X0, 0(AX); \
MOVOU X1, 16(AX); \
\
MOVQ BP, SP
// func hashBlocksSSE2(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
TEXT ·hashBlocksSSE2(SB), 0, $672-48 // frame = 656 + 16 byte alignment
HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSE2)
RET
// func hashBlocksSSSE3(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
TEXT ·hashBlocksSSSE3(SB), 0, $672-48 // frame = 656 + 16 byte alignment
HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSSE3)
RET
// func hashBlocksSSE4(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte)
TEXT ·hashBlocksSSE4(SB), 0, $32-48 // frame = 16 + 16 byte alignment
HASH_BLOCKS(h+0(FP), c+8(FP), flag+16(FP), blocks_base+24(FP), blocks_len+32(FP), BLAKE2s_SSE4)
RET
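The three TEXT symbols above are picked at run time based on the SSE features the CPU supports. The dispatcher itself lives in the package's amd64 Go file, which is not part of this hunk; the following is only a sketch of that selection logic, reusing the useSSE2/useSSSE3/useSSE4 flags declared in blake2s_ref.go below.

// Sketch only (package blake2s): choose the fastest available assembly routine,
// assuming the useSSE* flags are set from CPU feature detection at init time.
func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) {
	switch {
	case useSSE4:
		hashBlocksSSE4(h, c, flag, blocks)
	case useSSSE3:
		hashBlocksSSSE3(h, c, flag, blocks)
	case useSSE2:
		hashBlocksSSE2(h, c, flag, blocks)
	default:
		hashBlocksGeneric(h, c, flag, blocks)
	}
}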

174
vendor/golang.org/x/crypto/blake2s/blake2s_generic.go generated vendored Normal file

@@ -0,0 +1,174 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2s
// the precomputed values for BLAKE2s
// there are 10 16-byte arrays - one for each round
// the entries are calculated from the sigma constants.
var precomputed = [10][16]byte{
{0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15},
{14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3},
{11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4},
{7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8},
{9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13},
{2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9},
{12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11},
{13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10},
{6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5},
{10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0},
}
func hashBlocksGeneric(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) {
var m [16]uint32
c0, c1 := c[0], c[1]
for i := 0; i < len(blocks); {
c0 += BlockSize
if c0 < BlockSize {
c1++
}
v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7]
v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7]
v12 ^= c0
v13 ^= c1
v14 ^= flag
for j := range m {
m[j] = uint32(blocks[i]) | uint32(blocks[i+1])<<8 | uint32(blocks[i+2])<<16 | uint32(blocks[i+3])<<24
i += 4
}
for k := range precomputed {
s := &(precomputed[k])
v0 += m[s[0]]
v0 += v4
v12 ^= v0
v12 = v12<<(32-16) | v12>>16
v8 += v12
v4 ^= v8
v4 = v4<<(32-12) | v4>>12
v1 += m[s[1]]
v1 += v5
v13 ^= v1
v13 = v13<<(32-16) | v13>>16
v9 += v13
v5 ^= v9
v5 = v5<<(32-12) | v5>>12
v2 += m[s[2]]
v2 += v6
v14 ^= v2
v14 = v14<<(32-16) | v14>>16
v10 += v14
v6 ^= v10
v6 = v6<<(32-12) | v6>>12
v3 += m[s[3]]
v3 += v7
v15 ^= v3
v15 = v15<<(32-16) | v15>>16
v11 += v15
v7 ^= v11
v7 = v7<<(32-12) | v7>>12
v0 += m[s[4]]
v0 += v4
v12 ^= v0
v12 = v12<<(32-8) | v12>>8
v8 += v12
v4 ^= v8
v4 = v4<<(32-7) | v4>>7
v1 += m[s[5]]
v1 += v5
v13 ^= v1
v13 = v13<<(32-8) | v13>>8
v9 += v13
v5 ^= v9
v5 = v5<<(32-7) | v5>>7
v2 += m[s[6]]
v2 += v6
v14 ^= v2
v14 = v14<<(32-8) | v14>>8
v10 += v14
v6 ^= v10
v6 = v6<<(32-7) | v6>>7
v3 += m[s[7]]
v3 += v7
v15 ^= v3
v15 = v15<<(32-8) | v15>>8
v11 += v15
v7 ^= v11
v7 = v7<<(32-7) | v7>>7
v0 += m[s[8]]
v0 += v5
v15 ^= v0
v15 = v15<<(32-16) | v15>>16
v10 += v15
v5 ^= v10
v5 = v5<<(32-12) | v5>>12
v1 += m[s[9]]
v1 += v6
v12 ^= v1
v12 = v12<<(32-16) | v12>>16
v11 += v12
v6 ^= v11
v6 = v6<<(32-12) | v6>>12
v2 += m[s[10]]
v2 += v7
v13 ^= v2
v13 = v13<<(32-16) | v13>>16
v8 += v13
v7 ^= v8
v7 = v7<<(32-12) | v7>>12
v3 += m[s[11]]
v3 += v4
v14 ^= v3
v14 = v14<<(32-16) | v14>>16
v9 += v14
v4 ^= v9
v4 = v4<<(32-12) | v4>>12
v0 += m[s[12]]
v0 += v5
v15 ^= v0
v15 = v15<<(32-8) | v15>>8
v10 += v15
v5 ^= v10
v5 = v5<<(32-7) | v5>>7
v1 += m[s[13]]
v1 += v6
v12 ^= v1
v12 = v12<<(32-8) | v12>>8
v11 += v12
v6 ^= v11
v6 = v6<<(32-7) | v6>>7
v2 += m[s[14]]
v2 += v7
v13 ^= v2
v13 = v13<<(32-8) | v13>>8
v8 += v13
v7 ^= v8
v7 = v7<<(32-7) | v7>>7
v3 += m[s[15]]
v3 += v4
v14 ^= v3
v14 = v14<<(32-8) | v14>>8
v9 += v14
v4 ^= v9
v4 = v4<<(32-7) | v4>>7
}
h[0] ^= v0 ^ v8
h[1] ^= v1 ^ v9
h[2] ^= v2 ^ v10
h[3] ^= v3 ^ v11
h[4] ^= v4 ^ v12
h[5] ^= v5 ^ v13
h[6] ^= v6 ^ v14
h[7] ^= v7 ^ v15
}
c[0], c[1] = c0, c1
}
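Each pass of the inner loop above is one BLAKE2s round: eight applications of the G mixing function, fully unrolled, with rotation amounts 16, 12, 8 and 7. As an illustration only (not part of the vendored file), a single G application can be written compactly with math/bits:

// g mixes one column/diagonal (a, b, c, d) with the two message words x and y
// selected by the sigma permutation; a negative rotate count rotates right.
// Requires "math/bits".
func g(a, b, c, d, x, y uint32) (uint32, uint32, uint32, uint32) {
	a += b + x
	d = bits.RotateLeft32(d^a, -16)
	c += d
	b = bits.RotateLeft32(b^c, -12)
	a += b + y
	d = bits.RotateLeft32(d^a, -8)
	c += d
	b = bits.RotateLeft32(b^c, -7)
	return a, b, c, d
}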

17
vendor/golang.org/x/crypto/blake2s/blake2s_ref.go generated vendored Normal file

@@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64,!386 gccgo appengine

package blake2s
var (
useSSE4 = false
useSSSE3 = false
useSSE2 = false
)
func hashBlocks(h *[8]uint32, c *[2]uint32, flag uint32, blocks []byte) {
hashBlocksGeneric(h, c, flag, blocks)
}

178
vendor/golang.org/x/crypto/blake2s/blake2x.go generated vendored Normal file

@@ -0,0 +1,178 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package blake2s
import (
"encoding/binary"
"errors"
"io"
)
// XOF defines the interface to hash functions that
// support arbitrary-length output.
type XOF interface {
// Write absorbs more data into the hash's state. It panics if called
// after Read.
io.Writer
// Read reads more output from the hash. It returns io.EOF if the limit
// has been reached.
io.Reader
// Clone returns a copy of the XOF in its current state.
Clone() XOF
// Reset resets the XOF to its initial state.
Reset()
}
// OutputLengthUnknown can be used as the size argument to NewXOF to indicate
// the length of the output is not known in advance.
const OutputLengthUnknown = 0
// magicUnknownOutputLength is a magic value for the output size that indicates
// an unknown number of output bytes.
const magicUnknownOutputLength = 65535
// maxOutputLength is the absolute maximum number of bytes to produce when the
// number of output bytes is unknown.
const maxOutputLength = (1 << 32) * 32
// NewXOF creates a new variable-output-length hash. The hash either produces a
// known number of bytes (1 <= size < 65535), or an unknown number of bytes
// (size == OutputLengthUnknown). In the latter case, an absolute limit of
// 128GiB applies.
//
// A non-nil key turns the hash into a MAC. The key must be between
// zero and 32 bytes long.
func NewXOF(size uint16, key []byte) (XOF, error) {
if len(key) > Size {
return nil, errKeySize
}
if size == magicUnknownOutputLength {
// 2^16-1 indicates an unknown number of bytes and thus isn't a
// valid length.
return nil, errors.New("blake2s: XOF length too large")
}
if size == OutputLengthUnknown {
size = magicUnknownOutputLength
}
x := &xof{
d: digest{
size: Size,
keyLen: len(key),
},
length: size,
}
copy(x.d.key[:], key)
x.Reset()
return x, nil
}
type xof struct {
d digest
length uint16
remaining uint64
cfg, root, block [Size]byte
offset int
nodeOffset uint32
readMode bool
}
func (x *xof) Write(p []byte) (n int, err error) {
if x.readMode {
panic("blake2s: write to XOF after read")
}
return x.d.Write(p)
}
func (x *xof) Clone() XOF {
clone := *x
return &clone
}
func (x *xof) Reset() {
x.cfg[0] = byte(Size)
binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length
binary.LittleEndian.PutUint16(x.cfg[12:], x.length) // XOF length
x.cfg[15] = byte(Size) // inner hash size
x.d.Reset()
x.d.h[3] ^= uint32(x.length)
x.remaining = uint64(x.length)
if x.remaining == magicUnknownOutputLength {
x.remaining = maxOutputLength
}
x.offset, x.nodeOffset = 0, 0
x.readMode = false
}
func (x *xof) Read(p []byte) (n int, err error) {
if !x.readMode {
x.d.finalize(&x.root)
x.readMode = true
}
if x.remaining == 0 {
return 0, io.EOF
}
n = len(p)
if uint64(n) > x.remaining {
n = int(x.remaining)
p = p[:n]
}
if x.offset > 0 {
blockRemaining := Size - x.offset
if n < blockRemaining {
x.offset += copy(p, x.block[x.offset:])
x.remaining -= uint64(n)
return
}
copy(p, x.block[x.offset:])
p = p[blockRemaining:]
x.offset = 0
x.remaining -= uint64(blockRemaining)
}
for len(p) >= Size {
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
copy(p, x.block[:])
p = p[Size:]
x.remaining -= uint64(Size)
}
if todo := len(p); todo > 0 {
if x.remaining < uint64(Size) {
x.cfg[0] = byte(x.remaining)
}
binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset)
x.nodeOffset++
x.d.initConfig(&x.cfg)
x.d.Write(x.root[:])
x.d.finalize(&x.block)
x.offset = copy(p, x.block[:todo])
x.remaining -= uint64(todo)
}
return
}
func (d *digest) initConfig(cfg *[Size]byte) {
d.offset, d.c[0], d.c[1] = 0, 0, 0
for i := range d.h {
d.h[i] = iv[i] ^ binary.LittleEndian.Uint32(cfg[i*4:])
}
}
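A short usage sketch for the XOF above, assuming the vendored module is imported as golang.org/x/crypto/blake2s: request 64 bytes of output and read it through the io.Reader half of the interface.

package main

import (
	"fmt"
	"io"

	"golang.org/x/crypto/blake2s"
)

func main() {
	// Sketch: 64 bytes of BLAKE2Xs output from an unkeyed hash.
	x, err := blake2s.NewXOF(64, nil)
	if err != nil {
		panic(err)
	}
	x.Write([]byte("hello world")) // Write does not fail before the first Read
	out := make([]byte, 64)
	if _, err := io.ReadFull(x, out); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out)
}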

21
vendor/golang.org/x/crypto/blake2s/register.go generated vendored Normal file

@@ -0,0 +1,21 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.9

package blake2s
import (
"crypto"
"hash"
)
func init() {
newHash256 := func() hash.Hash {
h, _ := New256(nil)
return h
}
crypto.RegisterHash(crypto.BLAKE2s_256, newHash256)
}
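With this init in place (Go 1.9 or newer), a blank import of the package is enough to make BLAKE2s-256 available through the standard crypto registry; a minimal sketch:

package main

import (
	"crypto"
	"fmt"

	_ "golang.org/x/crypto/blake2s" // side effect: registers crypto.BLAKE2s_256
)

func main() {
	if !crypto.BLAKE2s_256.Available() {
		panic("BLAKE2s-256 not registered")
	}
	h := crypto.BLAKE2s_256.New()
	h.Write([]byte("data"))
	fmt.Printf("%x\n", h.Sum(nil))
}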

124
vendor/golang.org/x/crypto/ripemd160/ripemd160.go generated vendored Normal file

@@ -0,0 +1,124 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ripemd160 implements the RIPEMD-160 hash algorithm.
//
// Deprecated: RIPEMD-160 is a legacy hash and should not be used for new
// applications. Also, this package does not and will not provide an optimized
// implementation. Instead, use a modern hash like SHA-256 (from crypto/sha256).
package ripemd160 // import "golang.org/x/crypto/ripemd160"
// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
import (
"crypto"
"hash"
)
func init() {
crypto.RegisterHash(crypto.RIPEMD160, New)
}
// The size of the checksum in bytes.
const Size = 20
// The block size of the hash algorithm in bytes.
const BlockSize = 64
const (
_s0 = 0x67452301
_s1 = 0xefcdab89
_s2 = 0x98badcfe
_s3 = 0x10325476
_s4 = 0xc3d2e1f0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
s [5]uint32 // running context
x [BlockSize]byte // temporary buffer
nx int // index into x
tc uint64 // total count of bytes processed
}
func (d *digest) Reset() {
d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
d.nx = 0
d.tc = 0
}
// New returns a new hash.Hash computing the checksum.
func New() hash.Hash {
result := new(digest)
result.Reset()
return result
}
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.tc += uint64(nn)
if d.nx > 0 {
n := len(p)
if n > BlockSize-d.nx {
n = BlockSize - d.nx
}
for i := 0; i < n; i++ {
d.x[d.nx+i] = p[i]
}
d.nx += n
if d.nx == BlockSize {
_Block(d, d.x[0:])
d.nx = 0
}
p = p[n:]
}
n := _Block(d, p)
p = p[n:]
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
func (d0 *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d := *d0
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
tc := d.tc
var tmp [64]byte
tmp[0] = 0x80
if tc%64 < 56 {
d.Write(tmp[0 : 56-tc%64])
} else {
d.Write(tmp[0 : 64+56-tc%64])
}
// Length in bits.
tc <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(tc >> (8 * i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.s {
digest[i*4] = byte(s)
digest[i*4+1] = byte(s >> 8)
digest[i*4+2] = byte(s >> 16)
digest[i*4+3] = byte(s >> 24)
}
return append(in, digest[:]...)
}
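The package is kept for compatibility only (see the deprecation note at the top of the file); a minimal usage sketch through the standard hash.Hash interface:

package main

import (
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	h := ripemd160.New() // 20-byte digest, 64-byte block size
	h.Write([]byte("message digest"))
	fmt.Printf("%x\n", h.Sum(nil))
}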

165
vendor/golang.org/x/crypto/ripemd160/ripemd160block.go generated vendored Normal file

@@ -0,0 +1,165 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RIPEMD-160 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
package ripemd160
import (
"math/bits"
)
// work buffer indices and roll amounts for one line
var _n = [80]uint{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
}
var _r = [80]uint{
11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
}
// same for the other parallel one
var n_ = [80]uint{
5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
}
var r_ = [80]uint{
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
}
func _Block(md *digest, p []byte) int {
n := 0
var x [16]uint32
var alpha, beta uint32
for len(p) >= BlockSize {
a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
aa, bb, cc, dd, ee := a, b, c, d, e
j := 0
for i := 0; i < 16; i++ {
x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
j += 4
}
// round 1
i := 0
for i < 16 {
alpha = a + (b ^ c ^ d) + x[_n[i]]
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 2
for i < 32 {
alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 3
for i < 48 {
alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 4
for i < 64 {
alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 5
for i < 80 {
alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// combine results
dd += c + md.s[1]
md.s[1] = md.s[2] + d + ee
md.s[2] = md.s[3] + e + aa
md.s[3] = md.s[4] + a + bb
md.s[4] = md.s[0] + b + cc
md.s[0] = dd
p = p[BlockSize:]
n += BlockSize
}
return n
}

66
vendor/golang.org/x/crypto/sha3/doc.go generated vendored Normal file

@@ -0,0 +1,66 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha3 implements the SHA-3 fixed-output-length hash functions and
// the SHAKE variable-output-length hash functions defined by FIPS-202.
//
// Both types of hash function use the "sponge" construction and the Keccak
// permutation. For a detailed specification see http://keccak.noekeon.org/
//
//
// Guidance
//
// If you aren't sure what function you need, use SHAKE256 with at least 64
// bytes of output. The SHAKE instances are faster than the SHA3 instances;
// the latter have to allocate memory to conform to the hash.Hash interface.
//
// If you need a secret-key MAC (message authentication code), prepend the
// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
// output.
//
//
// Security strengths
//
// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
// strength against preimage attacks of x bits. Since they only produce "x"
// bits of output, their collision-resistance is only "x/2" bits.
//
// The SHAKE-256 and -128 functions have a generic security strength of 256 and
// 128 bits against all attacks, provided that at least 2x bits of their output
// is used. Requesting more than 64 or 32 bytes of output, respectively, does
// not increase the collision-resistance of the SHAKE functions.
//
//
// The sponge construction
//
// A sponge builds a pseudo-random function from a public pseudo-random
// permutation, by applying the permutation to a state of "rate + capacity"
// bytes, but hiding "capacity" of the bytes.
//
// A sponge starts out with a zero state. To hash an input using a sponge, up
// to "rate" bytes of the input are XORed into the sponge's state. The sponge
// is then "full" and the permutation is applied to "empty" it. This process is
// repeated until all the input has been "absorbed". The input is then padded.
// The digest is "squeezed" from the sponge in the same way, except that output
// is copied out instead of input being XORed in.
//
// A sponge is parameterized by its generic security strength, which is equal
// to half its capacity; capacity + rate is equal to the permutation's width.
// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
//
//
// Recommendations
//
// The SHAKE functions are recommended for most new uses. They can produce
// output of arbitrary length. SHAKE256, with an output length of at least
// 64 bytes, provides 256-bit security against all attacks. The Keccak team
// recommends it for most applications upgrading from SHA2-512. (NIST chose a
// much stronger, but much slower, sponge instance for SHA3-512.)
//
// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
// They produce output of the same length, with the same security strengths
// against all attacks. This means, in particular, that SHA3-256 only has
// 128-bit collision resistance, because its output length is 32 bytes.
package sha3 // import "golang.org/x/crypto/sha3"
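Following the guidance above (SHAKE256 with at least 64 bytes of output, and a MAC built by prepending the secret key), a small sketch; it assumes the package's SHAKE helpers NewShake256 and ShakeSum256 from shake.go, which belongs to the same vendored package but is not shown in this hunk:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Plain hashing: 64 bytes of SHAKE256 output.
	out := make([]byte, 64)
	sha3.ShakeSum256(out, []byte("some input"))
	fmt.Printf("hash: %x\n", out)

	// Keyed MAC per the doc comment: absorb the key first, then the message.
	mac := sha3.NewShake256()
	mac.Write([]byte("a secret key"))
	mac.Write([]byte("the message"))
	tag := make([]byte, 32)
	mac.Read(tag)
	fmt.Printf("mac:  %x\n", tag)
}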

97
vendor/golang.org/x/crypto/sha3/hashes.go generated vendored Normal file

@@ -0,0 +1,97 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sha3
// This file provides functions for creating instances of the SHA-3
// and SHAKE hash functions, as well as utility functions for hashing
// bytes.
import (
"hash"
)
// New224 creates a new SHA3-224 hash.
// Its generic security strength is 224 bits against preimage attacks,
// and 112 bits against collision attacks.
func New224() hash.Hash {
if h := new224Asm(); h != nil {
return h
}
return &state{rate: 144, outputLen: 28, dsbyte: 0x06}
}
// New256 creates a new SHA3-256 hash.
// Its generic security strength is 256 bits against preimage attacks,
// and 128 bits against collision attacks.
func New256() hash.Hash {
if h := new256Asm(); h != nil {
return h
}
return &state{rate: 136, outputLen: 32, dsbyte: 0x06}
}
// New384 creates a new SHA3-384 hash.
// Its generic security strength is 384 bits against preimage attacks,
// and 192 bits against collision attacks.
func New384() hash.Hash {
if h := new384Asm(); h != nil {
return h
}
return &state{rate: 104, outputLen: 48, dsbyte: 0x06}
}
// New512 creates a new SHA3-512 hash.
// Its generic security strength is 512 bits against preimage attacks,
// and 256 bits against collision attacks.
func New512() hash.Hash {
if h := new512Asm(); h != nil {
return h
}
return &state{rate: 72, outputLen: 64, dsbyte: 0x06}
}
// NewLegacyKeccak256 creates a new Keccak-256 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New256 instead.
func NewLegacyKeccak256() hash.Hash { return &state{rate: 136, outputLen: 32, dsbyte: 0x01} }
// NewLegacyKeccak512 creates a new Keccak-512 hash.
//
// Only use this function if you require compatibility with an existing cryptosystem
// that uses non-standard padding. All other users should use New512 instead.
func NewLegacyKeccak512() hash.Hash { return &state{rate: 72, outputLen: 64, dsbyte: 0x01} }
// Sum224 returns the SHA3-224 digest of the data.
func Sum224(data []byte) (digest [28]byte) {
h := New224()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum256 returns the SHA3-256 digest of the data.
func Sum256(data []byte) (digest [32]byte) {
h := New256()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum384 returns the SHA3-384 digest of the data.
func Sum384(data []byte) (digest [48]byte) {
h := New384()
h.Write(data)
h.Sum(digest[:0])
return
}
// Sum512 returns the SHA3-512 digest of the data.
func Sum512(data []byte) (digest [64]byte) {
h := New512()
h.Write(data)
h.Sum(digest[:0])
return
}
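The rate values used in the constructors above follow from the sponge parameters described in doc.go: the Keccak-f[1600] state is 200 bytes and the capacity is twice the output length, so rate = 200 - 2*outputLen bytes. A quick self-check, for illustration only:

package main

func main() {
	// For each SHA-3 instance above: rate = 200 - 2*outputLen bytes.
	for _, p := range []struct{ outputLen, rate int }{
		{28, 144}, // SHA3-224
		{32, 136}, // SHA3-256
		{48, 104}, // SHA3-384
		{64, 72},  // SHA3-512
	} {
		if 200-2*p.outputLen != p.rate {
			panic("unexpected rate")
		}
	}
}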

Some files were not shown because too many files have changed in this diff.