diff --git a/.gitignore b/.gitignore
index dc6c8b7..1c94957 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,6 +23,7 @@ _testmain.go
 *.test
 *.prof
 
+dist/
 output/
 gosuv
 bindata_assetfs.go
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 90c9efe..1284794 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -1,7 +1,7 @@
 {
 	"ImportPath": "github.com/codeskyblue/gosuv",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v74",
+	"GoVersion": "go1.7",
+	"GodepVersion": "v79",
 	"Deps": [
 		{
 			"ImportPath": "github.com/codeskyblue/kexec",
@@ -43,17 +43,15 @@
 			"ImportPath": "github.com/goji/httpauth",
 			"Rev": "2da839ab0f4df05a6db5eb277995589dadbd4fb9"
 		},
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Rev": "1c83b3eabd45b6d76072b66b746c20815fb2872d"
-		},
 		{
 			"ImportPath": "github.com/gorilla/mux",
-			"Rev": "ee1815431e497d3850809578c93ab6705f1a19f7"
+			"Comment": "v1.3.0-1-g94e7d24",
+			"Rev": "94e7d24fd285520f3d12ae998f7fdd6b5393d453"
 		},
 		{
 			"ImportPath": "github.com/gorilla/websocket",
-			"Rev": "b6ab76f1fe9803ee1d59e7e5b2a797c1fe897ce5"
+			"Comment": "v1.1.0-24-g3f3e394",
+			"Rev": "3f3e394da2b801fbe732a935ef40724762a67a07"
 		},
 		{
 			"ImportPath": "github.com/jtolds/gls",
@@ -72,6 +70,14 @@
 			"Comment": "v1.0.00-2-ge002bc2",
 			"Rev": "e002bc2020b19bfa61ed378cc5407383dbd2f346"
 		},
+		{
+			"ImportPath": "github.com/shurcooL/httpfs/vfsutil",
+			"Rev": "df3d5d88c59699064c4449a2d4e3db0f07e74ed9"
+		},
+		{
+			"ImportPath": "github.com/shurcooL/vfsgen",
+			"Rev": "8bd98c96e6f6800019cdcd1183547b1c8e89d280"
+		},
 		{
 			"ImportPath": "github.com/smartystreets/assertions",
 			"Comment": "1.5.0-405-g01fedaa",
@@ -104,11 +110,15 @@
 		},
 		{
 			"ImportPath": "golang.org/x/net/html",
-			"Rev": "075e191f18186a8ff2becaf64478e30f4545cdad"
+			"Rev": "6b27048ae5e6ad1ef927e72e437531493de612fe"
 		},
 		{
 			"ImportPath": "golang.org/x/net/html/atom",
-			"Rev": "075e191f18186a8ff2becaf64478e30f4545cdad"
+			"Rev": "6b27048ae5e6ad1ef927e72e437531493de612fe"
+		},
+		{
+			"ImportPath": "golang.org/x/tools/godoc/vfs",
+			"Rev": "513c731aab546b4628168d772550499740fe9dd2"
 		}
 	]
 }
diff --git a/build_remote.sh b/build_remote.sh
new file mode 100644
index 0000000..e9ac42a
--- /dev/null
+++ b/build_remote.sh
@@ -0,0 +1,19 @@
+#!/bin/bash -
+#
+
+set -e
+
+TARGET=build_tmp/src/github.com/codeskyblue/gosuv
+HOST="pi3-0"
+ssh pi@$HOST mkdir -p $TARGET
+
+rsync -avz -e "ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" --progress \
+	--exclude gosuv --exclude dist --exclude .git \
+	--delete \
+	. pi@$HOST:$TARGET
+
+echo "Build remotely ..."
+ssh pi@$HOST bash $TARGET/build_standalone.sh
+echo "Build finished, copying ..."
+scp pi@$HOST:$TARGET/gosuv ./dist/gosuv-linux-arm
+echo "All finished"
diff --git a/build_standalone.sh b/build_standalone.sh
new file mode 100755
index 0000000..67c1d08
--- /dev/null
+++ b/build_standalone.sh
@@ -0,0 +1,12 @@
+#!/bin/bash -
+#
+
+set -e
+set -o pipefail
+
+cd $(dirname $0)
+source "$HOME/.bash_profile"
+export GOPATH=$GOPATH:/home/pi/build_tmp
+
+go generate
+exec go build -tags vfs "$@"
diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml
deleted file mode 100644
index f983b60..0000000
--- a/vendor/github.com/gorilla/context/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-sudo: false
-
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - tip
diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md
deleted file mode 100644
index c60a31b..0000000
--- a/vendor/github.com/gorilla/context/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-context
-=======
-[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
-
-gorilla/context is a general purpose registry for global request variables.
-
-Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go
deleted file mode 100644
index 81cb128..0000000
--- a/vendor/github.com/gorilla/context/context.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context
-
-import (
-	"net/http"
-	"sync"
-	"time"
-)
-
-var (
-	mutex sync.RWMutex
-	data  = make(map[*http.Request]map[interface{}]interface{})
-	datat = make(map[*http.Request]int64)
-)
-
-// Set stores a value for a given key in a given request.
-func Set(r *http.Request, key, val interface{}) {
-	mutex.Lock()
-	if data[r] == nil {
-		data[r] = make(map[interface{}]interface{})
-		datat[r] = time.Now().Unix()
-	}
-	data[r][key] = val
-	mutex.Unlock()
-}
-
-// Get returns a value stored for a given key in a given request.
-func Get(r *http.Request, key interface{}) interface{} {
-	mutex.RLock()
-	if ctx := data[r]; ctx != nil {
-		value := ctx[key]
-		mutex.RUnlock()
-		return value
-	}
-	mutex.RUnlock()
-	return nil
-}
-
-// GetOk returns stored value and presence state like multi-value return of map access.
-func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
-	mutex.RLock()
-	if _, ok := data[r]; ok {
-		value, ok := data[r][key]
-		mutex.RUnlock()
-		return value, ok
-	}
-	mutex.RUnlock()
-	return nil, false
-}
-
-// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
-func GetAll(r *http.Request) map[interface{}]interface{} {
-	mutex.RLock()
-	if context, ok := data[r]; ok {
-		result := make(map[interface{}]interface{}, len(context))
-		for k, v := range context {
-			result[k] = v
-		}
-		mutex.RUnlock()
-		return result
-	}
-	mutex.RUnlock()
-	return nil
-}
-
-// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
-// the request was registered.
-func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
-	mutex.RLock()
-	context, ok := data[r]
-	result := make(map[interface{}]interface{}, len(context))
-	for k, v := range context {
-		result[k] = v
-	}
-	mutex.RUnlock()
-	return result, ok
-}
-
-// Delete removes a value stored for a given key in a given request.
-func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c7400..0000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. 
To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml index f983b60..ca377e6 100644 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -1,8 +1,22 @@ language: go sudo: false -go: - - 1.3 - - 1.4 - - 1.5 - - tip +matrix: + include: + - go: 1.2 + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: 1.8 + - go: tip + +install: + - # Skip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 55dd4e5..94d396c 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,218 +1,322 @@ -mux +gorilla/mux === [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. 
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) -Let's start registering a couple of URL paths and handlers: +![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } +http://www.gorillatoolkit.org/pkg/mux -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts and paths can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Listing Routes](#listing-routes) +* [Static Files](#static-files) +* [Registered URLs](#registered-urls) +* [Full Example](#full-example) - vars := mux.Vars(request) - category := vars["category"] +--- -And this is all you need to know about the basic usage. More advanced options -are explained below. +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: +## Examples +Let's start registering a couple of URL paths and handlers: + +```go +func main() { r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. 
This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.domain.com") +``` There are several other matchers that can be added. To match path prefixes: - r.PathPrefix("/products/") +```go +r.PathPrefix("/products/") +``` ...or HTTP methods: - r.Methods("GET", "POST") +```go +r.Methods("GET", "POST") +``` ...or URL schemes: - r.Schemes("https") +```go +r.Schemes("https") +``` ...or header values: - r.Headers("X-Requested-With", "XMLHttpRequest") +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` ...or query values: - r.Queries("key", "value") +```go +r.Queries("key", "value") +``` ...or to use a custom matcher function: - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` ...and finally, it is possible to combine several matchers in a single route: - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". -For example, let's say we have several URLs that should only match when the -host is `www.example.com`. Create a route for that host and get a "subrouter" -from it: +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` Then register routes in the subrouter: - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. -The three URL paths we registered above will only be tested if the domain is -`www.example.com`, because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + +### Listing Routes + +Routes on a mux can be listed using the Router.Walk method—useful for generating documentation: + +```go +package main + +import ( + "fmt" + "net/http" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler) + r.HandleFunc("/articles", handler) + r.HandleFunc("/articles/{id}", handler) + r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + t, err := route.GetPathTemplate() + if err != nil { + return err + } + fmt.Println(t) + return nil + }) + http.Handle("/", r) +} +``` + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. 
Defaults to the current dir") + flag.Parse() r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Registered URLs Now let's see how to build registered URLs. -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - url, err := r.Get("article").URL("category", "technology", "id", "42") +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` -...and the result will be a url.URL with the following path: +...and the result will be a `url.URL` with the following path: - "/articles/technology/42" +``` +"/articles/technology/42" +``` This also works for host variables: - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. Regex support also exists for matching Headers within a route. 
For example, we could do: - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") -And if you use subrouters, host and path defined separately can be built -as well: +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") +And if you use subrouters, host and path defined separately can be built as well: - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` ## Full Example -Here's a complete, runnable example of a small mux based server: +Here's a complete, runnable example of a small `mux` based server: ```go package main import ( "net/http" - + "log" "github.com/gorilla/mux" ) @@ -226,7 +330,7 @@ func main() { r.HandleFunc("/", YourHandler) // Bind to a port and pass our router in - http.ListenAndServe(":8000", r) + log.Fatal(http.ListenAndServe(":8000", r)) } ``` diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go new file mode 100644 index 0000000..d7adaa8 --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_gorilla.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package mux + +import ( + "net/http" + + "github.com/gorilla/context" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return context.Get(r, key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + context.Set(r, key, val) + return r +} + +func contextClear(r *http.Request) { + context.Clear(r) +} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go new file mode 100644 index 0000000..209cbea --- /dev/null +++ b/vendor/github.com/gorilla/mux/context_native.go @@ -0,0 +1,24 @@ +// +build go1.7 + +package mux + +import ( + "context" + "net/http" +) + +func contextGet(r *http.Request, key interface{}) interface{} { + return r.Context().Value(key) +} + +func contextSet(r *http.Request, key, val interface{}) *http.Request { + if val == nil { + return r + } + + return 
r.WithContext(context.WithValue(r.Context(), key, val)) +} + +func contextClear(r *http.Request) { + return +} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index 49798cb..00daf4a 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. /* -Package gorilla/mux implements a request router and dispatcher. +Package mux implements a request router and dispatcher. The name mux stands for "HTTP request multiplexer". Like the standard http.ServeMux, mux.Router matches incoming requests against a list of @@ -47,12 +47,21 @@ variable will be anything until the next slash. For example: r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + The names are used to create a map of route variables which can be retrieved calling mux.Vars(): vars := mux.Vars(request) category := vars["category"] +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + And this is all you need to know about the basic usage. More advanced options are explained below. @@ -136,6 +145,31 @@ the inner routes use it as base for their paths: // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) +Note that the path provided to PathPrefix() represents a "wildcard": calling +PathPrefix("/static/").Handler(...) means that the handler will be passed any +request that matches "/static/*". This makes it easy to serve static files with mux: + + func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) + } + Now let's see how to build registered URLs. Routes can be named. All routes that define a name can have their URLs built, diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 002051f..d66ec38 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -10,8 +10,7 @@ import ( "net/http" "path" "regexp" - - "github.com/gorilla/context" + "strings" ) // NewRouter returns a new router instance. @@ -48,8 +47,14 @@ type Router struct { namedRoutes map[string]*Route // See Router.StrictSlash(). This defines the flag for new routes. strictSlash bool - // If true, do not clear the request context after handling the request + // See Router.SkipClean(). This defines the flag for new routes. + skipClean bool + // If true, do not clear the request context after handling the request. + // This has no effect when go1.7+ is used, since the context is stored + // on the request itself. 
KeepContext bool + // see Router.UseEncodedPath(). This defines a flag for all routes. + useEncodedPath bool } // Match matches registered routes against the request. @@ -59,6 +64,12 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { return true } } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } return false } @@ -67,35 +78,38 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { // When there is a match, the route variables can be retrieved calling // mux.Vars(request). func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return + if !r.skipClean { + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + // Clean path to canonical form and redirect. + if p := cleanPath(path); p != path { + + // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. + // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: + // http://code.google.com/p/go/issues/detail?id=5252 + url := *req.URL + url.Path = p + p = url.String() + + w.Header().Set("Location", p) + w.WriteHeader(http.StatusMovedPermanently) + return + } } var match RouteMatch var handler http.Handler if r.Match(req, &match) { handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) + req = setVars(req, match.Vars) + req = setCurrentRoute(req, match.Route) } if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } + handler = http.NotFoundHandler() } if !r.KeepContext { - defer context.Clear(req) + defer contextClear(req) } handler.ServeHTTP(w, req) } @@ -130,6 +144,34 @@ func (r *Router) StrictSlash(value bool) *Router { return r } +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// This behavior has the drawback of needing to match routes against +// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix) +// to r.URL.Path will not affect routing when this flag is on and thus may +// induce unintended behavior. +// +// If not called, the router will match the unencoded path to the routes. +// For eg. 
"/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + // ---------------------------------------------------------------------------- // parentRoute // ---------------------------------------------------------------------------- @@ -167,7 +209,7 @@ func (r *Router) buildVars(m map[string]string) map[string]string { // NewRoute registers an empty route. func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} + route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} r.routes = append(r.routes, route) return route } @@ -233,7 +275,7 @@ func (r *Router) Schemes(schemes ...string) *Route { return r.NewRoute().Schemes(schemes...) } -// BuildVars registers a new route with a custom function for modifying +// BuildVarsFunc registers a new route with a custom function for modifying // route variables before building a URL. func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { return r.NewRoute().BuildVarsFunc(f) @@ -265,6 +307,9 @@ func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { if err == SkipRouter { continue } + if err != nil { + return err + } for _, sr := range t.matchers { if h, ok := sr.(*Router); ok { err := h.walk(walkFn, ancestors) @@ -305,7 +350,7 @@ const ( // Vars returns the route variables for the current request, if any. func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { + if rv := contextGet(r, varsKey); rv != nil { return rv.(map[string]string) } return nil @@ -317,24 +362,46 @@ func Vars(r *http.Request) map[string]string { // after the handler returns, unless the KeepContext option is set on the // Router. func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { + if rv := contextGet(r, routeKey); rv != nil { return rv.(*Route) } return nil } -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) +func setVars(r *http.Request, val interface{}) *http.Request { + return contextSet(r, varsKey, val) } -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) +func setCurrentRoute(r *http.Request, val interface{}) *http.Request { + return contextSet(r, routeKey, val) } // ---------------------------------------------------------------------------- // Helpers // ---------------------------------------------------------------------------- +// getPath returns the escaped path if possible; doing what URL.EscapedPath() +// which was added in go1.5 does +func getPath(req *http.Request) string { + if req.RequestURI != "" { + // Extract the path from RequestURI (which is escaped unlike URL.Path) + // as detailed here as detailed in https://golang.org/pkg/net/url/#URL + // for < 1.5 server side workaround + // http://localhost/path/here?v=1 -> /path/here + path := req.RequestURI + path = strings.TrimPrefix(path, req.URL.Scheme+`://`) + path = strings.TrimPrefix(path, req.URL.Host) + if i := strings.LastIndex(path, "?"); i > -1 { + path = path[:i] + } + if i := strings.LastIndex(path, "#"); i > -1 { + path = path[:i] + } + return path + } + return req.URL.Path +} + // cleanPath returns the canonical path for p, eliminating . and .. elements. // Borrowed from the net/http package. 
func cleanPath(p string) string { @@ -350,6 +417,7 @@ func cleanPath(p string) string { if p[len(p)-1] == '/' && np != "/" { np += "/" } + return np } @@ -365,6 +433,8 @@ func uniqueVars(s1, s2 []string) error { return nil } +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. func checkPairs(pairs ...string) (int, error) { length := len(pairs) if length%2 != 0 { @@ -374,7 +444,8 @@ func checkPairs(pairs ...string) (int, error) { return length, nil } -// mapFromPairs converts variadic string parameters to a string map. +// mapFromPairsToString converts variadic string parameters to a +// string to string map. func mapFromPairsToString(pairs ...string) (map[string]string, error) { length, err := checkPairs(pairs...) if err != nil { @@ -387,6 +458,8 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) { return m, nil } +// mapFromPairsToRegex converts variadic string paramers to a +// string to regex map. func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { length, err := checkPairs(pairs...) if err != nil { diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go index 06728dd..0189ad3 100644 --- a/vendor/github.com/gorilla/mux/regexp.go +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -24,7 +24,7 @@ import ( // Previously we accepted only Python-like identifiers for variable // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that // name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { +func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) { // Check if it is well-formed. idxs, errBraces := braceIndices(tpl) if errBraces != nil { @@ -73,14 +73,14 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash tpl[idxs[i]:end]) } // Build the regexp pattern. - varIdx := i / 2 - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt) + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + // Build the reverse template. fmt.Fprintf(reverse, "%s%%s", raw) // Append variable name and compiled pattern. - varsN[varIdx] = name - varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) if err != nil { return nil, err } @@ -109,16 +109,24 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash if errCompile != nil { return nil, errCompile } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + // Done! 
return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, + template: template, + matchHost: matchHost, + matchQuery: matchQuery, + strictSlash: strictSlash, + useEncodedPath: useEncodedPath, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, }, nil } @@ -133,6 +141,9 @@ type routeRegexp struct { matchQuery bool // The strictSlash value defined on the route, but disabled if PathPrefix was used. strictSlash bool + // Determines whether to use encoded path from getPath function or unencoded + // req.URL.Path for path matching + useEncodedPath bool // Expanded regexp. regexp *regexp.Regexp // Reverse template. @@ -148,10 +159,14 @@ func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { if !r.matchHost { if r.matchQuery { return r.matchQueryString(req) - } else { - return r.regexp.MatchString(req.URL.Path) } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } + return r.regexp.MatchString(path) } + return r.regexp.MatchString(getHost(req)) } @@ -181,10 +196,10 @@ func (r *routeRegexp) url(values map[string]string) (string, error) { return rv, nil } -// getUrlQuery returns a single query parameter from a request URL. +// getURLQuery returns a single query parameter from a request URL. // For a URL with foo=bar&baz=ding, we return only the relevant key // value pair for the routeRegexp. -func (r *routeRegexp) getUrlQuery(req *http.Request) string { +func (r *routeRegexp) getURLQuery(req *http.Request) string { if !r.matchQuery { return "" } @@ -198,14 +213,14 @@ func (r *routeRegexp) getUrlQuery(req *http.Request) string { } func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getUrlQuery(req)) + return r.regexp.MatchString(r.getURLQuery(req)) } // braceIndices returns the first level curly brace indices from a string. // It returns an error in case of unbalanced braces. func braceIndices(s string) ([]int, error) { var level, idx int - idxs := make([]int, 0) + var idxs []int for i := 0; i < len(s); i++ { switch s[i] { case '{': @@ -246,33 +261,24 @@ type routeRegexpGroup struct { func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { // Store host variables. if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - subexpNames := v.host.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.host.varsN[varName]] = hostVars[i+1] - varName++ - } - } + host := getHost(req) + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) } } + path := req.URL.Path + if r.useEncodedPath { + path = getPath(req) + } // Store path variables. if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - subexpNames := v.path.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.path.varsN[varName]] = pathVars[i+1] - varName++ - } - } + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) // Check if we should redirect. 
if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") + p1 := strings.HasSuffix(path, "/") p2 := strings.HasSuffix(v.path.template, "/") if p1 != p2 { u, _ := url.Parse(req.URL.String()) @@ -288,16 +294,10 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) } // Store query string variables. for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) - if queryVars != nil { - subexpNames := q.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[q.varsN[varName]] = queryVars[i+1] - varName++ - } - } + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) } } } @@ -315,3 +315,9 @@ func getHost(r *http.Request) string { return host } + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 8901304..9221915 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -26,6 +26,11 @@ type Route struct { // If true, when the path pattern is "/path/", accessing "/path" will // redirect to the former and vice versa. strictSlash bool + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool // If true, this route never matches: it is only used to build URLs. buildOnly bool // The name used to build URLs. @@ -36,6 +41,10 @@ type Route struct { buildVarsFunc BuildVarsFunc } +func (r *Route) SkipClean() bool { + return r.skipClean +} + // Match matches the route against the request. func (r *Route) Match(req *http.Request, match *RouteMatch) bool { if r.buildOnly || r.err != nil { @@ -144,14 +153,14 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery } r.regexp = r.getRegexpGroup() if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { + if tpl == "/" && (len(tpl) == 0 || tpl[0] != '/') { return fmt.Errorf("mux: path must start with a slash, got %q", tpl) } if r.regexp.path != nil { tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl } } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) + rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath) if err != nil { return err } @@ -200,15 +209,7 @@ func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { // "X-Requested-With", "XMLHttpRequest") // // The above route will only match if both request header values match. -// Alternatively, you can provide a regular expression and match the header as follows: -// -// r.Headers("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will the same as the previous example, with the addition of matching -// application/text as well. -// -// It the value is an empty string, it will match any value if the key is set. +// If the value is an empty string, it will match any value if the key is set. 
func (r *Route) Headers(pairs ...string) *Route { if r.err == nil { var headers map[string]string @@ -225,8 +226,9 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { return matchMapWithRegex(m, r.Header, true) } -// Regular expressions can be used with headers as well. -// It accepts a sequence of key/value pairs, where the value has regex support. For example +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// // r := mux.NewRouter() // r.HeadersRegexp("Content-Type", "application/(text|json)", // "X-Requested-With", "XMLHttpRequest") @@ -271,6 +273,7 @@ func (r *Route) Host(tpl string) *Route { // MatcherFunc is the function signature used by custom matchers. type MatcherFunc func(*http.Request, *RouteMatch) bool +// Match returns the match for a given request. func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { return m(r, match) } @@ -540,6 +543,36 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) { }, nil } +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. +func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp == nil || r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + // prepareVars converts the route variable pairs into a map. If the route has a // BuildVarsFunc, it is invoked. func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore index 0026861..ac71020 100644 --- a/vendor/github.com/gorilla/websocket/.gitignore +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -20,3 +20,6 @@ _cgo_export.* _testmain.go *.exe + +.idea/ +*.iml \ No newline at end of file diff --git a/vendor/github.com/gorilla/websocket/.travis.yml b/vendor/github.com/gorilla/websocket/.travis.yml index 8687342..3d8d29c 100644 --- a/vendor/github.com/gorilla/websocket/.travis.yml +++ b/vendor/github.com/gorilla/websocket/.travis.yml @@ -1,6 +1,19 @@ language: go +sudo: false -go: - - 1.1 - - 1.2 - - tip +matrix: + include: + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: 1.8 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md index 9ad75a0..33c3d2b 100644 --- a/vendor/github.com/gorilla/websocket/README.md +++ b/vendor/github.com/gorilla/websocket/README.md @@ -3,10 +3,15 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + ### Documentation * [API Reference](http://godoc.org/github.com/gorilla/websocket) * [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) * [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) ### Status @@ -41,7 +46,7 @@ subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn Send pings and receive pongsYesNo Get the type of a received data messageYesYes, see note 2 Other Features -Limit size of received messageYesNo +Compression ExtensionsExperimentalNo Read message using io.ReaderYesNo, see note 3 Write message using io.WriteCloserYesNo, see note 3 diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go index 93db8dd..1b0e69a 100644 --- a/vendor/github.com/gorilla/websocket/client.go +++ b/vendor/github.com/gorilla/websocket/client.go @@ -5,8 +5,10 @@ package websocket import ( + "bufio" "bytes" "crypto/tls" + "encoding/base64" "errors" "io" "io/ioutil" @@ -21,6 +23,8 @@ import ( // invalid. var ErrBadHandshake = errors.New("websocket: bad handshake") +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + // NewClient creates a new client connection using the given net connection. // The URL u specifies the host and request URI. Use requestHeader to specify // the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies @@ -30,50 +34,17 @@ var ErrBadHandshake = errors.New("websocket: bad handshake") // If the WebSocket handshake fails, ErrBadHandshake is returned along with a // non-nil *http.Response so that callers can handle redirects, authentication, // etc. +// +// Deprecated: Use Dialer instead. func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { - challengeKey, err := generateChallengeKey() - if err != nil { - return nil, nil, err - } - acceptKey := computeAcceptKey(challengeKey) - - c = newConn(netConn, false, readBufSize, writeBufSize) - p := c.writeBuf[:0] - p = append(p, "GET "...) - p = append(p, u.RequestURI()...) - p = append(p, " HTTP/1.1\r\nHost: "...) - p = append(p, u.Host...) - // "Upgrade" is capitalized for servers that do not use case insensitive - // comparisons on header tokens. - p = append(p, "\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Version: 13\r\nSec-WebSocket-Key: "...) - p = append(p, challengeKey...) - p = append(p, "\r\n"...) - for k, vs := range requestHeader { - for _, v := range vs { - p = append(p, k...) - p = append(p, ": "...) - p = append(p, v...) - p = append(p, "\r\n"...) - } - } - p = append(p, "\r\n"...) 
- - if _, err := netConn.Write(p); err != nil { - return nil, nil, err - } - - resp, err := http.ReadResponse(c.br, &http.Request{Method: "GET", URL: u}) - if err != nil { - return nil, nil, err - } - if resp.StatusCode != 101 || - !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || - !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || - resp.Header.Get("Sec-Websocket-Accept") != acceptKey { - return nil, resp, ErrBadHandshake + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, } - c.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") - return c, resp, nil + return d.Dial(u.String(), requestHeader) } // A Dialer contains options for connecting to WebSocket server. @@ -82,6 +53,12 @@ type Dialer struct { // NetDial is nil, net.Dial is used. NetDial func(network, addr string) (net.Conn, error) + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + // TLSClientConfig specifies the TLS configuration to use with tls.Client. // If nil, the default configuration is used. TLSClientConfig *tls.Config @@ -95,22 +72,30 @@ type Dialer struct { // Subprotocols specifies the client's requested subprotocols. Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar } var errMalformedURL = errors.New("malformed ws or wss URL") -// parseURL parses the URL. The url.Parse function is not used here because -// url.Parse mangles the path. +// parseURL parses the URL. +// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. func parseURL(s string) (*url.URL, error) { // From the RFC: // // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - // - // We don't use the net/url parser here because the dialer interface does - // not provide a way for applications to work around percent deocding in - // the net/url parser. - var u url.URL switch { case strings.HasPrefix(s, "ws://"): @@ -123,15 +108,23 @@ func parseURL(s string) (*url.URL, error) { return nil, errMalformedURL } - u.Host = s - u.Opaque = "/" + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + if i := strings.Index(s, "/"); i >= 0 { - u.Host = s[:i] u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" } + u.Host = s + if strings.Contains(u.Host, "@") { - // WebSocket URIs do not contain user information. + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. 
return nil, errMalformedURL } @@ -144,9 +137,12 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { hostNoPort = hostNoPort[:i] } else { - if u.Scheme == "wss" { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": hostPort += ":443" - } else { + default: hostPort += ":80" } } @@ -154,7 +150,9 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { } // DefaultDialer is a dialer with all fields set to the default zero values. -var DefaultDialer *Dialer +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} // Dial creates a new client connection. Use requestHeader to specify the // origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). @@ -166,15 +164,103 @@ var DefaultDialer *Dialer // etcetera. The response body may not contain the entire response and does not // need to be closed by the application. func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &Dialer{ + Proxy: http.ProxyFromEnvironment, + } + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + u, err := parseURL(urlStr) if err != nil { return nil, nil, err } + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") + } + hostPort, hostNoPort := hostPortNoPort(u) - if d == nil { - d = &Dialer{} + var proxyURL *url.URL + // Check wether the proxy method has been configured + if d.Proxy != nil { + proxyURL, err = d.Proxy(req) + } + if err != nil { + return nil, nil, err + } + + var targetHostPort string + if proxyURL != nil { + targetHostPort, _ = hostPortNoPort(proxyURL) + } else { + targetHostPort = hostPort } var deadline time.Time @@ -188,7 +274,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re netDial = netDialer.Dial } - netConn, err := netDial("tcp", hostPort) + netConn, err := netDial("tcp", targetHostPort) if err != nil { return nil, nil, err } @@ -203,13 +289,41 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re return nil, nil, err } - if u.Scheme == "wss" { - cfg := d.TLSClientConfig - if cfg == nil { - cfg = &tls.Config{ServerName: hostNoPort} - } else if cfg.ServerName == "" { - shallowCopy := *cfg - cfg = &shallowCopy + if proxyURL != nil { + connectHeader := make(http.Header) + if user := proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: hostPort}, + Host: hostPort, + Header: connectHeader, + } + + connectReq.Write(netConn) + + // Read response. + // Okay to use and discard buffered reader here, because + // TLS server will not speak until spoken to. 
+ br := bufio.NewReader(netConn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + return nil, nil, errors.New(f[1]) + } + } + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { cfg.ServerName = hostNoPort } tlsConn := tls.Client(netConn, cfg) @@ -224,45 +338,53 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re } } - if len(d.Subprotocols) > 0 { - h := http.Header{} - for k, v := range requestHeader { - h[k] = v - } - h.Set("Sec-Websocket-Protocol", strings.Join(d.Subprotocols, ", ")) - requestHeader = h + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err } - if len(requestHeader["Host"]) > 0 { - // This can be used to supply a Host: header which is different from - // the dial address. - u.Host = requestHeader.Get("Host") + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } - // Drop "Host" header - h := http.Header{} - for k, v := range requestHeader { - if k == "Host" { - continue - } - h[k] = v + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) } - requestHeader = h } - conn, resp, err := NewClient(netConn, u, requestHeader, d.ReadBufferSize, d.WriteBufferSize) + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } - if err != nil { - if err == ErrBadHandshake { - // Before closing the network connection on return from this - // function, slurp up some of the response to aid application - // debugging. - buf := make([]byte, 1024) - n, _ := io.ReadFull(resp.Body, buf) - resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + for _, ext := range parseExtensions(req.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression } - return nil, resp, err + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break } + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + netConn.SetDeadline(time.Time{}) netConn = nil // to avoid close in defer. return conn, resp, nil diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 0000000..4f0d943 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 0000000..babb007 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 0000000..813ffb1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. 
+type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go index a2374a8..4c0933b 100644 --- a/vendor/github.com/gorilla/websocket/conn.go +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -13,15 +13,25 @@ import ( "math/rand" "net" "strconv" + "sync" "time" + "unicode/utf8" ) const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask maxControlFramePayloadSize = 125 - finalBit = 1 << 7 - maskBit = 1 << 7 - writeWait = time.Second + + writeWait = time.Second defaultReadBufferSize = 4096 defaultWriteBufferSize = 4096 @@ -43,6 +53,8 @@ const ( CloseMessageTooBig = 1009 CloseMandatoryExtension = 1010 CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 CloseTLSHandshake = 1015 ) @@ -99,17 +111,81 @@ type CloseError struct { } func (e *CloseError) Error() string { - return "websocket: close " + strconv.Itoa(e.Code) + " " + e.Text + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) 
+ case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false } var ( - errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true} + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} errBadWriteOpCode = errors.New("websocket: bad write message type") errWriteClosed = errors.New("websocket: write closed") errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + func hideTempErr(err error) error { if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} @@ -125,49 +201,67 @@ func isData(frameType int) bool { return frameType == TextMessage || frameType == BinaryMessage } -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, } -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) } -// Conn represents a WebSocket connection. +// The Conn type represents a WebSocket connection. type Conn struct { conn net.Conn isServer bool subprotocol string // Write fields - mu chan bool // used as mutex to protect write to conn and closeSent - closeSent bool // true if close message was sent + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error - // Message writer fields. 
- writeErr error - writeBuf []byte // frame is constructed in this buffer. - writePos int // end of data in writeBuf. - writeFrameType int // type of the current frame. - writeSeq int // incremented to invalidate message writers. - writeDeadline time.Time + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser // Read fields + reader io.ReadCloser // the current reader returned to the application readErr error br *bufio.Reader readRemaining int64 // bytes remaining in current frame. readFinal bool // true the current message has more frames. - readSeq int // incremented to invalidate message readers. readLength int64 // Message size. readLimit int64 // Maximum message size. readMaskPos int readMaskKey [4]byte handlePong func(string) error handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser } func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { @@ -177,20 +271,24 @@ func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) if readBufferSize == 0 { readBufferSize = defaultReadBufferSize } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } if writeBufferSize == 0 { writeBufferSize = defaultWriteBufferSize } c := &Conn{ - isServer: isServer, - br: bufio.NewReaderSize(conn, readBufferSize), - conn: conn, - mu: mu, - readFinal: true, - writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), - writeFrameType: noFrame, - writePos: maxFrameHeaderSize, + isServer: isServer, + br: bufio.NewReaderSize(conn, readBufferSize), + conn: conn, + mu: mu, + readFinal: true, + writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, } + c.SetCloseHandler(nil) c.SetPingHandler(nil) c.SetPongHandler(nil) return c @@ -218,29 +316,40 @@ func (c *Conn) RemoteAddr() net.Addr { // Write methods +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { <-c.mu defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if frameType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) for _, buf := range bufs { if len(buf) > 0 { - n, err := c.conn.Write(buf) - if n != len(buf) { - // Close on partial write. 
- c.conn.Close() - } + _, err := c.conn.Write(buf) if err != nil { - return err + return c.writeFatal(err) } } } + + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } return nil } @@ -289,63 +398,104 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er } defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if messageType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) - n, err := c.conn.Write(buf) - if n != 0 && n != len(buf) { - c.conn.Close() + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() return err } -// NextWriter returns a writer for the next message to send. The writer's -// Close method flushes the complete message to the network. +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. // // There can be at most one open writer on a connection. NextWriter closes the // previous writer if the application has not already done so. -// -// The NextWriter method and the writers returned from the method cannot be -// accessed by more than one goroutine at a time. func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if c.writeErr != nil { - return nil, c.writeErr + if err := c.prepWrite(messageType); err != nil { + return nil, err } - if c.writeFrameType != noFrame { - if err := c.flushFrame(true, nil); err != nil { - return nil, err - } + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, } - - if !isControl(messageType) && !isData(messageType) { - return nil, errBadWriteOpCode + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w } + return c.writer, nil +} - c.writeFrameType = messageType - return messageWriter{c, c.writeSeq}, nil +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error } -func (c *Conn) flushFrame(final bool, extra []byte) error { - length := c.writePos - maxFrameHeaderSize + len(extra) +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) // Check for invalid control frames. 
- if isControl(c.writeFrameType) && + if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { - c.writeSeq++ - c.writeFrameType = noFrame - c.writePos = maxFrameHeaderSize - return errInvalidControlFrame + return w.fatal(errInvalidControlFrame) } - b0 := byte(c.writeFrameType) + b0 := byte(w.frameType) if final { b0 |= finalBit } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + b1 := byte(0) if !c.isServer { b1 |= maskBit @@ -377,49 +527,50 @@ func (c *Conn) flushFrame(final bool, extra []byte) error { if !c.isServer { key := newMaskKey() copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { - c.writeErr = errors.New("websocket: internal error, extra used in client mode") - return c.writeErr + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) } } - // Write the buffers to the connection. - c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. - // Setup for next frame. - c.writePos = maxFrameHeaderSize - c.writeFrameType = continuationFrame - if final { - c.writeSeq++ - c.writeFrameType = noFrame + if c.isWriting { + panic("concurrent write to websocket connection") } - return c.writeErr -} + c.isWriting = true -type messageWriter struct { - c *Conn - seq int -} + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) -func (w messageWriter) err() error { - c := w.c - if c.writeSeq != w.seq { - return errWriteClosed + if !c.isWriting { + panic("concurrent write to websocket connection") } - if c.writeErr != nil { - return c.writeErr + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + return nil } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame return nil } -func (w messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.c.writePos +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos if n <= 0 { - if err := w.c.flushFrame(false, nil); err != nil { + if err := w.flushFrame(false, nil); err != nil { return 0, err } - n = len(w.c.writeBuf) - w.c.writePos + n = len(w.c.writeBuf) - w.pos } if n > max { n = max @@ -427,14 +578,14 @@ func (w messageWriter) ncopy(max int) (int, error) { return n, nil } -func (w messageWriter) write(final bool, p []byte) (int, error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err } if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { // Don't buffer large messages. 
- err := w.c.flushFrame(final, p) + err := w.flushFrame(false, p) if err != nil { return 0, err } @@ -447,20 +598,16 @@ func (w messageWriter) write(final bool, p []byte) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } -func (w messageWriter) Write(p []byte) (int, error) { - return w.write(false, p) -} - -func (w messageWriter) WriteString(p string) (int, error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err } nn := len(p) @@ -469,27 +616,27 @@ func (w messageWriter) WriteString(p string) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } -func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if err := w.err(); err != nil { - return 0, err +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err } for { - if w.c.writePos == len(w.c.writeBuf) { - err = w.c.flushFrame(false, nil) + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) if err != nil { break } } var n int - n, err = r.Read(w.c.writeBuf[w.c.writePos:]) - w.c.writePos += n + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n nn += int64(n) if err != nil { if err == io.EOF { @@ -501,30 +648,64 @@ func (w messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { return nn, err } -func (w messageWriter) Close() error { - if err := w.err(); err != nil { +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { return err } - return w.c.flushFrame(true, nil) + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err } // WriteMessage is a helper method for getting a writer using NextWriter, // writing the message and closing the writer. func (c *Conn) WriteMessage(messageType int, data []byte) error { - wr, err := c.NextWriter(messageType) + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. 
+ + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) if err != nil { return err } - w := wr.(messageWriter) - if _, err := w.write(true, data); err != nil { + if _, err = w.Write(data); err != nil { return err } - if c.writeSeq == w.seq { - if err := c.flushFrame(true, nil); err != nil { - return err - } - } - return nil + return w.Close() } // SetWriteDeadline sets the write deadline on the underlying network @@ -538,22 +719,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error { // Read methods -// readFull is like io.ReadFull except that io.EOF is never returned. -func (c *Conn) readFull(p []byte) (err error) { - var n int - for n < len(p) && err == nil { - var nn int - nn, err = c.br.Read(p[n:]) - n += nn - } - if n == len(p) { - err = nil - } else if err == io.EOF { - err = errUnexpectedEOF - } - return -} - func (c *Conn) advanceFrame() (int, error) { // 1. Skip remainder of previous frame. @@ -566,19 +731,24 @@ func (c *Conn) advanceFrame() (int, error) { // 2. Read and parse first two bytes of frame header. - var b [8]byte - if err := c.readFull(b[:2]); err != nil { + p, err := c.read(2) + if err != nil { return noFrame, err } - final := b[0]&finalBit != 0 - frameType := int(b[0] & 0xf) - reserved := int((b[0] >> 4) & 0x7) - mask := b[1]&maskBit != 0 - c.readRemaining = int64(b[1] & 0x7f) + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) - if reserved != 0 { - return noFrame, c.handleProtocolError("unexpected reserved bits " + strconv.Itoa(reserved)) + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) } switch frameType { @@ -607,15 +777,17 @@ func (c *Conn) advanceFrame() (int, error) { switch c.readRemaining { case 126: - if err := c.readFull(b[:2]); err != nil { + p, err := c.read(2) + if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint16(b[:2])) + c.readRemaining = int64(binary.BigEndian.Uint16(p)) case 127: - if err := c.readFull(b[:8]); err != nil { + p, err := c.read(8) + if err != nil { return noFrame, err } - c.readRemaining = int64(binary.BigEndian.Uint64(b[:8])) + c.readRemaining = int64(binary.BigEndian.Uint64(p)) } // 4. Handle frame masking. @@ -626,9 +798,11 @@ func (c *Conn) advanceFrame() (int, error) { if mask { c.readMaskPos = 0 - if err := c.readFull(c.readMaskKey[:]); err != nil { + p, err := c.read(len(c.readMaskKey)) + if err != nil { return noFrame, err } + copy(c.readMaskKey[:], p) } // 5. For text and binary messages, enforce read limit and return. 
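// --- Editor's illustrative aside (not part of the vendored patch) ---
// The hunks above rework Conn.advanceFrame around the new read(n) helper.
// As a reading aid only, this standalone sketch decodes the fixed two-byte
// frame header the same way (RFC 6455, Section 5.2). The function and
// constant names here are hypothetical and are not gorilla/websocket code.
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	finalBit = 1 << 7 // FIN flag in header byte 0
	rsv1Bit  = 1 << 6 // RSV1, set on compressed (permessage-deflate) frames
	maskBit  = 1 << 7 // MASK flag in header byte 1
)

// decodeHeader reports the FIN/RSV1/MASK flags, the opcode and the payload
// length encoded in a frame header. p must contain the complete header,
// including the 2- or 8-byte extended length when the indicator is 126/127.
func decodeHeader(p []byte) (fin, rsv1, masked bool, opcode int, length uint64) {
	fin = p[0]&finalBit != 0
	rsv1 = p[0]&rsv1Bit != 0
	opcode = int(p[0] & 0xf)
	masked = p[1]&maskBit != 0
	switch l := p[1] & 0x7f; l {
	case 126: // the next 2 bytes carry a 16-bit length
		length = uint64(binary.BigEndian.Uint16(p[2:4]))
	case 127: // the next 8 bytes carry a 64-bit length
		length = binary.BigEndian.Uint64(p[2:10])
	default: // lengths up to 125 fit in the indicator itself
		length = uint64(l)
	}
	return
}

func main() {
	// 0x81 = FIN + text opcode (1); 0x05 = unmasked, 5-byte payload.
	fin, rsv1, masked, opcode, n := decodeHeader([]byte{0x81, 0x05})
	fmt.Println(fin, rsv1, masked, opcode, n) // true false false 1 5
}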
@@ -648,9 +822,9 @@ func (c *Conn) advanceFrame() (int, error) { var payload []byte if c.readRemaining > 0 { - payload = make([]byte, c.readRemaining) + payload, err = c.read(int(c.readRemaining)) c.readRemaining = 0 - if err := c.readFull(payload); err != nil { + if err != nil { return noFrame, err } if c.isServer { @@ -670,12 +844,20 @@ func (c *Conn) advanceFrame() (int, error) { return noFrame, err } case CloseMessage: - c.WriteControl(CloseMessage, []byte{}, time.Now().Add(writeWait)) closeCode := CloseNoStatusReceived closeText := "" if len(payload) >= 2 { closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err } return noFrame, &CloseError{Code: closeCode, Text: closeText} } @@ -694,11 +876,18 @@ func (c *Conn) handleProtocolError(message string) error { // There can be at most one open reader on a connection. NextReader discards // the previous message if the application has not already consumed it. // -// The NextReader method and the readers returned from the method cannot be -// accessed by more than one goroutine at a time. +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } - c.readSeq++ + c.messageReader = nil c.readLength = 0 for c.readErr == nil { @@ -708,59 +897,77 @@ func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { break } if frameType == TextMessage || frameType == BinaryMessage { - return frameType, messageReader{c, c.readSeq}, nil + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil } } - return noFrame, nil, c.readErr -} -type messageReader struct { - c *Conn - seq int + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr } -func (r messageReader) Read(b []byte) (int, error) { +type messageReader struct{ c *Conn } - if r.seq != r.c.readSeq { +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { return 0, io.EOF } - for r.c.readErr == nil { + for c.readErr == nil { - if r.c.readRemaining > 0 { - if int64(len(b)) > r.c.readRemaining { - b = b[:r.c.readRemaining] + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) } - n, err := r.c.br.Read(b) - r.c.readErr = hideTempErr(err) - if r.c.isServer { - r.c.readMaskPos = maskBytes(r.c.readMaskKey, r.c.readMaskPos, b[:n]) + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF } - r.c.readRemaining -= int64(n) - return n, r.c.readErr + return n, c.readErr } - if r.c.readFinal { - r.c.readSeq++ + if c.readFinal { + c.messageReader = nil return 0, io.EOF } - frameType, err := r.c.advanceFrame() + frameType, err := c.advanceFrame() switch { case err != nil: - r.c.readErr = hideTempErr(err) + c.readErr = hideTempErr(err) case frameType == TextMessage || frameType == BinaryMessage: - r.c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") } } - err := r.c.readErr - if err == io.EOF && r.seq == r.c.readSeq { + err := c.readErr + if err == io.EOF && c.messageReader == r { err = errUnexpectedEOF } return 0, err } +func (r *messageReader) Close() error { + return nil +} + // ReadMessage is a helper method for getting a reader using NextReader and // reading from that reader to a buffer. func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { @@ -788,21 +995,76 @@ func (c *Conn) SetReadLimit(limit int64) { c.readLimit = limit } +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := []byte{} + if code != CloseNoStatusReceived { + message = FormatCloseMessage(code, "") + } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + // SetPingHandler sets the handler for ping messages received from the peer. 
-// The default ping handler sends a pong to the peer. -func (c *Conn) SetPingHandler(h func(string) error) { +// The appData argument to h is the PING frame application data. The default +// ping handler sends a pong to the peer. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { - c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) - return nil + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err } } c.handlePing = h } +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + // SetPongHandler sets the handler for pong messages received from the peer. -// The default pong handler does nothing. -func (c *Conn) SetPongHandler(h func(string) error) { +// The appData argument to h is the PONG frame application data. The default +// pong handler does nothing. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. +func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } } @@ -815,6 +1077,25 @@ func (c *Conn) UnderlyingConn() net.Conn { return c.conn } +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + // FormatCloseMessage formats closeCode and text as a WebSocket close message. func FormatCloseMessage(closeCode int, text string) []byte { buf := make([]byte, 2+len(text)) diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go new file mode 100644 index 0000000..1ea1505 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go new file mode 100644 index 0000000..018541c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go @@ -0,0 +1,21 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.5 + +package websocket + +import "io" + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if len(p) > 0 { + // advance over the bytes just read + io.ReadFull(c.br, p) + } + return p, err +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go index f52925d..e291a95 100644 --- a/vendor/github.com/gorilla/websocket/doc.go +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -46,8 +46,7 @@ // method to get an io.WriteCloser, write the message to the writer and close // the writer when done. To receive a message, call the connection NextReader // method to get an io.Reader and read until io.EOF is returned. This snippet -// snippet shows how to echo messages using the NextWriter and NextReader -// methods: +// shows how to echo messages using the NextWriter and NextReader methods: // // for { // messageType, r, err := conn.NextReader() @@ -86,31 +85,23 @@ // and pong. Call the connection WriteControl, WriteMessage or NextWriter // methods to send a control message to the peer. // -// Connections handle received ping and pong messages by invoking a callback -// function set with SetPingHandler and SetPongHandler methods. These callback -// functions can be invoked from the ReadMessage method, the NextReader method -// or from a call to the data message reader returned from NextReader. +// Connections handle received close messages by sending a close message to the +// peer and returning a *CloseError from the the NextReader, ReadMessage or the +// message Read method. // -// Connections handle received close messages by returning an error from the -// ReadMessage method, the NextReader method or from a call to the data message -// reader returned from NextReader. -// -// Concurrency -// -// Connections do not support concurrent calls to the write methods -// (NextWriter, SetWriteDeadline, WriteMessage) or concurrent calls to the read -// methods methods (NextReader, SetReadDeadline, ReadMessage). Connections do -// support a concurrent reader and writer. -// -// The Close and WriteControl methods can be called concurrently with all other +// Connections handle received ping and pong messages by invoking callback +// functions set with SetPingHandler and SetPongHandler methods. The callback +// functions are called from the NextReader, ReadMessage and the message Read // methods. // -// Read is Required +// The default ping handler sends a pong to the peer. The application's reading +// goroutine can block for a short time while the handler writes the pong data +// to the connection. // -// The application must read the connection to process ping and close messages -// sent from the peer. If the application is not otherwise interested in -// messages from the peer, then the application should start a goroutine to read -// and discard messages from the peer. A simple example is: +// The application must read the connection to process ping, pong and close +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: // // func readLoop(c *websocket.Conn) { // for { @@ -121,6 +112,20 @@ // } // } // +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. 
+// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// // Origin Considerations // // Web browsers allow Javascript applications to open a WebSocket connection to @@ -138,11 +143,38 @@ // An application can allow connections from any origin by specifying a // function that always returns true: // -// var upgrader = websocket.Upgrader{ +// var upgrader = websocket.Upgrader{ // CheckOrigin: func(r *http.Request) bool { return true }, -// } +// } // // The deprecated Upgrade function does not enforce an origin policy. It's the // application's responsibility to check the Origin header before calling // Upgrade. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. package websocket diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 0000000..6a88bbc --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. 
+ b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 0000000..2aac060 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 0000000..1efffbd --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. 
+ mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go index e56a004..6f6ac83 100644 --- a/vendor/github.com/gorilla/websocket/server.go +++ b/vendor/github.com/gorilla/websocket/server.go @@ -46,6 +46,12 @@ type Upgrader struct { // CheckOrigin is nil, the host in the Origin header must not be set or // must match the host of the request. CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool } func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { @@ -53,6 +59,7 @@ func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status in if u.Error != nil { u.Error(w, r, status, err) } else { + w.Header().Set("Sec-Websocket-Version", "13") http.Error(w, http.StatusText(status), status) } return nil, err @@ -92,17 +99,28 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header // The responseHeader is included in the response to the client's upgrade // request. Use the responseHeader to specify cookies (Set-Cookie) and the // application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
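// --- Editor's illustrative aside (not part of the vendored patch) ---
// A minimal handler sketch showing how the Upgrader documented above is
// typically used, including the EnableCompression option this update adds.
// The handler name, route and echo behaviour are hypothetical examples,
// not something introduced by this patch.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	ReadBufferSize:    1024,
	WriteBufferSize:   1024,
	EnableCompression: true, // negotiation may still fail; see the field docs
	CheckOrigin:       func(r *http.Request) bool { return true },
}

func echo(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		// On failure, Upgrade has already written an HTTP error response.
		log.Println("upgrade:", err)
		return
	}
	defer c.Close()
	for {
		mt, msg, err := c.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		if err := c.WriteMessage(mt, msg); err != nil {
			log.Println("write:", err)
			return
		}
	}
}

func main() {
	http.HandleFunc("/ws", echo)
	log.Fatal(http.ListenAndServe(":8080", nil))
}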
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { - if values := r.Header["Sec-Websocket-Version"]; len(values) == 0 || values[0] != "13" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") } if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") } if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") } checkOrigin := u.CheckOrigin @@ -110,16 +128,28 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade checkOrigin = checkSameOrigin } if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") } challengeKey := r.Header.Get("Sec-Websocket-Key") if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") } subprotocol := u.selectSubprotocol(r, responseHeader) + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + var ( netConn net.Conn br *bufio.Reader @@ -145,6 +175,11 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) c.subprotocol = subprotocol + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + p := c.writeBuf[:0] p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) p = append(p, computeAcceptKey(challengeKey)...) @@ -154,6 +189,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, c.subprotocol...) p = append(p, "\r\n"...) } + if compress { + p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) 
+ } for k, vs := range responseHeader { if k == "Sec-Websocket-Protocol" { continue @@ -245,3 +283,10 @@ func Subprotocols(r *http.Request) []string { } return protocols } + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go index ffdc265..9a4908d 100644 --- a/vendor/github.com/gorilla/websocket/util.go +++ b/vendor/github.com/gorilla/websocket/util.go @@ -13,19 +13,6 @@ import ( "strings" ) -// tokenListContainsValue returns true if the 1#token header with the given -// name contains token. -func tokenListContainsValue(header http.Header, name string, value string) bool { - for _, v := range header[name] { - for _, s := range strings.Split(v, ",") { - if strings.EqualFold(value, strings.TrimSpace(s)) { - return true - } - } - } - return false -} - var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") func computeAcceptKey(challengeKey string) string { @@ -42,3 +29,186 @@ func generateChallengeKey() (string, error) { } return base64.StdEncoding.EncodeToString(p), nil } + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j += 1 + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j += 1 + } + } + return "", "" + } + } + return "", "" +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains token. 
+func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if strings.EqualFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensiosn parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/shurcooL/httpfs/vfsutil/file.go b/vendor/github.com/shurcooL/httpfs/vfsutil/file.go new file mode 100644 index 0000000..4cb0dad --- /dev/null +++ b/vendor/github.com/shurcooL/httpfs/vfsutil/file.go @@ -0,0 +1,21 @@ +package vfsutil + +import ( + "net/http" + "os" +) + +// File implements http.FileSystem using the native file system restricted to a +// specific file served at root. +// +// While the FileSystem.Open method takes '/'-separated paths, a File's string +// value is a filename on the native file system, not a URL, so it is separated +// by filepath.Separator, which isn't necessarily '/'. +type File string + +func (f File) Open(name string) (http.File, error) { + if name != "/" { + return nil, &os.PathError{Op: "open", Path: name, Err: os.ErrNotExist} + } + return os.Open(string(f)) +} diff --git a/vendor/github.com/shurcooL/httpfs/vfsutil/vfsutil.go b/vendor/github.com/shurcooL/httpfs/vfsutil/vfsutil.go new file mode 100644 index 0000000..df071d1 --- /dev/null +++ b/vendor/github.com/shurcooL/httpfs/vfsutil/vfsutil.go @@ -0,0 +1,39 @@ +// Package vfsutil implements some I/O utility functions for http.FileSystem. +package vfsutil + +import ( + "io/ioutil" + "net/http" + "os" +) + +// ReadDir reads the contents of the directory associated with file and +// returns a slice of FileInfo values in directory order. +func ReadDir(fs http.FileSystem, name string) ([]os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + defer f.Close() + return f.Readdir(0) +} + +// Stat returns the FileInfo structure describing file. 
+func Stat(fs http.FileSystem, name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + defer f.Close() + return f.Stat() +} + +// ReadFile reads the file named by path from fs and returns the contents. +func ReadFile(fs http.FileSystem, path string) ([]byte, error) { + rc, err := fs.Open(path) + if err != nil { + return nil, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} diff --git a/vendor/github.com/shurcooL/httpfs/vfsutil/walk.go b/vendor/github.com/shurcooL/httpfs/vfsutil/walk.go new file mode 100644 index 0000000..a773283 --- /dev/null +++ b/vendor/github.com/shurcooL/httpfs/vfsutil/walk.go @@ -0,0 +1,148 @@ +package vfsutil + +import ( + "io" + "net/http" + "os" + pathpkg "path" + "path/filepath" + "sort" + + "golang.org/x/tools/godoc/vfs" +) + +// Walk walks the filesystem rooted at root, calling walkFn for each file or +// directory in the filesystem, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order. +func Walk(fs http.FileSystem, root string, walkFn filepath.WalkFunc) error { + info, err := Stat(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. +func readDirNames(fs http.FileSystem, dirname string) ([]string, error) { + fis, err := ReadDir(fs, dirname) + if err != nil { + return nil, err + } + names := make([]string, len(fis)) + for i := range fis { + names[i] = fis[i].Name() + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn. +func walk(fs http.FileSystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := pathpkg.Join(path, name) + fileInfo, err := Stat(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// WalkFilesFunc is the type of the function called for each file or directory visited by WalkFiles. +// It's like filepath.WalkFunc, except it provides an additional ReadSeeker parameter for file being visited. +type WalkFilesFunc func(path string, info os.FileInfo, rs io.ReadSeeker, err error) error + +// WalkFiles walks the filesystem rooted at root, calling walkFn for each file or +// directory in the filesystem, including root. In addition to FileInfo, it passes an +// ReadSeeker to walkFn for each file it visits. +func WalkFiles(fs http.FileSystem, root string, walkFn WalkFilesFunc) error { + file, info, err := openStat(fs, root) + if err != nil { + return walkFn(root, nil, nil, err) + } + return walkFiles(fs, root, info, file, walkFn) +} + +// walkFiles recursively descends path, calling walkFn. +// It closes the input file after it's done with it, so the caller shouldn't. 
+func walkFiles(fs http.FileSystem, path string, info os.FileInfo, file vfs.ReadSeekCloser, walkFn WalkFilesFunc) error { + err := walkFn(path, info, file, nil) + file.Close() + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, nil, err) + } + + for _, name := range names { + filename := pathpkg.Join(path, name) + file, fileInfo, err := openStat(fs, filename) + if err != nil { + if err := walkFn(filename, nil, nil, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFiles(fs, filename, fileInfo, file, walkFn) + // file is closed by walkFiles, so we don't need to close it here. + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// openStat performs Open and Stat and returns results, or first error encountered. +// The caller is responsible for closing the returned file when done. +func openStat(fs http.FileSystem, name string) (http.File, os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, nil, err + } + return f, fi, nil +} diff --git a/vendor/github.com/shurcooL/vfsgen/.travis.yml b/vendor/github.com/shurcooL/vfsgen/.travis.yml new file mode 100644 index 0000000..f51cf6a --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.7 + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/shurcooL/vfsgen/CONTRIBUTING.md b/vendor/github.com/shurcooL/vfsgen/CONTRIBUTING.md new file mode 100644 index 0000000..6127ddc --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/CONTRIBUTING.md @@ -0,0 +1,10 @@ +Contributing +============ + +vfsgen is open source, thanks for considering contributing! + +Please note that vfsgen aims to be simple and minimalistic, with as little to configure as possible. If you'd like to remove or simplify code (while having tests continue to pass), fix bugs, or improve code (e.g., add missing error checking, etc.), PRs and issues are welcome. + +However, if you'd like to add new functionality that increases complexity or scope, please make an issue and discuss your proposal first. I'm unlikely to accept such changes outright. It might be that your request is already a part of other similar packages, or it might fit in their scope better. See [Comparison and Alternatives](https://github.com/shurcooL/vfsgen/tree/README-alternatives-and-comparison-section#comparison) sections. + +Thank you! 
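The vfsutil package vendored above mirrors the ioutil/filepath helpers over an `http.FileSystem` (ReadDir, Stat, ReadFile, Walk, WalkFiles). A minimal sketch of driving it, assuming a local `./assets` directory containing an `/index.html` file:

```Go
// Sketch only: the ./assets directory and /index.html path are assumptions.
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/shurcooL/httpfs/vfsutil"
)

func main() {
	var fs http.FileSystem = http.Dir("./assets")

	// Walk behaves like filepath.Walk, but over an http.FileSystem;
	// returning filepath.SkipDir from the callback prunes a directory.
	err := vfsutil.Walk(fs, "/", func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path, fi.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// ReadFile is the http.FileSystem analogue of ioutil.ReadFile.
	b, err := vfsutil.ReadFile(fs, "/index.html")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("/index.html: %d bytes\n", len(b))
}
```

WalkFiles works the same way but additionally hands the callback an io.ReadSeeker for each regular file, which is what vfsgen's generator (further down in this diff) uses to stream file contents without reopening them.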
diff --git a/vendor/github.com/shurcooL/vfsgen/README.md b/vendor/github.com/shurcooL/vfsgen/README.md new file mode 100644 index 0000000..78347d9 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/README.md @@ -0,0 +1,171 @@ +vfsgen +====== + +[![Build Status](https://travis-ci.org/shurcooL/vfsgen.svg?branch=master)](https://travis-ci.org/shurcooL/vfsgen) [![GoDoc](https://godoc.org/github.com/shurcooL/vfsgen?status.svg)](https://godoc.org/github.com/shurcooL/vfsgen) + +Package vfsgen takes an http.FileSystem (likely at `go generate` time) and +generates Go code that statically implements the provided http.FileSystem. + +Features: + +- Efficient generated code without unneccessary overhead. + +- Uses gzip compression internally (selectively, only for files that compress well). + +- Enables direct access to internal gzip compressed bytes via an optional interface. + +- Outputs `gofmt`ed Go code. + +Installation +------------ + +```bash +go get -u github.com/shurcooL/vfsgen +``` + +Usage +----- + +This code will generate an assets_vfsdata.go file with `var assets http.FileSystem = ...` that statically implements the contents of "assets" directory. + +```Go +var fs http.FileSystem = http.Dir("assets") + +err := vfsgen.Generate(fs, vfsgen.Options{}) +if err != nil { + log.Fatalln(err) +} +``` + +Then, in your program, you can use `assets` as any other [`http.FileSystem`](https://godoc.org/net/http#FileSystem), for example: + +```Go +file, err := assets.Open("/some/file.txt") +if err != nil { ... } +defer file.Close() +``` + +```Go +http.Handle("/assets/", http.FileServer(assets)) +``` + +### `go generate` Usage + +vfsgen is great to use with go generate directives. The code invoking `vfsgen.Generate` can go in an assets_generate.go file, which can then be invoked via "//go:generate go run assets_generate.go". The input virtual filesystem can read directly from disk, or it can be more involved. + +By using build tags, you can create a development mode where assets are loaded directly from disk via `http.Dir`, but then statically implemented for final releases. + +For example, suppose your source filesystem is defined in a package with import path "example.com/project/data" as: + +```Go +// +build dev + +package data + +// Assets contains project assets. +var Assets http.FileSystem = http.Dir("assets") +``` + +When built with the "dev" build tag, accessing `data.Assets` will read from disk directly via `http.Dir`. + +A generate helper file assets_generate.go can be invoked via "//go:generate go run -tags=dev assets_generate.go" directive: + +```Go +// +build ignore + +package main + +import ( + "log" + + "example.com/project/data" + "github.com/shurcooL/vfsgen" +) + +func main() { + err := vfsgen.Generate(data.Assets, vfsgen.Options{ + PackageName: "data", + BuildTags: "!dev", + VariableName: "Assets", + }) + if err != nil { + log.Fatalln(err) + } +} +``` + +Note that "dev" build tag is used to access the source filesystem, and the output file will contain "!dev" build tag. That way, the statically implemented version will be used during normal builds and `go get`, when custom builds tags are not specified. + +### `vfsgendev` Usage + +`vfsgendev` is a binary that can be used to replace the need for the assets_generate.go file. + +Make sure it's installed and available in your PATH. 
+ +```bash +go get -u github.com/shurcooL/vfsgen/cmd/vfsgendev +``` + +Then the "//go:generate go run -tags=dev assets_generate.go" directive can be replaced with: + +``` +//go:generate vfsgendev -source="example.com/project/data".Assets +``` + +vfsgendev accesses the source variable using "dev" build tag, and generates an output file with "!dev" build tag. + +### Additional Embedded Information + +All compressed files implement [`httpgzip.GzipByter` interface](https://godoc.org/github.com/shurcooL/httpgzip#GzipByter) for efficient direct access to the internal compressed bytes: + +```Go +// GzipByter is implemented by compressed files for +// efficient direct access to the internal compressed bytes. +type GzipByter interface { + // GzipBytes returns gzip compressed contents of the file. + GzipBytes() []byte +} +``` + +Files that have been determined to not be worth gzip compressing (their compressed size is larger than original) implement [`httpgzip.NotWorthGzipCompressing` interface](https://godoc.org/github.com/shurcooL/httpgzip#NotWorthGzipCompressing): + +```Go +// NotWorthGzipCompressing is implemented by files that were determined +// not to be worth gzip compressing (the file size did not decrease as a result). +type NotWorthGzipCompressing interface { + // NotWorthGzipCompressing is a noop. It's implemented in order to indicate + // the file is not worth gzip compressing. + NotWorthGzipCompressing() +} +``` + +Comparison +---------- + +vfsgen aims to be conceptually simple to use. The [`http.FileSystem`](https://godoc.org/net/http#FileSystem) abstraction is central to vfsgen. It's used as both input for code generation, and as output in the generated code. + +That enables great flexibility through orthogonality, since helpers and wrappers can operate on `http.FileSystem` without knowing about vfsgen. If you want, you can perform pre-processing, minifying assets, merging folders, filtering out files and otherwise modifying input via generic `http.FileSystem` middleware. + +It avoids unneccessary overhead by merging what was previously done with two distinct packages into a single package. + +It strives to be the best in its class in terms of code quality and efficiency of generated code. However, if your use goals are different, there are other similar packages that may fit your needs better. + +### Alternatives + +- [`go-bindata`](https://github.com/jteeuwen/go-bindata) - Reads from disk, generates Go code that provides access to data via a [custom API](https://github.com/jteeuwen/go-bindata#accessing-an-asset). +- [`go-bindata-assetfs`](https://github.com/elazarl/go-bindata-assetfs) - Takes output of go-bindata and provides a wrapper that implements `http.FileSystem` interface (the same as what vfsgen outputs directly). +- [`becky`](https://github.com/tv42/becky) - Embeds assets as string literals in Go source. +- [`statik`](https://github.com/rakyll/statik) - Embeds a directory of static files to be accessed via `http.FileSystem` interface (sounds very similar to vfsgen); implementation sourced from [camlistore](https://camlistore.org). +- [`go.rice`](https://github.com/GeertJohan/go.rice) - Makes working with resources such as HTML, JS, CSS, images and templates very easy. +- [`esc`](https://github.com/mjibson/esc) - Embeds files into Go programs and provides `http.FileSystem` interfaces to them. +- [`staticfiles`](https://github.com/bouk/staticfiles) - Allows you to embed a directory of files into your Go binary. 
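To make the two embedded-information interfaces above concrete, here is a hedged sketch of checking an opened file for pre-compressed bytes. The `printGzipInfo` helper and the paths passed to it are assumptions; in practice `fs` would be the vfsgen-generated `assets` variable:

```Go
// Sketch only: printGzipInfo and its arguments are illustrative; fs would
// normally be the vfsgen-generated `assets` http.FileSystem.
package main

import (
	"fmt"
	"log"
	"net/http"
)

func printGzipInfo(fs http.FileSystem, path string) {
	f, err := fs.Open(path)
	if err != nil {
		log.Fatalln(err)
	}
	defer f.Close()

	// Files that compressed well implement httpgzip.GzipByter; asserting
	// against an equivalent anonymous interface avoids importing httpgzip
	// just for this check.
	if gz, ok := f.(interface{ GzipBytes() []byte }); ok {
		fmt.Printf("%s: %d bytes of embedded gzip data\n", path, len(gz.GzipBytes()))
		return
	}
	// Otherwise the file is stored uncompressed (NotWorthGzipCompressing),
	// or path refers to a directory.
	fmt.Printf("%s: no embedded gzip form\n", path)
}

func main() {
	// http.Dir stands in for the generated filesystem here; its files never
	// implement GzipByter, so this always reports the uncompressed branch.
	printGzipInfo(http.Dir("assets"), "/some/file.txt")
}
```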
+ +Attribution +----------- + +This package was originally based on the excellent work by [@jteeuwen](https://github.com/jteeuwen) on [`go-bindata`](https://github.com/jteeuwen/go-bindata) and [@elazarl](https://github.com/elazarl) on [`go-bindata-assetfs`](https://github.com/elazarl/go-bindata-assetfs). + +License +------- + +- [MIT License](https://opensource.org/licenses/mit-license.php) diff --git a/vendor/github.com/shurcooL/vfsgen/commentwriter.go b/vendor/github.com/shurcooL/vfsgen/commentwriter.go new file mode 100644 index 0000000..b6847f5 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/commentwriter.go @@ -0,0 +1,45 @@ +package vfsgen + +import "io" + +// commentWriter writes a Go comment to the underlying io.Writer, +// using line comment form (//). +type commentWriter struct { + W io.Writer + wroteSlashes bool // Wrote "//" at the beginning of the current line. +} + +func (c *commentWriter) Write(p []byte) (int, error) { + var n int + for i, b := range p { + if !c.wroteSlashes { + s := "//" + if b != '\n' { + s = "// " + } + if _, err := io.WriteString(c.W, s); err != nil { + return n, err + } + c.wroteSlashes = true + } + n0, err := c.W.Write(p[i : i+1]) + n += n0 + if err != nil { + return n, err + } + if b == '\n' { + c.wroteSlashes = false + } + } + return len(p), nil +} + +func (c *commentWriter) Close() error { + if !c.wroteSlashes { + if _, err := io.WriteString(c.W, "//"); err != nil { + return err + } + c.wroteSlashes = true + } + return nil +} diff --git a/vendor/github.com/shurcooL/vfsgen/doc.go b/vendor/github.com/shurcooL/vfsgen/doc.go new file mode 100644 index 0000000..46f2850 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/doc.go @@ -0,0 +1,15 @@ +/* +Package vfsgen takes an http.FileSystem (likely at `go generate` time) and +generates Go code that statically implements the provided http.FileSystem. + +Features: + +- Efficient generated code without unneccessary overhead. + +- Uses gzip compression internally (selectively, only for files that compress well). + +- Enables direct access to internal gzip compressed bytes via an optional interface. + +- Outputs `gofmt`ed Go code. +*/ +package vfsgen diff --git a/vendor/github.com/shurcooL/vfsgen/generator.go b/vendor/github.com/shurcooL/vfsgen/generator.go new file mode 100644 index 0000000..ff40267 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/generator.go @@ -0,0 +1,500 @@ +package vfsgen + +import ( + "bytes" + "compress/gzip" + "errors" + "io" + "log" + "net/http" + "os" + pathpkg "path" + "sort" + "strconv" + "text/template" + "time" + + "github.com/shurcooL/httpfs/vfsutil" +) + +// Generate Go code that statically implements input filesystem, +// write the output to a file specified in opt. +func Generate(input http.FileSystem, opt Options) error { + opt.fillMissing() + + // Create output file. + f, err := os.Create(opt.Filename) + if err != nil { + return err + } + defer f.Close() + + err = t.ExecuteTemplate(f, "Header", opt) + if err != nil { + return err + } + + var toc toc + err = findAndWriteFiles(f, input, &toc) + if err != nil { + return err + } + + err = t.ExecuteTemplate(f, "DirEntries", toc.dirs) + if err != nil { + return err + } + + err = t.ExecuteTemplate(f, "Trailer", toc) + if err != nil { + return err + } + + // Trim any potential excess. + cur, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + err = f.Truncate(cur) + return err +} + +type toc struct { + dirs []*dirInfo + + HasCompressedFile bool // There's at least one compressedFile. 
+ HasFile bool // There's at least one uncompressed file. +} + +// fileInfo is a definition of a file. +type fileInfo struct { + Path string + Name string + ModTime time.Time + UncompressedSize int64 +} + +// dirInfo is a definition of a directory. +type dirInfo struct { + Path string + Name string + ModTime time.Time + Entries []string +} + +// findAndWriteFiles recursively finds all the file paths in the given directory tree. +// They are added to the given map as keys. Values will be safe function names +// for each file, which will be used when generating the output code. +func findAndWriteFiles(f *os.File, fs http.FileSystem, toc *toc) error { + walkFn := func(path string, fi os.FileInfo, r io.ReadSeeker, err error) error { + if err != nil { + log.Printf("can't stat file %q: %v\n", path, err) + return nil + } + + switch fi.IsDir() { + case false: + file := &fileInfo{ + Path: path, + Name: pathpkg.Base(path), + ModTime: fi.ModTime().UTC(), + UncompressedSize: fi.Size(), + } + + marker, err := f.Seek(0, io.SeekCurrent) + if err != nil { + return err + } + + // Write CompressedFileInfo. + err = writeCompressedFileInfo(f, file, r) + switch err { + default: + return err + case nil: + toc.HasCompressedFile = true + // If compressed file is not smaller than original, revert and write original file. + case errCompressedNotSmaller: + _, err = r.Seek(0, io.SeekStart) + if err != nil { + return err + } + + _, err = f.Seek(marker, io.SeekStart) + if err != nil { + return err + } + + // Write FileInfo. + err = writeFileInfo(f, file, r) + if err != nil { + return err + } + toc.HasFile = true + } + case true: + entries, err := readDirPaths(fs, path) + if err != nil { + return err + } + + dir := &dirInfo{ + Path: path, + Name: pathpkg.Base(path), + ModTime: fi.ModTime().UTC(), + Entries: entries, + } + + toc.dirs = append(toc.dirs, dir) + + // Write DirInfo. + err = t.ExecuteTemplate(f, "DirInfo", dir) + if err != nil { + return err + } + } + + return nil + } + + err := vfsutil.WalkFiles(fs, "/", walkFn) + return err +} + +// readDirPaths reads the directory named by dirname and returns +// a sorted list of directory paths. +func readDirPaths(fs http.FileSystem, dirname string) ([]string, error) { + fis, err := vfsutil.ReadDir(fs, dirname) + if err != nil { + return nil, err + } + paths := make([]string, len(fis)) + for i := range fis { + paths[i] = pathpkg.Join(dirname, fis[i].Name()) + } + sort.Strings(paths) + return paths, nil +} + +// writeCompressedFileInfo writes CompressedFileInfo. +// It returns errCompressedNotSmaller if compressed file is not smaller than original. +func writeCompressedFileInfo(w io.Writer, file *fileInfo, r io.Reader) error { + err := t.ExecuteTemplate(w, "CompressedFileInfo-Before", file) + if err != nil { + return err + } + sw := &stringWriter{Writer: w} + gw := gzip.NewWriter(sw) + _, err = io.Copy(gw, r) + if err != nil { + return err + } + err = gw.Close() + if err != nil { + return err + } + if sw.N >= file.UncompressedSize { + return errCompressedNotSmaller + } + err = t.ExecuteTemplate(w, "CompressedFileInfo-After", file) + return err +} + +var errCompressedNotSmaller = errors.New("compressed file is not smaller than original") + +// Write FileInfo. 
+func writeFileInfo(w io.Writer, file *fileInfo, r io.Reader) error { + err := t.ExecuteTemplate(w, "FileInfo-Before", file) + if err != nil { + return err + } + sw := &stringWriter{Writer: w} + _, err = io.Copy(sw, r) + if err != nil { + return err + } + err = t.ExecuteTemplate(w, "FileInfo-After", file) + return err +} + +var t = template.Must(template.New("").Funcs(template.FuncMap{ + "quote": func(s string) string { + return strconv.Quote(s) + }, + "quoteBytes": func(b []byte) string { + return strconv.Quote(string(b)) + }, + "comment": func(s string) (string, error) { + var buf bytes.Buffer + cw := &commentWriter{W: &buf} + _, err := io.WriteString(cw, s) + if err != nil { + return "", err + } + err = cw.Close() + return buf.String(), err + }, +}).Parse(`{{define "Header"}}// Code generated by vfsgen; DO NOT EDIT + +{{with .BuildTags}}// +build {{.}} + +{{end}}package {{.PackageName}} + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + pathpkg "path" + "time" +) + +{{comment .VariableComment}} +var {{.VariableName}} = func() http.FileSystem { + mustUnmarshalTextTime := func(text string) time.Time { + var t time.Time + err := t.UnmarshalText([]byte(text)) + if err != nil { + panic(err) + } + return t + } + + fs := vfsgen۰FS{ +{{end}} + + + +{{define "CompressedFileInfo-Before"}} {{quote .Path}}: &vfsgen۰CompressedFileInfo{ + name: {{quote .Name}}, + modTime: mustUnmarshalTextTime({{quoteBytes .ModTime.MarshalText}}), + uncompressedSize: {{.UncompressedSize}}, +{{/* This blank line separating compressedContent is neccessary to prevent potential gofmt issues. See issue #19. */}} + compressedContent: []byte("{{end}}{{define "CompressedFileInfo-After"}}"), + }, +{{end}} + + + +{{define "FileInfo-Before"}} {{quote .Path}}: &vfsgen۰FileInfo{ + name: {{quote .Name}}, + modTime: mustUnmarshalTextTime({{quoteBytes .ModTime.MarshalText}}), + content: []byte("{{end}}{{define "FileInfo-After"}}"), + }, +{{end}} + + + +{{define "DirInfo"}} {{quote .Path}}: &vfsgen۰DirInfo{ + name: {{quote .Name}}, + modTime: mustUnmarshalTextTime({{quoteBytes .ModTime.MarshalText}}), + }, +{{end}} + + + +{{define "DirEntries"}} } +{{range .}}{{if .Entries}} fs[{{quote .Path}}].(*vfsgen۰DirInfo).entries = []os.FileInfo{{"{"}}{{range .Entries}} + fs[{{quote .}}].(os.FileInfo),{{end}} + } +{{end}}{{end}} + return fs +}() +{{end}} + + + +{{define "Trailer"}} +type vfsgen۰FS map[string]interface{} + +func (fs vfsgen۰FS) Open(path string) (http.File, error) { + path = pathpkg.Clean("/" + path) + f, ok := fs[path] + if !ok { + return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} + } + + switch f := f.(type) {{"{"}}{{if .HasCompressedFile}} + case *vfsgen۰CompressedFileInfo: + gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) + if err != nil { + // This should never happen because we generate the gzip bytes such that they are always valid. + panic("unexpected error reading own gzip compressed bytes: " + err.Error()) + } + return &vfsgen۰CompressedFile{ + vfsgen۰CompressedFileInfo: f, + gr: gr, + }, nil{{end}}{{if .HasFile}} + case *vfsgen۰FileInfo: + return &vfsgen۰File{ + vfsgen۰FileInfo: f, + Reader: bytes.NewReader(f.content), + }, nil{{end}} + case *vfsgen۰DirInfo: + return &vfsgen۰Dir{ + vfsgen۰DirInfo: f, + }, nil + default: + // This should never happen because we generate only the above types. 
+ panic(fmt.Sprintf("unexpected type %T", f)) + } +} +{{if .HasCompressedFile}} +// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. +type vfsgen۰CompressedFileInfo struct { + name string + modTime time.Time + compressedContent []byte + uncompressedSize int64 +} + +func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { + return f.compressedContent +} + +func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } +func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } +func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } +func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } + +// vfsgen۰CompressedFile is an opened compressedFile instance. +type vfsgen۰CompressedFile struct { + *vfsgen۰CompressedFileInfo + gr *gzip.Reader + grPos int64 // Actual gr uncompressed position. + seekPos int64 // Seek uncompressed position. +} + +func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { + if f.grPos > f.seekPos { + // Rewind to beginning. + err = f.gr.Reset(bytes.NewReader(f.compressedContent)) + if err != nil { + return 0, err + } + f.grPos = 0 + } + if f.grPos < f.seekPos { + // Fast-forward. + _, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos) + if err != nil { + return 0, err + } + f.grPos = f.seekPos + } + n, err = f.gr.Read(p) + f.grPos += int64(n) + f.seekPos = f.grPos + return n, err +} +func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + f.seekPos = 0 + offset + case io.SeekCurrent: + f.seekPos += offset + case io.SeekEnd: + f.seekPos = f.uncompressedSize + offset + default: + panic(fmt.Errorf("invalid whence value: %v", whence)) + } + return f.seekPos, nil +} +func (f *vfsgen۰CompressedFile) Close() error { + return f.gr.Close() +} +{{else}} +// We already imported "compress/gzip" and "io/ioutil", but ended up not using them. Avoid unused import error. +var _ = gzip.Reader{} +var _ = ioutil.Discard +{{end}}{{if .HasFile}} +// vfsgen۰FileInfo is a static definition of an uncompressed file (because it's not worth gzip compressing). +type vfsgen۰FileInfo struct { + name string + modTime time.Time + content []byte +} + +func (f *vfsgen۰FileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰FileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰FileInfo) NotWorthGzipCompressing() {} + +func (f *vfsgen۰FileInfo) Name() string { return f.name } +func (f *vfsgen۰FileInfo) Size() int64 { return int64(len(f.content)) } +func (f *vfsgen۰FileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰FileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰FileInfo) IsDir() bool { return false } +func (f *vfsgen۰FileInfo) Sys() interface{} { return nil } + +// vfsgen۰File is an opened file instance. +type vfsgen۰File struct { + *vfsgen۰FileInfo + *bytes.Reader +} + +func (f *vfsgen۰File) Close() error { + return nil +} +{{else if not .HasCompressedFile}} +// We already imported "bytes", but ended up not using it. Avoid unused import error. 
+var _ = bytes.Reader{} +{{end}} +// vfsgen۰DirInfo is a static definition of a directory. +type vfsgen۰DirInfo struct { + name string + modTime time.Time + entries []os.FileInfo +} + +func (d *vfsgen۰DirInfo) Read([]byte) (int, error) { + return 0, fmt.Errorf("cannot Read from directory %s", d.name) +} +func (d *vfsgen۰DirInfo) Close() error { return nil } +func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil } + +func (d *vfsgen۰DirInfo) Name() string { return d.name } +func (d *vfsgen۰DirInfo) Size() int64 { return 0 } +func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir } +func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime } +func (d *vfsgen۰DirInfo) IsDir() bool { return true } +func (d *vfsgen۰DirInfo) Sys() interface{} { return nil } + +// vfsgen۰Dir is an opened dir instance. +type vfsgen۰Dir struct { + *vfsgen۰DirInfo + pos int // Position within entries for Seek and Readdir. +} + +func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) { + if offset == 0 && whence == io.SeekStart { + d.pos = 0 + return 0, nil + } + return 0, fmt.Errorf("unsupported Seek in directory %s", d.name) +} + +func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) { + if d.pos >= len(d.entries) && count > 0 { + return nil, io.EOF + } + if count <= 0 || count > len(d.entries)-d.pos { + count = len(d.entries) - d.pos + } + e := d.entries[d.pos : d.pos+count] + d.pos += count + return e, nil +} +{{end}}`)) diff --git a/vendor/github.com/shurcooL/vfsgen/options.go b/vendor/github.com/shurcooL/vfsgen/options.go new file mode 100644 index 0000000..d10d348 --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/options.go @@ -0,0 +1,45 @@ +package vfsgen + +import ( + "fmt" + "strings" +) + +// Options for vfsgen code generation. +type Options struct { + // Filename of the generated Go code output (including extension). + // If left empty, it defaults to "{{toLower .VariableName}}_vfsdata.go". + Filename string + + // PackageName is the name of the package in the generated code. + // If left empty, it defaults to "main". + PackageName string + + // BuildTags are the optional build tags in the generated code. + // The build tags syntax is specified by the go tool. + BuildTags string + + // VariableName is the name of the http.FileSystem variable in the generated code. + // If left empty, it defaults to "assets". + VariableName string + + // VariableComment is the comment of the http.FileSystem variable in the generated code. + // If left empty, it defaults to "{{.VariableName}} statically implements the virtual filesystem provided to vfsgen.". + VariableComment string +} + +// fillMissing sets default values for mandatory options that are left empty. 
+func (opt *Options) fillMissing() { + if opt.PackageName == "" { + opt.PackageName = "main" + } + if opt.VariableName == "" { + opt.VariableName = "assets" + } + if opt.Filename == "" { + opt.Filename = fmt.Sprintf("%s_vfsdata.go", strings.ToLower(opt.VariableName)) + } + if opt.VariableComment == "" { + opt.VariableComment = fmt.Sprintf("%s statically implements the virtual filesystem provided to vfsgen.", opt.VariableName) + } +} diff --git a/vendor/github.com/shurcooL/vfsgen/stringwriter.go b/vendor/github.com/shurcooL/vfsgen/stringwriter.go new file mode 100644 index 0000000..a781efd --- /dev/null +++ b/vendor/github.com/shurcooL/vfsgen/stringwriter.go @@ -0,0 +1,27 @@ +package vfsgen + +import ( + "io" +) + +// stringWriter writes given bytes to underlying io.Writer as a Go interpreted string literal value, +// not including double quotes. It tracks the total number of bytes written. +type stringWriter struct { + io.Writer + N int64 // Total bytes written. +} + +func (sw *stringWriter) Write(p []byte) (n int, err error) { + const hex = "0123456789abcdef" + buf := []byte{'\\', 'x', 0, 0} + for _, b := range p { + buf[2], buf[3] = hex[b/16], hex[b%16] + _, err = sw.Writer.Write(buf) + if err != nil { + return n, err + } + n++ + sw.N++ + } + return n, nil +} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/golang.org/x/tools/LICENSE similarity index 83% rename from vendor/github.com/gorilla/context/LICENSE rename to vendor/golang.org/x/tools/LICENSE index 0e5fb87..6a66aea 100644 --- a/vendor/github.com/gorilla/context/LICENSE +++ b/vendor/golang.org/x/tools/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/godoc/vfs/namespace.go b/vendor/golang.org/x/tools/godoc/vfs/namespace.go new file mode 100644 index 0000000..dbba20c --- /dev/null +++ b/vendor/golang.org/x/tools/godoc/vfs/namespace.go @@ -0,0 +1,381 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vfs + +import ( + "fmt" + "io" + "os" + pathpkg "path" + "sort" + "strings" + "time" +) + +// Setting debugNS = true will enable debugging prints about +// name space translations. +const debugNS = false + +// A NameSpace is a file system made up of other file systems +// mounted at specific locations in the name space. +// +// The representation is a map from mount point locations +// to the list of file systems mounted at that location. A traditional +// Unix mount table would use a single file system per mount point, +// but we want to be able to mount multiple file systems on a single +// mount point and have the system behave as if the union of those +// file systems were present at the mount point. 
+// For example, if the OS file system has a Go installation in +// c:\Go and additional Go path trees in d:\Work1 and d:\Work2, then +// this name space creates the view we want for the godoc server: +// +// NameSpace{ +// "/": { +// {old: "/", fs: OS(`c:\Go`), new: "/"}, +// }, +// "/src/pkg": { +// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"}, +// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"}, +// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"}, +// }, +// } +// +// This is created by executing: +// +// ns := NameSpace{} +// ns.Bind("/", OS(`c:\Go`), "/", BindReplace) +// ns.Bind("/src/pkg", OS(`d:\Work1`), "/src", BindAfter) +// ns.Bind("/src/pkg", OS(`d:\Work2`), "/src", BindAfter) +// +// A particular mount point entry is a triple (old, fs, new), meaning that to +// operate on a path beginning with old, replace that prefix (old) with new +// and then pass that path to the FileSystem implementation fs. +// +// Given this name space, a ReadDir of /src/pkg/code will check each prefix +// of the path for a mount point (first /src/pkg/code, then /src/pkg, then /src, +// then /), stopping when it finds one. For the above example, /src/pkg/code +// will find the mount point at /src/pkg: +// +// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"}, +// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"}, +// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"}, +// +// ReadDir will when execute these three calls and merge the results: +// +// OS(`c:\Go`).ReadDir("/src/pkg/code") +// OS(`d:\Work1').ReadDir("/src/code") +// OS(`d:\Work2').ReadDir("/src/code") +// +// Note that the "/src/pkg" in "/src/pkg/code" has been replaced by +// just "/src" in the final two calls. +// +// OS is itself an implementation of a file system: it implements +// OS(`c:\Go`).ReadDir("/src/pkg/code") as ioutil.ReadDir(`c:\Go\src\pkg\code`). +// +// Because the new path is evaluated by fs (here OS(root)), another way +// to read the mount table is to mentally combine fs+new, so that this table: +// +// {old: "/src/pkg", fs: OS(`c:\Go`), new: "/src/pkg"}, +// {old: "/src/pkg", fs: OS(`d:\Work1`), new: "/src"}, +// {old: "/src/pkg", fs: OS(`d:\Work2`), new: "/src"}, +// +// reads as: +// +// "/src/pkg" -> c:\Go\src\pkg +// "/src/pkg" -> d:\Work1\src +// "/src/pkg" -> d:\Work2\src +// +// An invariant (a redundancy) of the name space representation is that +// ns[mtpt][i].old is always equal to mtpt (in the example, ns["/src/pkg"]'s +// mount table entries always have old == "/src/pkg"). The 'old' field is +// useful to callers, because they receive just a []mountedFS and not any +// other indication of which mount point was found. +// +type NameSpace map[string][]mountedFS + +// A mountedFS handles requests for path by replacing +// a prefix 'old' with 'new' and then calling the fs methods. +type mountedFS struct { + old string + fs FileSystem + new string +} + +// hasPathPrefix returns true if x == y or x == y + "/" + more +func hasPathPrefix(x, y string) bool { + return x == y || strings.HasPrefix(x, y) && (strings.HasSuffix(y, "/") || strings.HasPrefix(x[len(y):], "/")) +} + +// translate translates path for use in m, replacing old with new. +// +// mountedFS{"/src/pkg", fs, "/src"}.translate("/src/pkg/code") == "/src/code". 
+func (m mountedFS) translate(path string) string { + path = pathpkg.Clean("/" + path) + if !hasPathPrefix(path, m.old) { + panic("translate " + path + " but old=" + m.old) + } + return pathpkg.Join(m.new, path[len(m.old):]) +} + +func (NameSpace) String() string { + return "ns" +} + +// Fprint writes a text representation of the name space to w. +func (ns NameSpace) Fprint(w io.Writer) { + fmt.Fprint(w, "name space {\n") + var all []string + for mtpt := range ns { + all = append(all, mtpt) + } + sort.Strings(all) + for _, mtpt := range all { + fmt.Fprintf(w, "\t%s:\n", mtpt) + for _, m := range ns[mtpt] { + fmt.Fprintf(w, "\t\t%s %s\n", m.fs, m.new) + } + } + fmt.Fprint(w, "}\n") +} + +// clean returns a cleaned, rooted path for evaluation. +// It canonicalizes the path so that we can use string operations +// to analyze it. +func (NameSpace) clean(path string) string { + return pathpkg.Clean("/" + path) +} + +type BindMode int + +const ( + BindReplace BindMode = iota + BindBefore + BindAfter +) + +// Bind causes references to old to redirect to the path new in newfs. +// If mode is BindReplace, old redirections are discarded. +// If mode is BindBefore, this redirection takes priority over existing ones, +// but earlier ones are still consulted for paths that do not exist in newfs. +// If mode is BindAfter, this redirection happens only after existing ones +// have been tried and failed. +func (ns NameSpace) Bind(old string, newfs FileSystem, new string, mode BindMode) { + old = ns.clean(old) + new = ns.clean(new) + m := mountedFS{old, newfs, new} + var mtpt []mountedFS + switch mode { + case BindReplace: + mtpt = append(mtpt, m) + case BindAfter: + mtpt = append(mtpt, ns.resolve(old)...) + mtpt = append(mtpt, m) + case BindBefore: + mtpt = append(mtpt, m) + mtpt = append(mtpt, ns.resolve(old)...) + } + + // Extend m.old, m.new in inherited mount point entries. + for i := range mtpt { + m := &mtpt[i] + if m.old != old { + if !hasPathPrefix(old, m.old) { + // This should not happen. If it does, panic so + // that we can see the call trace that led to it. + panic(fmt.Sprintf("invalid Bind: old=%q m={%q, %s, %q}", old, m.old, m.fs.String(), m.new)) + } + suffix := old[len(m.old):] + m.old = pathpkg.Join(m.old, suffix) + m.new = pathpkg.Join(m.new, suffix) + } + } + + ns[old] = mtpt +} + +// resolve resolves a path to the list of mountedFS to use for path. +func (ns NameSpace) resolve(path string) []mountedFS { + path = ns.clean(path) + for { + if m := ns[path]; m != nil { + if debugNS { + fmt.Printf("resolve %s: %v\n", path, m) + } + return m + } + if path == "/" { + break + } + path = pathpkg.Dir(path) + } + return nil +} + +// Open implements the FileSystem Open method. +func (ns NameSpace) Open(path string) (ReadSeekCloser, error) { + var err error + for _, m := range ns.resolve(path) { + if debugNS { + fmt.Printf("tx %s: %v\n", path, m.translate(path)) + } + r, err1 := m.fs.Open(m.translate(path)) + if err1 == nil { + return r, nil + } + if err == nil { + err = err1 + } + } + if err == nil { + err = &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} + } + return nil, err +} + +// stat implements the FileSystem Stat and Lstat methods. 
+func (ns NameSpace) stat(path string, f func(FileSystem, string) (os.FileInfo, error)) (os.FileInfo, error) { + var err error + for _, m := range ns.resolve(path) { + fi, err1 := f(m.fs, m.translate(path)) + if err1 == nil { + return fi, nil + } + if err == nil { + err = err1 + } + } + if err == nil { + err = &os.PathError{Op: "stat", Path: path, Err: os.ErrNotExist} + } + return nil, err +} + +func (ns NameSpace) Stat(path string) (os.FileInfo, error) { + return ns.stat(path, FileSystem.Stat) +} + +func (ns NameSpace) Lstat(path string) (os.FileInfo, error) { + return ns.stat(path, FileSystem.Lstat) +} + +// dirInfo is a trivial implementation of os.FileInfo for a directory. +type dirInfo string + +func (d dirInfo) Name() string { return string(d) } +func (d dirInfo) Size() int64 { return 0 } +func (d dirInfo) Mode() os.FileMode { return os.ModeDir | 0555 } +func (d dirInfo) ModTime() time.Time { return startTime } +func (d dirInfo) IsDir() bool { return true } +func (d dirInfo) Sys() interface{} { return nil } + +var startTime = time.Now() + +// ReadDir implements the FileSystem ReadDir method. It's where most of the magic is. +// (The rest is in resolve.) +// +// Logically, ReadDir must return the union of all the directories that are named +// by path. In order to avoid misinterpreting Go packages, of all the directories +// that contain Go source code, we only include the files from the first, +// but we include subdirectories from all. +// +// ReadDir must also return directory entries needed to reach mount points. +// If the name space looks like the example in the type NameSpace comment, +// but c:\Go does not have a src/pkg subdirectory, we still want to be able +// to find that subdirectory, because we've mounted d:\Work1 and d:\Work2 +// there. So if we don't see "src" in the directory listing for c:\Go, we add an +// entry for it before returning. +// +func (ns NameSpace) ReadDir(path string) ([]os.FileInfo, error) { + path = ns.clean(path) + + var ( + haveGo = false + haveName = map[string]bool{} + all []os.FileInfo + err error + first []os.FileInfo + ) + + for _, m := range ns.resolve(path) { + dir, err1 := m.fs.ReadDir(m.translate(path)) + if err1 != nil { + if err == nil { + err = err1 + } + continue + } + + if dir == nil { + dir = []os.FileInfo{} + } + + if first == nil { + first = dir + } + + // If we don't yet have Go files in 'all' and this directory + // has some, add all the files from this directory. + // Otherwise, only add subdirectories. + useFiles := false + if !haveGo { + for _, d := range dir { + if strings.HasSuffix(d.Name(), ".go") { + useFiles = true + haveGo = true + break + } + } + } + + for _, d := range dir { + name := d.Name() + if (d.IsDir() || useFiles) && !haveName[name] { + haveName[name] = true + all = append(all, d) + } + } + } + + // We didn't find any directories containing Go files. + // If some directory returned successfully, use that. + if !haveGo { + for _, d := range first { + if !haveName[d.Name()] { + haveName[d.Name()] = true + all = append(all, d) + } + } + } + + // Built union. Add any missing directories needed to reach mount points. + for old := range ns { + if hasPathPrefix(old, path) && old != path { + // Find next element after path in old. 
+ elem := old[len(path):] + elem = strings.TrimPrefix(elem, "/") + if i := strings.Index(elem, "/"); i >= 0 { + elem = elem[:i] + } + if !haveName[elem] { + haveName[elem] = true + all = append(all, dirInfo(elem)) + } + } + } + + if len(all) == 0 { + return nil, err + } + + sort.Sort(byName(all)) + return all, nil +} + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } diff --git a/vendor/golang.org/x/tools/godoc/vfs/os.go b/vendor/golang.org/x/tools/godoc/vfs/os.go new file mode 100644 index 0000000..fa98142 --- /dev/null +++ b/vendor/golang.org/x/tools/godoc/vfs/os.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package vfs + +import ( + "fmt" + "io/ioutil" + "os" + pathpkg "path" + "path/filepath" +) + +// OS returns an implementation of FileSystem reading from the +// tree rooted at root. Recording a root is convenient everywhere +// but necessary on Windows, because the slash-separated path +// passed to Open has no way to specify a drive letter. Using a root +// lets code refer to OS(`c:\`), OS(`d:\`) and so on. +func OS(root string) FileSystem { + return osFS(root) +} + +type osFS string + +func (root osFS) String() string { return "os(" + string(root) + ")" } + +func (root osFS) resolve(path string) string { + // Clean the path so that it cannot possibly begin with ../. + // If it did, the result of filepath.Join would be outside the + // tree rooted at root. We probably won't ever see a path + // with .. in it, but be safe anyway. + path = pathpkg.Clean("/" + path) + + return filepath.Join(string(root), path) +} + +func (root osFS) Open(path string) (ReadSeekCloser, error) { + f, err := os.Open(root.resolve(path)) + if err != nil { + return nil, err + } + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + if fi.IsDir() { + f.Close() + return nil, fmt.Errorf("Open: %s is a directory", path) + } + return f, nil +} + +func (root osFS) Lstat(path string) (os.FileInfo, error) { + return os.Lstat(root.resolve(path)) +} + +func (root osFS) Stat(path string) (os.FileInfo, error) { + return os.Stat(root.resolve(path)) +} + +func (root osFS) ReadDir(path string) ([]os.FileInfo, error) { + return ioutil.ReadDir(root.resolve(path)) // is sorted +} diff --git a/vendor/golang.org/x/tools/godoc/vfs/vfs.go b/vendor/golang.org/x/tools/godoc/vfs/vfs.go new file mode 100644 index 0000000..937c2b2 --- /dev/null +++ b/vendor/golang.org/x/tools/godoc/vfs/vfs.go @@ -0,0 +1,45 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package vfs defines types for abstract file system access and provides an +// implementation accessing the file system of the underlying OS. +package vfs + +import ( + "io" + "io/ioutil" + "os" +) + +// The FileSystem interface specifies the methods godoc is using +// to access the file system for which it serves documentation. +type FileSystem interface { + Opener + Lstat(path string) (os.FileInfo, error) + Stat(path string) (os.FileInfo, error) + ReadDir(path string) ([]os.FileInfo, error) + String() string +} + +// Opener is a minimal virtual filesystem that can only open regular files. 
+type Opener interface { + Open(name string) (ReadSeekCloser, error) +} + +// A ReadSeekCloser can Read, Seek, and Close. +type ReadSeekCloser interface { + io.Reader + io.Seeker + io.Closer +} + +// ReadFile reads the file named by path from fs and returns the contents. +func ReadFile(fs Opener, path string) ([]byte, error) { + rc, err := fs.Open(path) + if err != nil { + return nil, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} diff --git a/web.go b/web.go index 91851d8..9ecc2c7 100644 --- a/web.go +++ b/web.go @@ -24,6 +24,7 @@ import ( "github.com/gorilla/mux" "github.com/gorilla/websocket" "github.com/qiniu/log" + _ "github.com/shurcooL/vfsgen" ) var defaultConfigDir string
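The final hunk's blank vfsgen import in web.go appears to be there only so the generator dependency stays in the vendor tree for `go generate`, and should have no runtime effect. The vendored golang.org/x/tools/godoc/vfs package above is also usable directly; a minimal sketch of its NameSpace, OS, and ReadFile API, where the `./primary` and `./overlay` directories and the `/README.md` path are illustrative assumptions:

```Go
// Sketch only: the directory paths and file name below are assumptions.
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/godoc/vfs"
)

func main() {
	// Union two directory trees under a single rooted name space, as
	// described in the NameSpace documentation above.
	ns := vfs.NameSpace{}
	ns.Bind("/", vfs.OS("./primary"), "/", vfs.BindReplace)
	ns.Bind("/", vfs.OS("./overlay"), "/", vfs.BindAfter)

	// ReadFile accepts any Opener, including the name space itself.
	b, err := vfs.ReadFile(ns, "/README.md")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes through the union filesystem\n", len(b))

	// ReadDir merges entries from every filesystem bound at "/".
	fis, err := ns.ReadDir("/")
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range fis {
		fmt.Println(fi.Name())
	}
}
```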