change godep to glide

master
codeskyblue 7 years ago
parent 53e1c397fa
commit 5cde531d83

124
Godeps/Godeps.json generated

@ -1,124 +0,0 @@
{
"ImportPath": "github.com/codeskyblue/gosuv",
"GoVersion": "go1.7",
"GodepVersion": "v79",
"Deps": [
{
"ImportPath": "github.com/codeskyblue/kexec",
"Rev": "863094f94c7fb7c235764bf8f0f79cccea78c8eb"
},
{
"ImportPath": "github.com/equinox-io/equinox",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/equinox-io/equinox/internal/go-update",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/equinox-io/equinox/internal/go-update/internal/binarydist",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/equinox-io/equinox/internal/go-update/internal/osext",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/equinox-io/equinox/internal/osext",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/equinox-io/equinox/proto",
"Rev": "6f97d0d3970881d3e53dd6f547a41109eb055e54"
},
{
"ImportPath": "github.com/glycerine/rbuf",
"Rev": "e57eda7aada105c8b00090b66e338a6413c3fbe9"
},
{
"ImportPath": "github.com/go-yaml/yaml",
"Rev": "e4d366fc3c7938e2958e662b4258c7a89e1f0e3e"
},
{
"ImportPath": "github.com/goji/httpauth",
"Rev": "2da839ab0f4df05a6db5eb277995589dadbd4fb9"
},
{
"ImportPath": "github.com/gorilla/mux",
"Comment": "v1.3.0-1-g94e7d24",
"Rev": "94e7d24fd285520f3d12ae998f7fdd6b5393d453"
},
{
"ImportPath": "github.com/gorilla/websocket",
"Comment": "v1.1.0-24-g3f3e394",
"Rev": "3f3e394da2b801fbe732a935ef40724762a67a07"
},
{
"ImportPath": "github.com/jtolds/gls",
"Rev": "9a4a02dbe491bef4bab3c24fd9f3087d6c4c6690"
},
{
"ImportPath": "github.com/kennygrant/sanitize",
"Rev": "bf9c39a678d8e26aeee60d5fe733cad47a7a6871"
},
{
"ImportPath": "github.com/mitchellh/go-ps",
"Rev": "e2d21980687ce16e58469d98dcee92d27fbbd7fb"
},
{
"ImportPath": "github.com/qiniu/log",
"Comment": "v1.0.00-2-ge002bc2",
"Rev": "e002bc2020b19bfa61ed378cc5407383dbd2f346"
},
{
"ImportPath": "github.com/shurcooL/httpfs/vfsutil",
"Rev": "df3d5d88c59699064c4449a2d4e3db0f07e74ed9"
},
{
"ImportPath": "github.com/shurcooL/vfsgen",
"Rev": "8bd98c96e6f6800019cdcd1183547b1c8e89d280"
},
{
"ImportPath": "github.com/smartystreets/assertions",
"Comment": "1.5.0-405-g01fedaa",
"Rev": "01fedaa993c0a9f9aa55111501cd7c81a49e812e"
},
{
"ImportPath": "github.com/smartystreets/assertions/internal/oglematchers",
"Comment": "1.5.0-405-g01fedaa",
"Rev": "01fedaa993c0a9f9aa55111501cd7c81a49e812e"
},
{
"ImportPath": "github.com/smartystreets/goconvey/convey",
"Comment": "1.5.0-449-g530dd3d",
"Rev": "530dd3d2cafa09958178e8c557bc4abeea1651a5"
},
{
"ImportPath": "github.com/smartystreets/goconvey/convey/gotest",
"Comment": "1.5.0-449-g530dd3d",
"Rev": "530dd3d2cafa09958178e8c557bc4abeea1651a5"
},
{
"ImportPath": "github.com/smartystreets/goconvey/convey/reporting",
"Comment": "1.5.0-449-g530dd3d",
"Rev": "530dd3d2cafa09958178e8c557bc4abeea1651a5"
},
{
"ImportPath": "github.com/urfave/cli",
"Comment": "v1.18.0-47-g168c954",
"Rev": "168c95418e66e019fe17b8f4f5c45aa62ff80e23"
},
{
"ImportPath": "golang.org/x/net/html",
"Rev": "6b27048ae5e6ad1ef927e72e437531493de612fe"
},
{
"ImportPath": "golang.org/x/net/html/atom",
"Rev": "6b27048ae5e6ad1ef927e72e437531493de612fe"
},
{
"ImportPath": "golang.org/x/tools/godoc/vfs",
"Rev": "513c731aab546b4628168d772550499740fe9dd2"
}
]
}

5
Godeps/Readme generated

@ -1,5 +0,0 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

70
glide.lock generated

@ -0,0 +1,70 @@
hash: ad7063d34838040bf683183342f3814298179801e2e055f29bca796456c88300
updated: 2017-08-11T19:08:31.193345249+08:00
imports:
- name: github.com/codeskyblue/kexec
version: 863094f94c7fb7c235764bf8f0f79cccea78c8eb
- name: github.com/equinox-io/equinox
version: 6f97d0d3970881d3e53dd6f547a41109eb055e54
subpackages:
- internal/go-update
- internal/go-update/internal/binarydist
- internal/go-update/internal/osext
- internal/osext
- proto
- name: github.com/franela/goreq
version: b5b0f5eb2d16f20345cce0a544a75163579c0b00
- name: github.com/glycerine/rbuf
version: 96ad00d7fa74f7dd9857f2b6068451062b4ebc5d
- name: github.com/go-yaml/yaml
version: 25c4ec802a7d637f88d584ab26798e94ad14c13b
- name: github.com/goji/httpauth
version: 2da839ab0f4df05a6db5eb277995589dadbd4fb9
- name: github.com/gorilla/context
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
- name: github.com/gorilla/mux
version: ac112f7d75a0714af1bd86ab17749b31f7809640
- name: github.com/gorilla/websocket
version: a69d9f6de432e2c6b296a947d8a5ee88f68522cf
- name: github.com/kennygrant/sanitize
version: 6a0bfdde8629a3a3a7418a7eae45c54154692514
- name: github.com/mitchellh/go-ps
version: 4fdf99ab29366514c69ccccddab5dc58b8d84062
- name: github.com/qiniu/log
version: a304a74568d6982c5b89de1c68ac8fca3add196a
- name: github.com/shurcooL/httpfs
version: bc35257962c2dea93e81c976b72c7c6eac45fd8a
subpackages:
- vfsutil
- name: github.com/shurcooL/vfsgen
version: 385e5833a54aaba5860ca26036b8e8b72135ab96
- name: github.com/urfave/cli
version: cfb38830724cc34fedffe9a2a29fb54fa9169cd1
- name: golang.org/x/net
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
subpackages:
- html
- html/atom
- name: golang.org/x/tools
version: 5831d16d18029819d39f99bdc2060b8eff410b6b
subpackages:
- godoc/vfs
testImports:
- name: github.com/gopherjs/gopherjs
version: 2b1d432c8a82c9bff0b0baffaeb3ec6e92974112
subpackages:
- js
- name: github.com/jtolds/gls
version: 77f18212c9c7edc9bd6a33d383a7b545ce62f064
- name: github.com/smartystreets/assertions
version: 1540c14c9f1bd1abeba90f29762a4c6e50582303
subpackages:
- internal/go-render/render
- internal/oglematchers
- name: github.com/smartystreets/goconvey
version: 9e8dc3f972df6c8fcc0375ef492c24d0bb204857
subpackages:
- convey
- convey/gotest
- convey/reporting
- name: github.com/smartystreets/logging
version: ac3a674540761aa0b4382094ba4795f917e85c7f

@ -0,0 +1,19 @@
package: github.com/codeskyblue/gosuv
import:
- package: github.com/codeskyblue/kexec
- package: github.com/equinox-io/equinox
- package: github.com/franela/goreq
- package: github.com/glycerine/rbuf
- package: github.com/go-yaml/yaml
- package: github.com/goji/httpauth
- package: github.com/gorilla/mux
- package: github.com/gorilla/websocket
- package: github.com/kennygrant/sanitize
- package: github.com/mitchellh/go-ps
- package: github.com/qiniu/log
- package: github.com/shurcooL/vfsgen
- package: github.com/urfave/cli
testImport:
- package: github.com/smartystreets/goconvey
subpackages:
- convey

@ -0,0 +1,19 @@
package main
import (
"log"
"syscall"
"time"
"github.com/codeskyblue/kexec"
)
// main demonstrates the kexec package: it launches "python flask_main.py"
// through a shell, lets it run for three seconds, then kills it (and,
// per kexec's contract, its process group) with SIGKILL.
func main() {
	p := kexec.CommandString("python flask_main.py")
	p.Start()
	time.Sleep(3 * time.Second)
	// SIGKILL cannot be trapped; any error here means the signal itself
	// could not be delivered.
	err := p.Terminate(syscall.SIGKILL)
	if err != nil {
		log.Println(err)
	}
}

@ -0,0 +1,6 @@
# Minimal Flask app used as the child process in the kexec demo.
import flask

app = flask.Flask(__name__)

if __name__ == '__main__':
    # Fixed port so the parent demo knows where the server lives.
    # NOTE(review): debug=True presumably spawns Flask's reloader child,
    # which is why the parent kills the whole process group — confirm.
    app.run(port=46732, debug=True)

@ -0,0 +1,65 @@
package kexec
import (
"os"
"os/user"
"syscall"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
)
// TestCommand exercises the kexec wrappers: plain os/exec compatibility,
// group termination via Terminate, idempotent Wait, and SetUser (root only).
func TestCommand(t *testing.T) {
	Convey("1 should equal 1", t, func() {
		So(1, ShouldEqual, 1)
	})
	Convey("kexec should work as normal os/exec", t, func() {
		cmd := Command("echo", "-n", "123")
		data, err := cmd.Output()
		So(err, ShouldBeNil)
		So(string(data), ShouldEqual, "123")
	})
	Convey("the terminate should kill proc", t, func() {
		cmd := CommandString("sleep 51")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Start()
		// Give the shell a moment to spawn before signalling.
		time.Sleep(time.Millisecond * 50)
		cmd.Terminate(syscall.SIGINT)
		// The interrupted child exits abnormally, so Wait must report it.
		err := cmd.Wait()
		So(err, ShouldNotBeNil)
		//So(err.Error(), ShouldEqual, "signal: interrupt")
	})
	Convey("Should ok with call Wait twice", t, func() {
		cmd := CommandString("not-exists-command-xxl213 true")
		var err error
		err = cmd.Start()
		So(err, ShouldBeNil)
		err1 := cmd.Wait()
		So(err1, ShouldNotBeNil)
		// A second Wait must return the cached first result, not a new error.
		err2 := cmd.Wait()
		So(err1, ShouldEqual, err2)
	})
	Convey("Set user works", t, func() {
		u, err := user.Current()
		So(err, ShouldBeNil)
		// Set user must be root
		if u.Uid != "0" {
			return
		}
		// NOTE(review): assumes a local account named "qard2" exists — confirm.
		cmd := Command("whoami")
		err = cmd.SetUser("qard2")
		So(err, ShouldBeNil)
		output, err := cmd.Output()
		So(err, ShouldBeNil)
		So(string(output), ShouldEqual, "qard2\n")
	})
}

@ -0,0 +1 @@
web: python flask_main.py

@ -0,0 +1,11 @@
# Minimal Flask app served on all interfaces; used as a managed child
# process in the examples.
import flask

app = flask.Flask(__name__)

@app.route('/')
def homepage():
    # Single route so there is something observable to request.
    return 'Home'

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')

@ -0,0 +1,22 @@
package main
import (
"fmt"
"kproc"
"log"
"os/exec"
"syscall"
"time"
)
// main demonstrates kproc: it starts "python flask_main.py" via a shell,
// waits ten seconds, kills it with SIGKILL, then runs lsof to show
// whether anything is still listening on port 5000 (Flask's default).
func main() {
	p := kproc.ProcString("python flask_main.py")
	p.Start()
	time.Sleep(10 * time.Second)
	err := p.Terminate(syscall.SIGKILL)
	if err != nil {
		log.Println(err)
	}
	// Empty output here means the whole process group is really gone.
	out, _ := exec.Command("lsof", "-i:5000").CombinedOutput()
	fmt.Println(string(out))
}

Binary file not shown.

@ -0,0 +1,426 @@
package update
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/equinox-io/equinox/internal/go-update/internal/binarydist"
)
var (
oldFile = []byte{0xDE, 0xAD, 0xBE, 0xEF}
newFile = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
newFileChecksum = sha256.Sum256(newFile)
)
// cleanup removes the test target file and the hidden ".<name>.new"
// temporary that Apply may leave behind; missing files are ignored.
func cleanup(path string) {
	for _, name := range []string{path, "." + path + ".new"} {
		os.Remove(name)
	}
}
// writeOldFile seeds path with the known "old" payload so a test can
// later verify the update replaced it. Each test uses a distinct path
// so the tests can run in parallel. A write failure aborts the test.
func writeOldFile(path string, t *testing.T) {
	err := ioutil.WriteFile(path, oldFile, 0777)
	if err != nil {
		t.Fatalf("Failed to write file for testing preparation: %v", err)
	}
}
// validateUpdate fails the test if err is non-nil or if path does not
// now contain exactly the expected "new" payload.
func validateUpdate(path string, err error, t *testing.T) {
	if err != nil {
		t.Fatalf("Failed to update: %v", err)
	}
	got, readErr := ioutil.ReadFile(path)
	if readErr != nil {
		t.Fatalf("Failed to read file post-update: %v", readErr)
	}
	if !bytes.Equal(got, newFile) {
		t.Fatalf("File was not updated! Bytes read: %v, Bytes expected: %v", got, newFile)
	}
}
// TestApplySimple applies a plain (no patch, no verification) update and
// checks the target now holds the new payload.
func TestApplySimple(t *testing.T) {
	t.Parallel()
	fName := "TestApplySimple"
	defer cleanup(fName)
	writeOldFile(fName, t)
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
	})
	validateUpdate(fName, err, t)
}

// TestApplyOldSavePath checks that OldSavePath preserves the replaced
// binary at the requested location instead of deleting it.
func TestApplyOldSavePath(t *testing.T) {
	t.Parallel()
	fName := "TestApplyOldSavePath"
	defer cleanup(fName)
	writeOldFile(fName, t)
	oldfName := "OldSavePath"
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath:  fName,
		OldSavePath: oldfName,
	})
	validateUpdate(fName, err, t)
	if _, err := os.Stat(oldfName); os.IsNotExist(err) {
		t.Fatalf("Failed to find the old file: %v", err)
	}
	cleanup(oldfName)
}

// TestVerifyChecksum applies an update with the correct SHA-256 checksum
// and expects success.
func TestVerifyChecksum(t *testing.T) {
	t.Parallel()
	fName := "TestVerifyChecksum"
	defer cleanup(fName)
	writeOldFile(fName, t)
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
		Checksum:   newFileChecksum[:],
	})
	validateUpdate(fName, err, t)
}

// TestVerifyChecksumNegative feeds a deliberately wrong checksum and
// expects Apply to reject the update.
func TestVerifyChecksumNegative(t *testing.T) {
	t.Parallel()
	fName := "TestVerifyChecksumNegative"
	defer cleanup(fName)
	writeOldFile(fName, t)
	badChecksum := []byte{0x0A, 0x0B, 0x0C, 0xFF}
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
		Checksum:   badChecksum,
	})
	if err == nil {
		t.Fatalf("Failed to detect bad checksum!")
	}
}
// TestApplyPatch builds a bsdiff patch from oldFile to newFile, applies
// it through the BSDiff patcher, and checks the result.
func TestApplyPatch(t *testing.T) {
	t.Parallel()
	fName := "TestApplyPatch"
	defer cleanup(fName)
	writeOldFile(fName, t)
	patch := new(bytes.Buffer)
	err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
	if err != nil {
		t.Fatalf("Failed to create patch: %v", err)
	}
	err = Apply(patch, Options{
		TargetPath: fName,
		Patcher:    NewBSDiffPatcher(),
	})
	validateUpdate(fName, err, t)
}

// TestCorruptPatch feeds garbage bytes as a patch and expects Apply to
// report an error rather than produce output.
func TestCorruptPatch(t *testing.T) {
	t.Parallel()
	fName := "TestCorruptPatch"
	defer cleanup(fName)
	writeOldFile(fName, t)
	badPatch := []byte{0x44, 0x38, 0x86, 0x3c, 0x4f, 0x8d, 0x26, 0x54, 0xb, 0x11, 0xce, 0xfe, 0xc1, 0xc0, 0xf8, 0x31, 0x38, 0xa0, 0x12, 0x1a, 0xa2, 0x57, 0x2a, 0xe1, 0x3a, 0x48, 0x62, 0x40, 0x2b, 0x81, 0x12, 0xb1, 0x21, 0xa5, 0x16, 0xed, 0x73, 0xd6, 0x54, 0x84, 0x29, 0xa6, 0xd6, 0xb2, 0x1b, 0xfb, 0xe6, 0xbe, 0x7b, 0x70}
	err := Apply(bytes.NewReader(badPatch), Options{
		TargetPath: fName,
		Patcher:    NewBSDiffPatcher(),
	})
	if err == nil {
		t.Fatalf("Failed to detect corrupt patch!")
	}
}

// TestVerifyChecksumPatchNegative applies a valid patch that produces the
// WRONG file (anotherFile) while requiring newFile's checksum; the
// checksum mismatch must fail the update.
func TestVerifyChecksumPatchNegative(t *testing.T) {
	t.Parallel()
	fName := "TestVerifyChecksumPatchNegative"
	defer cleanup(fName)
	writeOldFile(fName, t)
	patch := new(bytes.Buffer)
	anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
	err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
	if err != nil {
		t.Fatalf("Failed to create patch: %v", err)
	}
	err = Apply(patch, Options{
		TargetPath: fName,
		Checksum:   newFileChecksum[:],
		Patcher:    NewBSDiffPatcher(),
	})
	if err == nil {
		t.Fatalf("Failed to detect patch to wrong file!")
	}
}
const ecdsaPublicKey = `
-----BEGIN PUBLIC KEY-----
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEL8ThbSyEucsCxnd4dCZR2hIy5nea54ko
O+jUUfIjkvwhCWzASm0lpCVdVpXKZXIe+NZ+44RQRv3+OqJkCCGzUgJkPNI3lxdG
9zu8rbrnxISV06VQ8No7Ei9wiTpqmTBB
-----END PUBLIC KEY-----
`
const ecdsaPrivateKey = `
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDBttCB/1NOY4T+WrG4FSV49Ayn3gK1DNzfGaJ01JUXeiNFCWQM2pqpU
om8ATPP/dkegBwYFK4EEACKhZANiAAQvxOFtLIS5ywLGd3h0JlHaEjLmd5rniSg7
6NRR8iOS/CEJbMBKbSWkJV1Wlcplch741n7jhFBG/f46omQIIbNSAmQ80jeXF0b3
O7ytuufEhJXTpVDw2jsSL3CJOmqZMEE=
-----END EC PRIVATE KEY-----
`
const rsaPublicKey = `
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxSWmu7trWKAwDFjiCN2D
Tk2jj2sgcr/CMlI4cSSiIOHrXCFxP1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKab
b9ead+kD0kxk7i2bFYvKX43oq66IW0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4
y20C59dPr9Dpcz8DZkdLsBV6YKF6Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjT
x4xRnjgTRRRlZvRtALHMUkIChgxDOhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv5
5fhJ08Rz7mmZmtH5JxTK5XTquo59sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7Nrf
fQIDAQAB
-----END PUBLIC KEY-----`
const rsaPrivateKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAxSWmu7trWKAwDFjiCN2DTk2jj2sgcr/CMlI4cSSiIOHrXCFx
P1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKabb9ead+kD0kxk7i2bFYvKX43oq66I
W0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4y20C59dPr9Dpcz8DZkdLsBV6YKF6
Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjTx4xRnjgTRRRlZvRtALHMUkIChgxD
OhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv55fhJ08Rz7mmZmtH5JxTK5XTquo59
sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7NrffQIDAQABAoIBAAkN+6RvrTR61voa
Mvd5RQiZpEN4Bht/Fyo8gH8h0Zh1B9xJZOwlmMZLS5fdtHlfLEhR8qSrGDBL61vq
I8KkhEsUufF78EL+YzxVN+Q7cWYGHIOWFokqza7hzpSxUQO6lPOMQ1eIZaNueJTB
Zu07/47ISPPg/bXzgGVcpYlTCPTjUwKjtfyMqvX9AD7fIyYRm6zfE7EHj1J2sBFt
Yz1OGELg6HfJwXfpnPfBvftD0hWGzJ78Bp71fPJe6n5gnqmSqRvrcXNWFnH/yqkN
d6vPIxD6Z3LjvyZpkA7JillLva2L/zcIFhg4HZvQnWd8/PpDnUDonu36hcj4SC5j
W4aVPLkCgYEA4XzNKWxqYcajzFGZeSxlRHupSAl2MT7Cc5085MmE7dd31wK2T8O4
n7N4bkm/rjTbX85NsfWdKtWb6mpp8W3VlLP0rp4a/12OicVOkg4pv9LZDmY0sRlE
YuDJk1FeCZ50UrwTZI3rZ9IhZHhkgVA6uWAs7tYndONkxNHG0pjqs4sCgYEA39MZ
JwMqo3qsPntpgP940cCLflEsjS9hYNO3+Sv8Dq3P0HLVhBYajJnotf8VuU0fsQZG
grmtVn1yThFbMq7X1oY4F0XBA+paSiU18c4YyUnwax2u4sw9U/Q9tmQUZad5+ueT
qriMBwGv+ewO+nQxqvAsMUmemrVzrfwA5Oct+hcCgYAfiyXoNZJsOy2O15twqBVC
j0oPGcO+/9iT89sg5lACNbI+EdMPNYIOVTzzsL1v0VUfAe08h++Enn1BPcG0VHkc
ZFBGXTfJoXzfKQrkw7ZzbzuOGB4m6DH44xlP0oIlNlVvfX/5ASF9VJf3RiBJNsAA
TsP6ZVr/rw/ZuL7nlxy+IQKBgDhL/HOXlE3yOQiuOec8WsNHTs7C1BXe6PtVxVxi
988pYK/pclL6zEq5G5NLSceF4obAMVQIJ9UtUGbabrncyGUo9UrFPLsjYvprSZo8
YHegpVwL50UcYgCP2kXZ/ldjPIcjYDz8lhvdDMor2cidGTEJn9P11HLNWP9V91Ob
4jCZAoGAPNRSC5cC8iP/9j+s2/kdkfWJiNaolPYAUrmrkL6H39PYYZM5tnhaIYJV
Oh9AgABamU0eb3p3vXTISClVgV7ifq1HyZ7BSUhMfaY2Jk/s3sUHCWFxPZe9sgEG
KinIY/373KIkIV/5g4h2v1w330IWcfptxKcY/Er3DJr38f695GE=
-----END RSA PRIVATE KEY-----`
// signec signs source with the given ECDSA private key (PEM encoded).
func signec(privatePEM string, source []byte, t *testing.T) []byte {
	return sign(func(der []byte) (crypto.Signer, error) {
		return x509.ParseECPrivateKey(der)
	}, privatePEM, source, t)
}

// signrsa signs source with the given RSA private key (PKCS#1 PEM).
func signrsa(privatePEM string, source []byte, t *testing.T) []byte {
	return sign(func(der []byte) (crypto.Signer, error) {
		return x509.ParsePKCS1PrivateKey(der)
	}, privatePEM, source, t)
}
// sign PEM-decodes privatePEM, parses the DER payload with parsePrivKey,
// and returns a signature over the SHA-256 digest of source. Any failure
// aborts the calling test.
func sign(parsePrivKey func([]byte) (crypto.Signer, error), privatePEM string, source []byte, t *testing.T) []byte {
	block, _ := pem.Decode([]byte(privatePEM))
	if block == nil {
		t.Fatalf("Failed to parse private key PEM")
	}
	priv, err := parsePrivKey(block.Bytes)
	if err != nil {
		t.Fatalf("Failed to parse private key DER: %v", err)
	}
	checksum := sha256.Sum256(source)
	sig, err := priv.Sign(rand.Reader, checksum[:], crypto.SHA256)
	if err != nil {
		// Fix: report the error, not the (nil) signature value that the
		// original message interpolated by mistake.
		t.Fatalf("Failed to sign: %v", err)
	}
	return sig
}
// TestVerifyECSignature applies an update whose ECDSA signature matches
// the configured public key and expects success.
func TestVerifyECSignature(t *testing.T) {
	t.Parallel()
	// NOTE(review): fName does not match the test name and is also used
	// by the RSA variant; distinct names would be safer under t.Parallel.
	fName := "TestVerifySignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{TargetPath: fName}
	err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
	if err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	opts.Signature = signec(ecdsaPrivateKey, newFile, t)
	err = Apply(bytes.NewReader(newFile), opts)
	validateUpdate(fName, err, t)
}
// TestVerifyRSASignature applies an update whose RSA signature matches
// the configured public key and expects success.
func TestVerifyRSASignature(t *testing.T) {
	t.Parallel()
	// Fix: use a name unique to this test. It previously reused
	// "TestVerifySignature", the same on-disk file as the parallel
	// TestVerifyECSignature, which could race and flake.
	fName := "TestVerifyRSASignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{
		TargetPath: fName,
		Verifier:   NewRSAVerifier(),
	}
	err := opts.SetPublicKeyPEM([]byte(rsaPublicKey))
	if err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	opts.Signature = signrsa(rsaPrivateKey, newFile, t)
	err = Apply(bytes.NewReader(newFile), opts)
	validateUpdate(fName, err, t)
}
// TestVerifyFailBadSignature supplies bytes that are not a valid
// signature at all and expects Apply to reject the update.
func TestVerifyFailBadSignature(t *testing.T) {
	t.Parallel()
	fName := "TestVerifyFailBadSignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{
		TargetPath: fName,
		Signature:  []byte{0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA},
	}
	err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
	if err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	err = Apply(bytes.NewReader(newFile), opts)
	if err == nil {
		t.Fatalf("Did not fail with bad signature")
	}
}
// TestVerifyFailNoSignature configures a public key but supplies no
// signature; Apply must refuse the update rather than skip verification.
func TestVerifyFailNoSignature(t *testing.T) {
	t.Parallel()
	// Consistency fix: every sibling test names its scratch file after
	// itself; this one used the unrelated "TestVerifySignatureWithPEM".
	fName := "TestVerifyFailNoSignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{TargetPath: fName}
	err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
	if err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	err = Apply(bytes.NewReader(newFile), opts)
	if err == nil {
		t.Fatalf("Did not fail with empty signature")
	}
}
const wrongKey = `
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDBzqYp6N2s8YWYifBjS03/fFfmGeIPcxQEi+bbFeekIYt8NIKIkhD+r
hpaIwSmot+qgBwYFK4EEACKhZANiAAR0EC8Usbkc4k30frfEB2ECmsIghu9DJSqE
RbH7jfq2ULNv8tN/clRjxf2YXgp+iP3SQF1R1EYERKpWr8I57pgfIZtoZXjwpbQC
VBbP/Ff+05HOqwPC7rJMy1VAJLKg7Cw=
-----END EC PRIVATE KEY-----
`
// TestVerifyFailWrongSignature signs with a key that does NOT correspond
// to the configured public key; verification must fail.
func TestVerifyFailWrongSignature(t *testing.T) {
	t.Parallel()
	fName := "TestVerifyFailWrongSignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{TargetPath: fName}
	err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
	if err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	// Valid signature, but produced by an untrusted key.
	opts.Signature = signec(wrongKey, newFile, t)
	err = Apply(bytes.NewReader(newFile), opts)
	if err == nil {
		t.Fatalf("Verified an update that was signed by an untrusted key!")
	}
}
// TestSignatureButNoPublicKey provides a signature without any public
// key; Apply must treat the un-verifiable signature as an error.
func TestSignatureButNoPublicKey(t *testing.T) {
	t.Parallel()
	fName := "TestSignatureButNoPublicKey"
	defer cleanup(fName)
	writeOldFile(fName, t)
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
		Signature:  signec(ecdsaPrivateKey, newFile, t),
	})
	if err == nil {
		// Fix: corrected the "signautre" typo in the failure message.
		t.Fatalf("Allowed an update with a signature verification when no public key was specified!")
	}
}
// TestPublicKeyButNoSignature configures a public key but omits the
// signature; Apply must refuse the unsigned update.
func TestPublicKeyButNoSignature(t *testing.T) {
	t.Parallel()
	fName := "TestPublicKeyButNoSignature"
	defer cleanup(fName)
	writeOldFile(fName, t)
	opts := Options{TargetPath: fName}
	if err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey)); err != nil {
		t.Fatalf("Could not parse public key: %v", err)
	}
	err := Apply(bytes.NewReader(newFile), opts)
	if err == nil {
		// Fix: corrected the "signautre" typo in the failure message.
		t.Fatalf("Allowed an update with no signature when a public key was specified!")
	}
}
// TestWriteError forces the write of the new binary to fail (by handing
// Apply a pre-closed file) and expects the update to be rejected.
func TestWriteError(t *testing.T) {
	t.Parallel()
	fName := "TestWriteError"
	defer cleanup(fName)
	writeOldFile(fName, t)
	// Fix: restore the package-level openFile hook afterwards so other
	// tests in the package are not left with the faulty stub.
	origOpenFile := openFile
	defer func() { openFile = origOpenFile }()
	openFile = func(name string, flags int, perm os.FileMode) (*os.File, error) {
		f, err := os.OpenFile(name, flags, perm)
		// Fix: check err before touching f — the original called
		// f.Close() unconditionally, which panics when OpenFile fails.
		if err != nil {
			return nil, err
		}
		// simulate Write() error by closing the file prematurely
		f.Close()
		return f, nil
	}
	err := Apply(bytes.NewReader(newFile), Options{TargetPath: fName})
	if err == nil {
		t.Fatalf("Allowed an update to an empty file")
	}
}

@ -0,0 +1,93 @@
package binarydist
import (
"crypto/rand"
"io"
"io/ioutil"
"os"
)
// mustOpen opens path for reading and panics on failure; intended for
// test fixtures that are required to exist.
func mustOpen(path string) *os.File {
	f, err := os.Open(path)
	if err == nil {
		return f
	}
	panic(err)
}
func mustReadAll(r io.Reader) []byte {
b, err := ioutil.ReadAll(r)
if err != nil {
panic(err)
}
return b
}
// fileCmp compares two open files byte-for-byte. It returns -1 when they
// are identical, the (common) size when their lengths differ, or the
// offset of the first differing byte. Both files are seeked and fully
// read, so their offsets end at EOF on return.
func fileCmp(a, b *os.File) int64 {
	// Seek to the end to learn each file's size.
	sa, err := a.Seek(0, 2)
	if err != nil {
		panic(err)
	}
	sb, err := b.Seek(0, 2)
	if err != nil {
		panic(err)
	}
	// Different sizes: report a's size as the "difference position".
	if sa != sb {
		return sa
	}
	// Rewind both and load their full contents.
	_, err = a.Seek(0, 0)
	if err != nil {
		panic(err)
	}
	_, err = b.Seek(0, 0)
	if err != nil {
		panic(err)
	}
	pa, err := ioutil.ReadAll(a)
	if err != nil {
		panic(err)
	}
	pb, err := ioutil.ReadAll(b)
	if err != nil {
		panic(err)
	}
	// Equal lengths are guaranteed above, so indexing pb by pa's range is safe.
	for i := range pa {
		if pa[i] != pb[i] {
			return int64(i)
		}
	}
	return -1
}
// mustWriteRandFile creates path, fills it with size cryptographically
// random bytes, rewinds the handle to offset 0, and returns it open.
// Any failure panics.
func mustWriteRandFile(path string, size int) *os.File {
	buf := make([]byte, size)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	f, err := os.Create(path)
	if err != nil {
		panic(err)
	}
	if _, err = f.Write(buf); err != nil {
		panic(err)
	}
	// Rewind so callers can read the content back immediately.
	if _, err = f.Seek(0, 0); err != nil {
		panic(err)
	}
	return f
}

@ -0,0 +1,67 @@
package binarydist
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"testing"
)
// diffT holds the old/new file pairs TestDiff runs over: one random
// 1000-byte pair created at package init, plus a checked-in sample.
// Note these files are opened at init time and shared across tests.
var diffT = []struct {
	old *os.File
	new *os.File
}{
	{
		old: mustWriteRandFile("test.old", 1e3),
		new: mustWriteRandFile("test.new", 1e3),
	},
	{
		old: mustOpen("testdata/sample.old"),
		new: mustOpen("testdata/sample.new"),
	},
}
// TestDiff generates a patch with this package's Diff for each old/new
// pair and compares it byte-for-byte against the output of the external
// bsdiff tool, which must be installed on PATH.
func TestDiff(t *testing.T) {
	for _, s := range diffT {
		got, err := ioutil.TempFile("/tmp", "bspatch.")
		if err != nil {
			panic(err)
		}
		// Unlink immediately; the open handle keeps the file usable
		// and nothing is left behind afterwards.
		os.Remove(got.Name())
		exp, err := ioutil.TempFile("/tmp", "bspatch.")
		if err != nil {
			panic(err)
		}
		cmd := exec.Command("bsdiff", s.old.Name(), s.new.Name(), exp.Name())
		cmd.Stdout = os.Stdout
		err = cmd.Run()
		os.Remove(exp.Name())
		if err != nil {
			panic(err)
		}
		err = Diff(s.old, s.new, got)
		if err != nil {
			t.Fatal("err", err)
		}
		// Rewind our output before reading it back for comparison.
		_, err = got.Seek(0, 0)
		if err != nil {
			panic(err)
		}
		gotBuf := mustReadAll(got)
		expBuf := mustReadAll(exp)
		if !bytes.Equal(gotBuf, expBuf) {
			t.Fail()
			t.Logf("diff %s %s", s.old.Name(), s.new.Name())
			t.Logf("%s: len(got) = %d", got.Name(), len(gotBuf))
			t.Logf("%s: len(exp) = %d", exp.Name(), len(expBuf))
			i := matchlen(gotBuf, expBuf)
			t.Logf("produced different output at pos %d; %d != %d", i, gotBuf[i], expBuf[i])
		}
	}
}

@ -0,0 +1,62 @@
package binarydist
import (
"io/ioutil"
"os"
"os/exec"
"testing"
)
// TestPatch creates a patch with the external bsdiff tool (must be on
// PATH) and checks that this package's Patch reproduces test.new from
// test.old exactly.
func TestPatch(t *testing.T) {
	mustWriteRandFile("test.old", 1e3)
	mustWriteRandFile("test.new", 1e3)
	got, err := ioutil.TempFile("/tmp", "bspatch.")
	if err != nil {
		panic(err)
	}
	// Unlink immediately; the open handle keeps the output usable.
	os.Remove(got.Name())
	err = exec.Command("bsdiff", "test.old", "test.new", "test.patch").Run()
	if err != nil {
		panic(err)
	}
	err = Patch(mustOpen("test.old"), got, mustOpen("test.patch"))
	if err != nil {
		t.Fatal("err", err)
	}
	ref, err := got.Seek(0, 2)
	if err != nil {
		panic(err)
	}
	t.Logf("got %d bytes", ref)
	// fileCmp returns -1 only when the files are byte-identical.
	if n := fileCmp(got, mustOpen("test.new")); n > -1 {
		t.Fatalf("produced different output at pos %d", n)
	}
}
// TestPatchHk applies a checked-in sample patch to the sample old file
// and verifies the result matches the checked-in new file.
func TestPatchHk(t *testing.T) {
	got, err := ioutil.TempFile("/tmp", "bspatch.")
	if err != nil {
		panic(err)
	}
	// Unlink immediately; the open handle keeps the output usable.
	os.Remove(got.Name())
	err = Patch(mustOpen("testdata/sample.old"), got, mustOpen("testdata/sample.patch"))
	if err != nil {
		t.Fatal("err", err)
	}
	ref, err := got.Seek(0, 2)
	if err != nil {
		panic(err)
	}
	t.Logf("got %d bytes", ref)
	// fileCmp returns -1 only when the files are byte-identical.
	if n := fileCmp(got, mustOpen("testdata/sample.new")); n > -1 {
		t.Fatalf("produced different output at pos %d", n)
	}
}

@ -0,0 +1,33 @@
package binarydist
import (
"bytes"
"crypto/rand"
"testing"
)
// sortT holds the inputs TestQsufsort sorts: random bytes, the shared
// "test.old" fixture (created by another test file's init), and a small
// string with repeats.
var sortT = [][]byte{
	mustRandBytes(1000),
	mustReadAll(mustOpen("test.old")),
	[]byte("abcdefabcdef"),
}

// TestQsufsort checks the suffix array invariant: every suffix must be
// lexicographically <= the next one in the computed order.
func TestQsufsort(t *testing.T) {
	for _, s := range sortT {
		I := qsufsort(s)
		for i := 1; i < len(I); i++ {
			if bytes.Compare(s[I[i-1]:], s[I[i]:]) > 0 {
				t.Fatalf("unsorted at %d", i)
			}
		}
	}
}
// mustRandBytes returns n cryptographically random bytes, panicking if
// the system RNG fails.
func mustRandBytes(n int) []byte {
	buf := make([]byte, n)
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	return buf
}

@ -0,0 +1,203 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows
package osext
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
const (
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
executableEnvValueMatch = "match"
executableEnvValueDelete = "delete"
)
// TestPrintExecutable is a smoke test that logs the resolved path of the
// running test binary.
func TestPrintExecutable(t *testing.T) {
	ef, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	t.Log("Executable:", ef)
}

// TestPrintExecutableFolder is a smoke test that logs the directory
// containing the running test binary.
func TestPrintExecutableFolder(t *testing.T) {
	ef, err := ExecutableFolder()
	if err != nil {
		t.Fatalf("ExecutableFolder failed: %v", err)
	}
	t.Log("Executable Folder:", ef)
}

// TestExecutableFolder checks the folder path carries no trailing
// path separator.
func TestExecutableFolder(t *testing.T) {
	ef, err := ExecutableFolder()
	if err != nil {
		t.Fatalf("ExecutableFolder failed: %v", err)
	}
	if ef[len(ef)-1] == filepath.Separator {
		t.Fatal("ExecutableFolder ends with a trailing slash.")
	}
}
// TestExecutableMatch re-executes the test binary with a RELATIVE argv[0]
// (and the match env var set, see TestMain) and verifies the child still
// reports an absolute path naming this same binary.
func TestExecutableMatch(t *testing.T) {
	ep, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	// fullpath to be of the form "dir/prog".
	dir := filepath.Dir(filepath.Dir(ep))
	fullpath, err := filepath.Rel(dir, ep)
	if err != nil {
		t.Fatalf("filepath.Rel: %v", err)
	}
	// Make child start with a relative program path.
	// Alter argv[0] for child to verify getting real path without argv[0].
	cmd := &exec.Cmd{
		Dir:  dir,
		Path: fullpath,
		Env:  []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("exec(self) failed: %v", err)
	}
	// The child prints its Executable() result to stderr (see TestMain).
	outs := string(out)
	if !filepath.IsAbs(outs) {
		t.Fatalf("Child returned %q, want an absolute path", out)
	}
	if !sameFile(outs, ep) {
		t.Fatalf("Child returned %q, not the same file as %q", out, ep)
	}
}
// TestExecutableDelete (Linux only) re-executes the test binary, then
// deletes and replaces the binary on disk WHILE the child is running,
// and checks the child's Executable() still resolves to the live file.
func TestExecutableDelete(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip()
	}
	fpath, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	// The pipe lets us hold the child until the file shuffle is done.
	r, w := io.Pipe()
	stderrBuff := &bytes.Buffer{}
	stdoutBuff := &bytes.Buffer{}
	cmd := &exec.Cmd{
		Path:   fpath,
		Env:    []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
		Stdin:  r,
		Stderr: stderrBuff,
		Stdout: stdoutBuff,
	}
	err = cmd.Start()
	if err != nil {
		t.Fatalf("exec(self) start failed: %v", err)
	}
	// Copy the binary aside, delete the original, then rename the copy
	// back — all while the child process is alive.
	tempPath := fpath + "_copy"
	_ = os.Remove(tempPath)
	err = copyFile(tempPath, fpath)
	if err != nil {
		t.Fatalf("copy file failed: %v", err)
	}
	err = os.Remove(fpath)
	if err != nil {
		t.Fatalf("remove running test file failed: %v", err)
	}
	err = os.Rename(tempPath, fpath)
	if err != nil {
		t.Fatalf("rename copy to previous name failed: %v", err)
	}
	// Release the child (it blocks on one byte of stdin, see TestMain).
	w.Write([]byte{0})
	w.Close()
	err = cmd.Wait()
	if err != nil {
		t.Fatalf("exec wait failed: %v", err)
	}
	// The child prints its Executable() result to stderr.
	childPath := stderrBuff.String()
	if !filepath.IsAbs(childPath) {
		t.Fatalf("Child returned %q, want an absolute path", childPath)
	}
	if !sameFile(childPath, fpath) {
		t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
	}
}
// sameFile reports whether fn1 and fn2 name the same underlying file;
// any stat failure counts as "not the same".
func sameFile(fn1, fn2 string) bool {
	a, err := os.Stat(fn1)
	if err != nil {
		return false
	}
	b, err := os.Stat(fn2)
	if err != nil {
		return false
	}
	return os.SameFile(a, b)
}
// copyFile copies src's contents into a newly created dest, returning
// the first error from create, open, or copy. Both handles are closed
// before return via defer.
func copyFile(dest, src string) error {
	out, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer out.Close()
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	_, err = io.Copy(out, in)
	return err
}
// TestMain doubles as the child-process entry point for the re-exec
// tests. With no env var set it runs the test suite; in "match" mode it
// chdirs away and prints Executable() to stderr; in "delete" mode it
// first blocks on one byte of stdin (so the parent can replace the
// binary on disk), then prints Executable() to stderr.
func TestMain(m *testing.M) {
	env := os.Getenv(executableEnvVar)
	switch env {
	case "":
		os.Exit(m.Run())
	case executableEnvValueMatch:
		// First chdir to another path.
		dir := "/"
		if runtime.GOOS == "windows" {
			dir = filepath.VolumeName(".")
		}
		os.Chdir(dir)
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
	case executableEnvValueDelete:
		// Wait for the parent's go-ahead byte before resolving the path.
		bb := make([]byte, 1)
		var err error
		n, err := os.Stdin.Read(bb)
		if err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
			os.Exit(2)
		}
		if n != 1 {
			fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
			os.Exit(2)
		}
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
	}
	os.Exit(0)
}

@ -0,0 +1,203 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin linux freebsd netbsd windows
package osext
import (
"bytes"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"runtime"
"testing"
)
const (
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
executableEnvValueMatch = "match"
executableEnvValueDelete = "delete"
)
// TestPrintExecutable is a smoke test that logs the resolved path of the
// running test binary.
func TestPrintExecutable(t *testing.T) {
	ef, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	t.Log("Executable:", ef)
}

// TestPrintExecutableFolder is a smoke test that logs the directory
// containing the running test binary.
func TestPrintExecutableFolder(t *testing.T) {
	ef, err := ExecutableFolder()
	if err != nil {
		t.Fatalf("ExecutableFolder failed: %v", err)
	}
	t.Log("Executable Folder:", ef)
}

// TestExecutableFolder checks the folder path carries no trailing
// path separator.
func TestExecutableFolder(t *testing.T) {
	ef, err := ExecutableFolder()
	if err != nil {
		t.Fatalf("ExecutableFolder failed: %v", err)
	}
	if ef[len(ef)-1] == filepath.Separator {
		t.Fatal("ExecutableFolder ends with a trailing slash.")
	}
}
// TestExecutableMatch re-executes the test binary with a RELATIVE argv[0]
// (and the match env var set, see TestMain) and verifies the child still
// reports an absolute path naming this same binary.
func TestExecutableMatch(t *testing.T) {
	ep, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	// fullpath to be of the form "dir/prog".
	dir := filepath.Dir(filepath.Dir(ep))
	fullpath, err := filepath.Rel(dir, ep)
	if err != nil {
		t.Fatalf("filepath.Rel: %v", err)
	}
	// Make child start with a relative program path.
	// Alter argv[0] for child to verify getting real path without argv[0].
	cmd := &exec.Cmd{
		Dir:  dir,
		Path: fullpath,
		Env:  []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("exec(self) failed: %v", err)
	}
	// The child prints its Executable() result to stderr (see TestMain).
	outs := string(out)
	if !filepath.IsAbs(outs) {
		t.Fatalf("Child returned %q, want an absolute path", out)
	}
	if !sameFile(outs, ep) {
		t.Fatalf("Child returned %q, not the same file as %q", out, ep)
	}
}
// TestExecutableDelete (Linux only) re-executes the test binary, then
// deletes and replaces the binary on disk WHILE the child is running,
// and checks the child's Executable() still resolves to the live file.
func TestExecutableDelete(t *testing.T) {
	if runtime.GOOS != "linux" {
		t.Skip()
	}
	fpath, err := Executable()
	if err != nil {
		t.Fatalf("Executable failed: %v", err)
	}
	// The pipe lets us hold the child until the file shuffle is done.
	r, w := io.Pipe()
	stderrBuff := &bytes.Buffer{}
	stdoutBuff := &bytes.Buffer{}
	cmd := &exec.Cmd{
		Path:   fpath,
		Env:    []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
		Stdin:  r,
		Stderr: stderrBuff,
		Stdout: stdoutBuff,
	}
	err = cmd.Start()
	if err != nil {
		t.Fatalf("exec(self) start failed: %v", err)
	}
	// Copy the binary aside, delete the original, then rename the copy
	// back — all while the child process is alive.
	tempPath := fpath + "_copy"
	_ = os.Remove(tempPath)
	err = copyFile(tempPath, fpath)
	if err != nil {
		t.Fatalf("copy file failed: %v", err)
	}
	err = os.Remove(fpath)
	if err != nil {
		t.Fatalf("remove running test file failed: %v", err)
	}
	err = os.Rename(tempPath, fpath)
	if err != nil {
		t.Fatalf("rename copy to previous name failed: %v", err)
	}
	// Release the child (it blocks on one byte of stdin, see TestMain).
	w.Write([]byte{0})
	w.Close()
	err = cmd.Wait()
	if err != nil {
		t.Fatalf("exec wait failed: %v", err)
	}
	// The child prints its Executable() result to stderr.
	childPath := stderrBuff.String()
	if !filepath.IsAbs(childPath) {
		t.Fatalf("Child returned %q, want an absolute path", childPath)
	}
	if !sameFile(childPath, fpath) {
		t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
	}
}
// sameFile reports whether fn1 and fn2 refer to the same underlying
// file. If either path cannot be stat'ed, they are considered different.
func sameFile(fn1, fn2 string) bool {
	info1, err := os.Stat(fn1)
	if err != nil {
		return false
	}
	info2, err := os.Stat(fn2)
	if err != nil {
		return false
	}
	return os.SameFile(info1, info2)
}
// copyFile streams src into a freshly created dest and returns the first
// error encountered; both file handles are closed via defer.
func copyFile(dest, src string) error {
	dst, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer dst.Close()
	from, err := os.Open(src)
	if err != nil {
		return err
	}
	defer from.Close()
	_, err = io.Copy(dst, from)
	return err
}
// TestMain doubles as the child-process entry point for the re-exec
// tests. With no env var set it runs the test suite; in "match" mode it
// chdirs away and prints Executable() to stderr; in "delete" mode it
// first blocks on one byte of stdin (so the parent can replace the
// binary on disk), then prints Executable() to stderr.
func TestMain(m *testing.M) {
	env := os.Getenv(executableEnvVar)
	switch env {
	case "":
		os.Exit(m.Run())
	case executableEnvValueMatch:
		// First chdir to another path.
		dir := "/"
		if runtime.GOOS == "windows" {
			dir = filepath.VolumeName(".")
		}
		os.Chdir(dir)
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
	case executableEnvValueDelete:
		// Wait for the parent's go-ahead byte before resolving the path.
		bb := make([]byte, 1)
		var err error
		n, err := os.Stdin.Read(bb)
		if err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
			os.Exit(2)
		}
		if n != 1 {
			fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
			os.Exit(2)
		}
		if ep, err := Executable(); err != nil {
			fmt.Fprint(os.Stderr, "ERROR: ", err)
		} else {
			fmt.Fprint(os.Stderr, ep)
		}
	}
	os.Exit(0)
}

@ -0,0 +1,183 @@
package equinox
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/equinox-io/equinox/proto"
)
// fakeAppID is the application ID the fake update server expects.
const fakeAppID = "fake_app_id"

var (
	fakeBinary    = []byte{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1} // seeded contents of the pre-update target file
	newFakeBinary = []byte{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2} // contents served as the "update"
	ts            *httptest.Server                                 // fake update server; created by setup, closed by cleanup
	key           *ecdsa.PrivateKey                                // signing key generated in init
	sha           string                                           // hex SHA-256 of fakeBinary
	newSHA        string                                           // hex SHA-256 of newFakeBinary
	signature     string                                           // hex ECDSA signature over newFakeBinary's digest
)
// init precomputes the SHA-256 digests of both fake binaries, generates
// an ECDSA P-384 key, and signs the new binary's digest so the tests can
// exercise checksum and signature verification.
func init() {
	shaBytes := sha256.Sum256(fakeBinary)
	sha = hex.EncodeToString(shaBytes[:])
	newSHABytes := sha256.Sum256(newFakeBinary)
	newSHA = hex.EncodeToString(newSHABytes[:])
	var err error
	key, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		panic(fmt.Sprintf("Failed to generate ecdsa key: %v", err))
	}
	// The digest is signed directly; opts are not needed for ECDSA.
	sig, err := key.Sign(rand.Reader, newSHABytes[:], nil)
	if err != nil {
		panic(fmt.Sprintf("Failed to sign new binary: %v", err))
	}
	signature = hex.EncodeToString(sig)
}
// TestNotAvailable exercises the check endpoint when the server reports
// that no update exists: Check must return NotAvailableErr.
func TestNotAvailable(t *testing.T) {
	opts := setup(t, "TestNotAvailable", proto.Response{Available: false})
	defer cleanup(opts)
	if _, err := Check(fakeAppID, opts); err != NotAvailableErr {
		t.Fatalf("Expected not available error, got: %v", err)
	}
}
func TestEndToEnd(t *testing.T) {
opts := setup(t, "TestEndtoEnd", proto.Response{
Available: true,
Release: proto.Release{
Version: "0.1.2.3",
Title: "Release Title",
Description: "Release Description",
CreateDate: time.Now(),
},
Checksum: newSHA,
Signature: signature,
})
defer cleanup(opts)
resp, err := Check(fakeAppID, opts)
if err != nil {
t.Fatalf("Failed check: %v", err)
}
err = resp.Apply()
if err != nil {
t.Fatalf("Failed apply: %v", err)
}
buf, err := ioutil.ReadFile(opts.TargetPath)
if err != nil {
t.Fatalf("Failed to read file: %v", err)
}
if !bytes.Equal(buf, newFakeBinary) {
t.Fatalf("Binary did not update to new expected value. Got %v, expected %v", buf, newFakeBinary)
}
}
func TestInvalidPatch(t *testing.T) {
opts := setup(t, "TestInavlidPatch", proto.Response{
Available: true,
Release: proto.Release{
Version: "0.1.2.3",
Title: "Release Title",
Description: "Release Description",
CreateDate: time.Now(),
},
DownloadURL: "bad-request",
Checksum: newSHA,
Signature: signature,
Patch: proto.PatchBSDiff,
})
defer cleanup(opts)
resp, err := Check(fakeAppID, opts)
if err != nil {
t.Fatalf("Failed check: %v", err)
}
err = resp.Apply()
if err == nil {
t.Fatalf("Apply succeeded")
}
if err.Error() != "error downloading patch: bad-request" {
t.Fatalf("Expected a different error message: %s", err)
}
}
// setup starts a fake update server that answers /check with resp and
// serves the new binary (or a forced error) from /bin. It returns
// Options wired to the server; when name is non-empty, a target file
// seeded with fakeBinary is created at that path.
func setup(t *testing.T, name string, resp proto.Response) Options {
	checkUserAgent := func(req *http.Request) {
		if req.Header.Get("User-Agent") != userAgent {
			t.Errorf("Expected user agent to be %s, not %s", userAgent, req.Header.Get("User-Agent"))
		}
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
		checkUserAgent(r)
		var req proto.Request
		err := json.NewDecoder(r.Body).Decode(&req)
		if err != nil {
			// Errorf, not Fatalf: FailNow must only be called from the
			// goroutine running the test, and handlers run elsewhere.
			t.Errorf("Failed to decode proto request: %v", err)
			return
		}
		if resp.Available {
			if req.AppID != fakeAppID {
				// Fix: the old message passed err to a two-verb format
				// string; report the observed and expected IDs instead.
				t.Errorf("Unexpected app ID. Got %v, expected %v", req.AppID, fakeAppID)
			}
			if req.CurrentSHA256 != sha {
				// Fix: report the SHA the request carried, not our own.
				t.Errorf("Unexpected request SHA: %v", req.CurrentSHA256)
			}
		}
		json.NewEncoder(w).Encode(resp)
	})
	// Keying off the download URL may not be the best idea...
	if resp.DownloadURL == "bad-request" {
		mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
			checkUserAgent(r)
			http.Error(w, "bad-request", http.StatusBadRequest)
		})
	} else {
		mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
			checkUserAgent(r)
			w.Write(newFakeBinary)
		})
	}
	ts = httptest.NewServer(mux)
	resp.DownloadURL = ts.URL + "/bin"
	var opts Options
	opts.CheckURL = ts.URL + "/check"
	opts.PublicKey = key.Public()
	if name != "" {
		opts.TargetPath = name
		if err := ioutil.WriteFile(name, fakeBinary, 0644); err != nil {
			t.Fatalf("Failed to write target file: %v", err)
		}
	}
	return opts
}
// cleanup removes the temporary target binary (when setup created one)
// and shuts down the fake update server.
func cleanup(opts Options) {
	defer ts.Close()
	if opts.TargetPath != "" {
		os.Remove(opts.TargetPath)
	}
}

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
src

@ -0,0 +1,8 @@
language: go
go:
- 1.5.3
- tip
notifications:
email:
- ionathan@gmail.com
- marcosnils@gmail.com

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Jonathan Leibiusky and Marcos Lilljedahl
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -0,0 +1,3 @@
test:
go get -v -d -t ./...
go test -v

@ -0,0 +1,444 @@
[![Build Status](https://img.shields.io/travis/franela/goreq/master.svg)](https://travis-ci.org/franela/goreq)
[![GoDoc](https://godoc.org/github.com/franela/goreq?status.svg)](https://godoc.org/github.com/franela/goreq)
GoReq
=======
Simple and sane HTTP request library for Go language.
**Table of Contents**
- [Why GoReq?](#user-content-why-goreq)
- [How do I install it?](#user-content-how-do-i-install-it)
- [What can I do with it?](#user-content-what-can-i-do-with-it)
- [Making requests with different methods](#user-content-making-requests-with-different-methods)
- [GET](#user-content-get)
- [Tags](#user-content-tags)
- [POST](#user-content-post)
- [Sending payloads in the Body](#user-content-sending-payloads-in-the-body)
- [Specifying request headers](#user-content-specifiying-request-headers)
- [Sending Cookies](#cookie-support)
- [Setting timeouts](#user-content-setting-timeouts)
- [Using the Response and Error](#user-content-using-the-response-and-error)
- [Receiving JSON](#user-content-receiving-json)
- [Sending/Receiving Compressed Payloads](#user-content-sendingreceiving-compressed-payloads)
- [Using gzip compression:](#user-content-using-gzip-compression)
- [Using deflate compression:](#user-content-using-deflate-compression)
- [Using compressed responses:](#user-content-using-compressed-responses)
- [Proxy](#proxy)
- [Debugging requests](#debug)
- [Getting raw Request & Response](#getting-raw-request--response)
- [TODO:](#user-content-todo)
Why GoReq?
==========
Go has very nice native libraries that allow you to do lots of cool things. But sometimes those libraries are too low level, which means that doing a simple thing, like an HTTP request, takes some time. And if you want to do something as simple as adding a timeout to a request, you will end up writing several lines of code.
This is why we think GoReq is useful. Because you can do all your HTTP requests in a very simple and comprehensive way, while enabling you to do more advanced stuff by giving you access to the native API.
How do I install it?
====================
```bash
go get github.com/franela/goreq
```
What can I do with it?
======================
## Making requests with different methods
#### GET
```go
res, err := goreq.Request{ Uri: "http://www.google.com" }.Do()
```
GoReq default method is GET.
You can also set value to GET method easily
```go
type Item struct {
Limit int
Skip int
Fields string
}
item := Item {
Limit: 3,
Skip: 5,
Fields: "Value",
}
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?limit=3&skip=5&fields=Value`
Alternatively the `url` tag can be used in struct fields to customize encoding properties
```go
type Item struct {
TheLimit int `url:"the_limit"`
TheSkip string `url:"the_skip,omitempty"`
TheFields string `url:"-"`
}
item := Item {
TheLimit: 3,
TheSkip: "",
TheFields: "Value",
}
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?the_limit=3`
QueryString also support url.Values
```go
item := url.Values{}
item.Set("Limit", 3)
item.Add("Field", "somefield")
item.Add("Field", "someotherfield")
res, err := goreq.Request{
Uri: "http://localhost:3000/",
QueryString: item,
}.Do()
```
The sample above will send `http://localhost:3000/?limit=3&field=somefield&field=someotherfield`
### Tags
Struct field `url` tag is mainly used as the request parameter name.
Tags can be comma separated multiple values, 1st value is for naming and rest has special meanings.
- special tag for 1st value
- `-`: value is ignored if set this
- special tag for rest 2nd value
- `omitempty`: zero-value is ignored if set this
- `squash`: the fields of embedded struct is used for parameter
#### Tag Examples
```go
type Place struct {
Country string `url:"country"`
City string `url:"city"`
ZipCode string `url:"zipcode,omitempty"`
}
type Person struct {
Place `url:",squash"`
FirstName string `url:"first_name"`
LastName string `url:"last_name"`
Age string `url:"age,omitempty"`
Password string `url:"-"`
}
johnbull := Person{
Place: Place{ // squash the embedded struct value
Country: "UK",
City: "London",
ZipCode: "SW1",
},
FirstName: "John",
LastName: "Doe",
Age: "35",
Password: "my-secret", // ignored for parameter
}
goreq.Request{
Uri: "http://localhost/",
QueryString: johnbull,
}.Do()
// => `http://localhost/?first_name=John&last_name=Doe&age=35&country=UK&city=London&zipcode=SW1`
// age and zipcode are present here because they are non-empty:
// `omitempty` only drops zero values, as the next example shows.
samurai := Person{
Place: Place{ // squash the embedded struct value
Country: "Japan",
City: "Tokyo",
},
LastName: "Yagyu",
}
goreq.Request{
Uri: "http://localhost/",
QueryString: samurai,
}.Do()
// => `http://localhost/?first_name=&last_name=Yagyu&country=Japan&city=Tokyo`
// age and zipcode are dropped by `omitempty`; first_name stays (no omitempty).
```
#### POST
```go
res, err := goreq.Request{ Method: "POST", Uri: "http://www.google.com" }.Do()
```
## Sending payloads in the Body
You can send ```string```, ```Reader``` or ```interface{}``` in the body. The first two will be sent as text. The last one will be marshalled to JSON, if possible.
```go
type Item struct {
Id int
Name string
}
item := Item{ Id: 1111, Name: "foobar" }
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
}.Do()
```
## Specifiying request headers
We think that most of the times the request headers that you use are: ```Host```, ```Content-Type```, ```Accept``` and ```User-Agent```. This is why we decided to make it very easy to set these headers.
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
Host: "foobar.com",
Accept: "application/json",
ContentType: "application/json",
UserAgent: "goreq",
}.Do()
```
But sometimes you need to set other headers. You can still do it.
```go
req := goreq.Request{ Uri: "http://www.google.com" }
req.AddHeader("X-Custom", "somevalue")
req.Do()
```
Alternatively you can use the `WithHeader` function to keep the syntax short
```go
res, err = goreq.Request{ Uri: "http://www.google.com" }.WithHeader("X-Custom", "somevalue").Do()
```
## Cookie support
Cookies can be either set at the request level by sending a [CookieJar](http://golang.org/pkg/net/http/cookiejar/) in the `CookieJar` request field
or you can use goreq's one-liner WithCookie method as shown below
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
}.
WithCookie(&http.Cookie{Name: "c1", Value: "v1"}).
Do()
```
## Setting timeouts
GoReq supports 2 kind of timeouts. A general connection timeout and a request specific one. By default the connection timeout is of 1 second. There is no default for request timeout, which means it will wait forever.
You can change the connection timeout doing:
```go
goreq.SetConnectTimeout(100 * time.Millisecond)
```
And specify the request timeout doing:
```go
res, err := goreq.Request{
Uri: "http://www.google.com",
Timeout: 500 * time.Millisecond,
}.Do()
```
## Using the Response and Error
GoReq will always return 2 values: a ```Response``` and an ```Error```.
If ```Error``` is not ```nil``` it means that an error happened while doing the request and you shouldn't use the ```Response``` in any way.
You can check what happened by getting the error message:
```go
fmt.Println(err.Error())
```
And to make it easy to know if it was a timeout error, you can ask the error or return it:
```go
if serr, ok := err.(*goreq.Error); ok {
if serr.Timeout() {
...
}
}
return err
```
If you don't get an error, you can safely use the ```Response```.
```go
res.Uri // return final URL location of the response (fulfilled after redirect was made)
res.StatusCode // return the status code of the response
res.Body // gives you access to the body
res.Body.ToString() // will return the body as a string
res.Header.Get("Content-Type") // gives you access to all the response headers
```
Remember that you should **always** close `res.Body` if it's not `nil`
## Receiving JSON
GoReq will help you to receive and unmarshal JSON.
```go
type Item struct {
Id int
Name string
}
var item Item
res.Body.FromJsonTo(&item)
```
## Sending/Receiving Compressed Payloads
GoReq supports gzip, deflate and zlib compression of requests' body and transparent decompression of responses provided they have a correct `Content-Encoding` header.
##### Using gzip compression:
```go
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Gzip(),
}.Do()
```
##### Using deflate/zlib compression:
```go
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Deflate(),
}.Do()
```
##### Using compressed responses:
If the server replies with a correct and matching `Content-Encoding` header (gzip requires `Content-Encoding: gzip` and deflate `Content-Encoding: deflate`), goreq transparently decompresses the response, so the previous example should always work:
```go
type Item struct {
Id int
Name string
}
res, err := goreq.Request{
Method: "POST",
Uri: "http://www.google.com",
Body: item,
Compression: goreq.Gzip(),
}.Do()
var item Item
res.Body.FromJsonTo(&item)
```
If the server does not reply with a `Content-Encoding` header, GoReq will return the raw, undecompressed response.
## Proxy
If you need to use a proxy for your requests GoReq supports the standard `http_proxy` env variable as well as manually setting the proxy for each request
```go
res, err := goreq.Request{
Method: "GET",
Proxy: "http://myproxy:myproxyport",
Uri: "http://www.google.com",
}.Do()
```
### Proxy basic auth is also supported
```go
res, err := goreq.Request{
Method: "GET",
Proxy: "http://user:pass@myproxy:myproxyport",
Uri: "http://www.google.com",
}.Do()
```
## Debug
If you need to debug your http requests, it can print the http request detail.
```go
res, err := goreq.Request{
Method: "GET",
Uri: "http://www.google.com",
Compression: goreq.Gzip(),
ShowDebug: true,
}.Do()
fmt.Println(res, err)
```
and it will print the log:
```
GET / HTTP/1.1
Host: www.google.com
Accept:
Accept-Encoding: gzip
Content-Encoding: gzip
Content-Type:
```
### Getting raw Request & Response
To get the Request:
```go
req := goreq.Request{
Host: "foobar.com",
}
//req.Request will return a new instance of an http.Request so you can safely use it for something else
request, _ := req.NewRequest()
```
To get the Response:
```go
res, err := goreq.Request{
Method: "GET",
Uri: "http://www.google.com",
Compression: goreq.Gzip(),
ShowDebug: true,
}.Do()
// res.Response will contain the original http.Response structure
fmt.Println(res.Response, err)
```
TODO:
-----
We do have a couple of [issues](https://github.com/franela/goreq/issues) pending we'll be addressing soon. But feel free to
contribute and send us PRs (with tests please :smile:).

@ -0,0 +1,491 @@
package goreq
import (
"bufio"
"bytes"
"compress/gzip"
"compress/zlib"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/http/httputil"
"net/url"
"reflect"
"strings"
"time"
)
// itimeout matches errors that can report whether they were caused by a
// timeout (e.g. net.Error).
type itimeout interface {
	Timeout() bool
}

// Request describes a single HTTP request to be performed with Do().
// The zero value is usable; Method defaults to "GET".
type Request struct {
	headers           []headerTuple  // extra headers queued via AddHeader/WithHeader
	cookies           []*http.Cookie // cookies queued via AddCookie/WithCookie
	Method            string         // HTTP verb; empty means "GET"
	Uri               string         // target URL; the encoded QueryString is appended to it
	Body              interface{}    // string, io.Reader, []byte, or any JSON-marshalable value
	QueryString       interface{}    // url.Values, *url.Values, or a struct with `url` tags
	Timeout           time.Duration  // per-request timeout; 0 waits forever
	ContentType       string
	Accept            string
	Host              string
	UserAgent         string
	Insecure          bool // skip TLS certificate verification
	MaxRedirects      int
	RedirectHeaders   bool // copy the original request's headers onto redirects
	Proxy             string
	Compression       *compression // see Gzip(), Deflate(), Zlib()
	BasicAuthUsername string
	BasicAuthPassword string
	CookieJar         http.CookieJar
	ShowDebug         bool // dump the outgoing request via log
	OnBeforeRequest   func(goreq *Request, httpreq *http.Request)
}

// compression bundles the writer/reader constructors and the
// Content-Encoding token for one compression scheme.
type compression struct {
	writer          func(buffer io.Writer) (io.WriteCloser, error)
	reader          func(buffer io.Reader) (io.ReadCloser, error)
	ContentEncoding string
}

// Response wraps http.Response, adding the final URI (after redirects)
// and a Body that decompresses transparently when compression was used.
type Response struct {
	*http.Response
	Uri  string
	Body *Body
	req  *http.Request // retained so CancelRequest can target it
}
// CancelRequest cancels the in-flight request backing this Response,
// provided the default transport supports cancellation.
func (r Response) CancelRequest() {
	cancelRequest(DefaultTransport, r.req)
}

// cancelRequest asks transport to cancel r when it implements
// transportRequestCanceler; otherwise it is a no-op.
func cancelRequest(transport interface{}, r *http.Request) {
	if tp, ok := transport.(transportRequestCanceler); ok {
		tp.CancelRequest(r)
	}
}

// headerTuple is one name/value pair queued by AddHeader.
type headerTuple struct {
	name  string
	value string
}

// Body streams the response payload. When compressedReader is set,
// reads are served through it (decompressing on the fly); reader is
// always the raw network body and is what must be closed.
type Body struct {
	reader           io.ReadCloser
	compressedReader io.ReadCloser
}

// Error wraps any failure produced by this package and records whether
// it was caused by a timeout.
type Error struct {
	timeout bool
	Err     error
}

// transportRequestCanceler is satisfied by transports (like
// *http.Transport) that can cancel an in-flight request.
type transportRequestCanceler interface {
	CancelRequest(*http.Request)
}
// Timeout reports whether the wrapped error was caused by a timeout.
func (e *Error) Timeout() bool {
	return e.timeout
}

// Error returns the wrapped error's message.
func (e *Error) Error() string {
	return e.Err.Error()
}

// Read serves from the decompressing reader when one is present,
// otherwise directly from the raw body.
func (b *Body) Read(p []byte) (int, error) {
	if b.compressedReader != nil {
		return b.compressedReader.Read(p)
	}
	return b.reader.Read(p)
}

// Close closes the raw body (always, so the connection can be reused)
// and, when present, the decompressing reader, whose close error then
// takes precedence.
func (b *Body) Close() error {
	err := b.reader.Close()
	if b.compressedReader != nil {
		return b.compressedReader.Close()
	}
	return err
}

// FromJsonTo decodes the (possibly decompressed) body as JSON into o.
func (b *Body) FromJsonTo(o interface{}) error {
	return json.NewDecoder(b).Decode(o)
}

// ToString drains the body and returns it as a string.
func (b *Body) ToString() (string, error) {
	body, err := ioutil.ReadAll(b)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// Gzip returns a compression scheme that gzips request bodies and
// gunzips responses, advertised with the "gzip" Content-Encoding token.
func Gzip() *compression {
	return &compression{
		writer: func(buffer io.Writer) (io.WriteCloser, error) {
			return gzip.NewWriter(buffer), nil
		},
		reader: func(buffer io.Reader) (io.ReadCloser, error) {
			return gzip.NewReader(buffer)
		},
		ContentEncoding: "gzip",
	}
}

// Deflate returns a compression scheme backed by zlib, advertised with
// the "deflate" Content-Encoding token.
func Deflate() *compression {
	return &compression{
		writer: func(buffer io.Writer) (io.WriteCloser, error) {
			return zlib.NewWriter(buffer), nil
		},
		reader: func(buffer io.Reader) (io.ReadCloser, error) {
			return zlib.NewReader(buffer)
		},
		ContentEncoding: "deflate",
	}
}

// Zlib is an alias for Deflate.
func Zlib() *compression {
	return Deflate()
}
// paramParse encodes query into URL query-string form. url.Values and
// *url.Values encode directly; anything else is treated as a struct
// with `url` tags and walked by paramParseStruct.
func paramParse(query interface{}) (string, error) {
	// Bind the concrete value in the type switch instead of re-asserting
	// inside every case (idiomatic `switch q := query.(type)`).
	switch q := query.(type) {
	case url.Values:
		return q.Encode(), nil
	case *url.Values:
		return q.Encode(), nil
	default:
		v := &url.Values{}
		err := paramParseStruct(v, query)
		return v.Encode(), err
	}
}
// paramParseStruct walks query (a struct, possibly behind pointers or
// interfaces) via reflection and adds one parameter per exported field
// to v. The `url` tag controls the parameter name; "-" skips a field,
// "omitempty" drops zero-length values, and "squash" recurses into an
// embedded struct's fields. Untagged fields use the lowercased Go name.
func paramParseStruct(v *url.Values, query interface{}) error {
	var (
		s = reflect.ValueOf(query)
		t = reflect.TypeOf(query)
	)
	// Unwrap pointers/interfaces until we reach the concrete value.
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Interface {
		s = s.Elem()
		t = s.Type()
	}
	if t.Kind() != reflect.Struct {
		return errors.New("Can not parse QueryString.")
	}
	for i := 0; i < t.NumField(); i++ {
		var name string
		field := s.Field(i)
		typeField := t.Field(i)
		// Unexported fields cannot be read via Interface(); skip them.
		if !field.CanInterface() {
			continue
		}
		urlTag := typeField.Tag.Get("url")
		if urlTag == "-" {
			continue
		}
		// opts is new, so := reuses the `name` declared above.
		name, opts := parseTag(urlTag)
		var omitEmpty, squash bool
		omitEmpty = opts.Contains("omitempty")
		squash = opts.Contains("squash")
		if squash {
			err := paramParseStruct(v, field.Interface())
			if err != nil {
				return err
			}
			continue
		}
		if urlTag == "" {
			name = strings.ToLower(typeField.Name)
		}
		// %v stringifies any field type; omitempty drops empty strings.
		if val := fmt.Sprintf("%v", field.Interface()); !(omitEmpty && len(val) == 0) {
			v.Add(name, val)
		}
	}
	return nil
}
func prepareRequestBody(b interface{}) (io.Reader, error) {
switch b.(type) {
case string:
// treat is as text
return strings.NewReader(b.(string)), nil
case io.Reader:
// treat is as text
return b.(io.Reader), nil
case []byte:
//treat as byte array
return bytes.NewReader(b.([]byte)), nil
case nil:
return nil, nil
default:
// try to jsonify it
j, err := json.Marshal(b)
if err == nil {
return bytes.NewReader(j), nil
}
return nil, err
}
}
// DefaultDialer supplies the connect (dial) timeout for the default
// transport; change it via SetConnectTimeout.
var DefaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond}

// DefaultTransport and DefaultClient are shared by every request that
// does not configure a proxy or a cookie jar.
var DefaultTransport http.RoundTripper = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyFromEnvironment}
var DefaultClient = &http.Client{Transport: DefaultTransport}

// proxyTransport/proxyClient are lazily (re)built by Do when a request
// sets Proxy.
var proxyTransport http.RoundTripper
var proxyClient *http.Client

// SetConnectTimeout changes the connection timeout used by the default
// dialer (and therefore the default transport).
func SetConnectTimeout(duration time.Duration) {
	DefaultDialer.Timeout = duration
}
// AddHeader queues an extra header to be added to the outgoing request.
func (r *Request) AddHeader(name string, value string) {
	// append handles a nil slice, so the previous explicit
	// initialization of r.headers was redundant.
	r.headers = append(r.headers, headerTuple{name: name, value: value})
}

// WithHeader adds a header and returns the request, enabling chained
// calls. Note the value receiver: the header lands on the returned copy.
func (r Request) WithHeader(name string, value string) Request {
	r.AddHeader(name, value)
	return r
}

// AddCookie queues a cookie to be sent with the request.
func (r *Request) AddCookie(c *http.Cookie) {
	r.cookies = append(r.cookies, c)
}

// WithCookie adds a cookie and returns the request, enabling chained
// calls. As with WithHeader, the cookie lands on the returned copy.
func (r Request) WithCookie(c *http.Cookie) Request {
	r.AddCookie(c)
	return r
}
// Do performs the HTTP request described by r and returns a *Response,
// or an *Error that can report timeouts. The method defaults to GET.
// A cookie jar or proxy, when set, routes the request through a
// dedicated client/transport instead of the defaults.
//
// NOTE(review): when neither a jar nor a proxy is set, CheckRedirect
// and Timeout are assigned on the shared DefaultClient, so concurrent
// Do calls mutate shared state — confirm whether concurrent use is
// supported before relying on it.
func (r Request) Do() (*Response, error) {
	var client = DefaultClient
	var transport = DefaultTransport
	var resUri string
	var redirectFailed bool
	r.Method = valueOrDefault(r.Method, "GET")
	// use a client with a cookie jar if necessary. We create a new client not
	// to modify the default one.
	if r.CookieJar != nil {
		client = &http.Client{
			Transport: transport,
			Jar:       r.CookieJar,
		}
	}
	if r.Proxy != "" {
		proxyUrl, err := url.Parse(r.Proxy)
		if err != nil {
			// proxy address is in a wrong format
			return nil, &Error{Err: err}
		}
		//If jar is specified new client needs to be built
		if proxyTransport == nil || client.Jar != nil {
			proxyTransport = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyURL(proxyUrl)}
			proxyClient = &http.Client{Transport: proxyTransport, Jar: client.Jar}
		} else if proxyTransport, ok := proxyTransport.(*http.Transport); ok {
			// NOTE: this `proxyTransport` shadows the package-level one;
			// only the existing transport's Proxy function is updated.
			proxyTransport.Proxy = http.ProxyURL(proxyUrl)
		}
		transport = proxyTransport
		client = proxyClient
	}
	// Record the final URI on each hop and enforce MaxRedirects;
	// redirectFailed lets the error path below distinguish redirect
	// errors from transport errors.
	client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
		if len(via) > r.MaxRedirects {
			redirectFailed = true
			return errors.New("Error redirecting. MaxRedirects reached")
		}
		resUri = req.URL.String()
		//By default Golang will not redirect request headers
		// https://code.google.com/p/go/issues/detail?id=4800&q=request%20header
		if r.RedirectHeaders {
			for key, val := range via[0].Header {
				req.Header[key] = val
			}
		}
		return nil
	}
	// Apply (or clear) InsecureSkipVerify on the chosen transport.
	if transport, ok := transport.(*http.Transport); ok {
		if r.Insecure {
			if transport.TLSClientConfig != nil {
				transport.TLSClientConfig.InsecureSkipVerify = true
			} else {
				transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
			}
		} else if transport.TLSClientConfig != nil {
			// the default TLS client (when transport.TLSClientConfig==nil) is
			// already set to verify, so do nothing in that case
			transport.TLSClientConfig.InsecureSkipVerify = false
		}
	}
	req, err := r.NewRequest()
	if err != nil {
		// we couldn't parse the URL.
		return nil, &Error{Err: err}
	}
	timeout := false
	if r.Timeout > 0 {
		client.Timeout = r.Timeout
	}
	if r.ShowDebug {
		dump, err := httputil.DumpRequest(req, true)
		if err != nil {
			log.Println(err)
		}
		log.Println(string(dump))
	}
	if r.OnBeforeRequest != nil {
		r.OnBeforeRequest(&r, req)
	}
	res, err := client.Do(req)
	if err != nil {
		// Determine whether the failure was a timeout, looking through a
		// possible *url.Error wrapper.
		if !timeout {
			if t, ok := err.(itimeout); ok {
				timeout = t.Timeout()
			}
			if ue, ok := err.(*url.Error); ok {
				if t, ok := ue.Err.(itimeout); ok {
					timeout = t.Timeout()
				}
			}
		}
		var response *Response
		//If redirect fails we still want to return response data
		if redirectFailed {
			if res != nil {
				response = &Response{res, resUri, &Body{reader: res.Body}, req}
			} else {
				response = &Response{res, resUri, nil, req}
			}
		}
		//If redirect fails and we haven't set a redirect count we shouldn't return an error
		if redirectFailed && r.MaxRedirects == 0 {
			return response, nil
		}
		return response, &Error{timeout: timeout, Err: err}
	}
	// Only wrap the body in a decompressing reader when we requested
	// compression and the server's Content-Encoding matches.
	if r.Compression != nil && strings.Contains(res.Header.Get("Content-Encoding"), r.Compression.ContentEncoding) {
		compressedReader, err := r.Compression.reader(res.Body)
		if err != nil {
			return nil, &Error{Err: err}
		}
		return &Response{res, resUri, &Body{reader: res.Body, compressedReader: compressedReader}, req}, nil
	}
	return &Response{res, resUri, &Body{reader: res.Body}, req}, nil
}
// addHeaders copies the convenience header fields (User-Agent, Accept,
// Content-Type) into headersMap when they are set.
func (r Request) addHeaders(headersMap http.Header) {
	if r.UserAgent != "" {
		headersMap.Add("User-Agent", r.UserAgent)
	}
	if r.Accept != "" {
		headersMap.Add("Accept", r.Accept)
	}
	if r.ContentType != "" {
		headersMap.Add("Content-Type", r.ContentType)
	}
}
// NewRequest builds the underlying *http.Request without sending it:
// the body is prepared (and fully compressed in memory when requested),
// the encoded query string is appended to the URI, and headers, basic
// auth and cookies are applied. Body/query problems return *Error.
func (r Request) NewRequest() (*http.Request, error) {
	b, e := prepareRequestBody(r.Body)
	if e != nil {
		// there was a problem marshaling the body
		return nil, &Error{Err: e}
	}
	if r.QueryString != nil {
		param, e := paramParse(r.QueryString)
		if e != nil {
			return nil, &Error{Err: e}
		}
		r.Uri = r.Uri + "?" + param
	}
	var bodyReader io.Reader
	if b != nil && r.Compression != nil {
		// Compress the whole body into an in-memory buffer up front.
		buffer := bytes.NewBuffer([]byte{})
		readBuffer := bufio.NewReader(b)
		writer, err := r.Compression.writer(buffer)
		if err != nil {
			return nil, &Error{Err: err}
		}
		// Close before checking the copy error so compressed output is
		// flushed either way.
		_, e = readBuffer.WriteTo(writer)
		writer.Close()
		if e != nil {
			return nil, &Error{Err: e}
		}
		bodyReader = buffer
	} else {
		bodyReader = b
	}
	req, err := http.NewRequest(r.Method, r.Uri, bodyReader)
	if err != nil {
		return nil, err
	}
	// add headers to the request
	req.Host = r.Host
	r.addHeaders(req.Header)
	if r.Compression != nil {
		req.Header.Add("Content-Encoding", r.Compression.ContentEncoding)
		req.Header.Add("Accept-Encoding", r.Compression.ContentEncoding)
	}
	if r.headers != nil {
		for _, header := range r.headers {
			req.Header.Add(header.name, header.value)
		}
	}
	//use basic auth if required
	if r.BasicAuthUsername != "" {
		req.SetBasicAuth(r.BasicAuthUsername, r.BasicAuthPassword)
	}
	for _, c := range r.cookies {
		req.AddCookie(c)
	}
	return req, nil
}
// valueOrDefault returns value if it is nonempty, and def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,64 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found here: https://github.com/golang/go/blob/master/LICENSE
package goreq
import (
"strings"
"unicode"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and its
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	idx := strings.Index(tag, ",")
	if idx < 0 {
		return tag, tagOptions("")
	}
	return tag[:idx], tagOptions(tag[idx+1:])
}

// Contains reports whether a comma-separated list of options contains a
// particular substr flag. substr must be surrounded by a string
// boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
	if len(o) == 0 {
		return false
	}
	for _, opt := range strings.Split(string(o), ",") {
		if opt == optionName {
			return true
		}
	}
	return false
}
// isValidTag reports whether s is usable as a tag name: non-empty and
// made only of letters, digits, and a small set of allowed punctuation.
// Backslash and quote chars are reserved, but otherwise any punctuation
// chars are allowed in a tag name.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, c := range s {
		if unicode.IsLetter(c) || unicode.IsDigit(c) {
			continue
		}
		if !strings.ContainsRune(allowedPunct, c) {
			return false
		}
	}
	return true
}

@ -0,0 +1,136 @@
package rbuf
import (
"bytes"
"fmt"
"io"
"testing"
cv "github.com/smartystreets/goconvey/convey"
)
// new tests just for atomic version
// same set of tests for non-atomic rbuf:
// TestAtomicRingBufReadWrite exercises AtomicFixedSizeRingBuf with a
// 5-byte ring shared by all subtests: plain Write/Bytes/Read, short
// writes, wrap-around, WriteTo, and ReadFrom. The subtests share b and
// rely on Reset() to clear state between scenarios.
func TestAtomicRingBufReadWrite(t *testing.T) {
	b := NewAtomicFixedSizeRingBuf(5)
	data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	cv.Convey("Given a AtomicFixedSizeRingBuf of size 5", t, func() {
		cv.Convey("Write(), Bytes(), and Read() should put and get bytes", func() {
			n, err := b.Write(data[0:5])
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.readable, cv.ShouldEqual, 5)
			if n != 5 {
				fmt.Printf("should have been able to write 5 bytes.\n")
			}
			if err != nil {
				panic(err)
			}
			cv.So(b.Bytes(false), cv.ShouldResemble, data[0:5])
			// A partial read advances the read position but leaves the
			// remaining bytes visible through Bytes().
			sink := make([]byte, 3)
			n, err = b.Read(sink)
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(b.Bytes(false), cv.ShouldResemble, data[3:5])
			cv.So(sink, cv.ShouldResemble, data[0:3])
		})
		// Oversized writes accept what fits and report io.ErrShortWrite
		// for the remainder.
		cv.Convey("Write() more than 5 should give back ErrShortWrite", func() {
			b.Reset()
			cv.So(b.readable, cv.ShouldEqual, 0)
			n, err := b.Write(data[0:10])
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, io.ErrShortWrite)
			cv.So(b.readable, cv.ShouldEqual, 5)
			if n != 5 {
				fmt.Printf("should have been able to write 5 bytes.\n")
			}
			cv.So(b.Bytes(false), cv.ShouldResemble, data[0:5])
			sink := make([]byte, 3)
			n, err = b.Read(sink)
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(b.Bytes(false), cv.ShouldResemble, data[3:5])
			cv.So(sink, cv.ShouldResemble, data[0:3])
		})
		cv.Convey("we should be able to wrap data and then get it back in Bytes(false)", func() {
			b.Reset()
			n, err := b.Write(data[0:3])
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 3)
			n, err = b.Read(sink) // put b.beg at 3
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.readable, cv.ShouldEqual, 0)
			n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			by := b.Bytes(false)
			cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
		})
		cv.Convey("AtomicFixedSizeRingBuf::WriteTo() should work with wrapped data", func() {
			b.Reset()
			n, err := b.Write(data[0:3])
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 3)
			n, err = b.Read(sink) // put b.beg at 3
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.readable, cv.ShouldEqual, 0)
			n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
			// WriteTo must drain the wrapped contents contiguously.
			var bb bytes.Buffer
			m, err := b.WriteTo(&bb)
			cv.So(m, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			by := bb.Bytes()
			cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
		})
		cv.Convey("AtomicFixedSizeRingBuf::ReadFrom() should work with wrapped data", func() {
			b.Reset()
			var bb bytes.Buffer
			n, err := b.ReadFrom(&bb)
			cv.So(n, cv.ShouldEqual, 0)
			cv.So(err, cv.ShouldEqual, nil)
			// write 4, then read 4 bytes
			m, err := b.Write(data[0:4])
			cv.So(m, cv.ShouldEqual, 4)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 4)
			k, err := b.Read(sink) // put b.beg at 4
			cv.So(k, cv.ShouldEqual, 4)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.readable, cv.ShouldEqual, 0)
			cv.So(b.Beg, cv.ShouldEqual, 4)
			bbread := bytes.NewBuffer(data[4:9])
			n, err = b.ReadFrom(bbread) // wrap 4 bytes around to the front, 5 bytes total.
			by := b.Bytes(false)
			cv.So(by, cv.ShouldResemble, data[4:9]) // but still get them back continguous from the ping-pong buffering
		})
	})
}

@ -17,9 +17,9 @@ type PointerRingBuf struct {
}
// constructor. NewPointerRingBuf will allocate internally
// a slice of size maxViewInBytes.
func NewPointerRingBuf(maxViewInBytes int) *PointerRingBuf {
n := maxViewInBytes
// a slice of size sliceN
func NewPointerRingBuf(sliceN int) *PointerRingBuf {
n := sliceN
r := &PointerRingBuf{
N: n,
Beg: 0,
@ -33,7 +33,7 @@ func NewPointerRingBuf(maxViewInBytes int) *PointerRingBuf {
// TwoContig returns all readable pointers, but in two separate slices,
// to avoid copying. The two slices are from the same buffer, but
// are not contiguous. Either or both may be empty slices.
func (b *PointerRingBuf) TwoContig(makeCopy bool) (first []interface{}, second []interface{}) {
func (b *PointerRingBuf) TwoContig() (first []interface{}, second []interface{}) {
extent := b.Beg + b.Readable
if extent <= b.N {
@ -170,3 +170,67 @@ func (b *PointerRingBuf) Adopt(me []interface{}) {
b.Readable = n
}
}
// Push writes len(p) pointers from p to the ring.
// It returns the number of elements written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Push must return a non-nil error if it returns n < len(p).
//
func (b *PointerRingBuf) Push(p []interface{}) (n int, err error) {
	for {
		if len(p) == 0 {
			// nothing (left) to copy in; notice we shorten our
			// local copy p (below) as we read from it.
			return
		}
		// free slots remaining in the ring
		writeCapacity := b.N - b.Readable
		if writeCapacity <= 0 {
			// we are all full up already.
			return n, io.ErrShortWrite
		}
		if len(p) > writeCapacity {
			err = io.ErrShortWrite
			// leave err set and
			// keep going, write what we can.
		}
		// first writable index, wrapping modulo the ring size
		writeStart := (b.Beg + b.Readable) % b.N
		// copy only up to the physical end of the backing array;
		// a wrapped write is completed by the next loop iteration.
		upperLim := intMin(writeStart+writeCapacity, b.N)
		k := copy(b.A[writeStart:upperLim], p)
		n += k
		b.Readable += k
		p = p[k:]
		// we can fill from b.A[0:something] from
		// p's remainder, so loop
	}
}
// PushAndMaybeOverwriteOldestData always consumes the full
// slice p, even if that means blowing away the oldest
// unread pointers in the ring to make room. In reality, only the last
// min(len(p), b.N) elements of p will end up being written to the ring.
//
// This allows the ring to act as a record of the most recent
// b.N pointers -- a kind of temporal LRU cache, so to
// speak. The linux kernel's dmesg ring buffer is similar.
//
func (b *PointerRingBuf) PushAndMaybeOverwriteOldestData(p []interface{}) (n int, err error) {
	// discard just enough of the oldest unread data to make p fit
	writeCapacity := b.N - b.Readable
	if len(p) > writeCapacity {
		b.Advance(len(p) - writeCapacity)
	}
	// if p alone is larger than the ring, only its tail can survive
	startPos := 0
	if len(p) > b.N {
		startPos = len(p) - b.N
	}
	n, err = b.Push(p[startPos:])
	if err != nil {
		return n, err
	}
	// report the full length consumed, per the contract above
	return len(p), nil
}

@ -0,0 +1,72 @@
package rbuf
import (
"fmt"
"testing"
cv "github.com/smartystreets/goconvey/convey"
)
// TestPointerReadWrite exercises PushAndMaybeOverwriteOldestData on a
// 5-slot PointerRingBuf: partial fills, exact fills, and a full
// overwrite, verifying that only the most recent 5 pointers survive.
func TestPointerReadWrite(t *testing.T) {
	b := NewPointerRingBuf(5)
	// ten distinct int payloads boxed as interface{}
	data := []interface{}{}
	for i := 0; i < 10; i++ {
		data = append(data, interface{}(i))
	}
	cv.Convey("PointerRingBuf::PushAndMaybeOverwriteOldestData() should auto advance", t, func() {
		b.Reset()
		n, err := b.PushAndMaybeOverwriteOldestData(data[:3])
		cv.So(err, cv.ShouldEqual, nil)
		cv.So(n, cv.ShouldEqual, 3)
		cv.So(b.Readable, cv.ShouldEqual, 3)
		n, err = b.PushAndMaybeOverwriteOldestData(data[3:5])
		cv.So(n, cv.ShouldEqual, 2)
		cv.So(b.Readable, cv.ShouldEqual, 5)
		check := make([]interface{}, 5)
		n, err = b.ReadPtrs(check)
		cv.So(n, cv.ShouldEqual, 5)
		cv.So(check, cv.ShouldResemble, data[:5])
		// push a full ring's worth; everything older is overwritten
		n, err = b.PushAndMaybeOverwriteOldestData(data[5:10])
		cv.So(err, cv.ShouldEqual, nil)
		cv.So(n, cv.ShouldEqual, 5)
		n, err = b.ReadWithoutAdvance(check)
		cv.So(n, cv.ShouldEqual, 5)
		cv.So(check, cv.ShouldResemble, data[5:10])
		// check TwoContig: the two slices together must cover exactly
		// the five retained values, regardless of where the split falls
		q, r := b.TwoContig()
		//p("len q = %v", len(q))
		//p("len r = %v", len(r))
		found := make([]bool, 10)
		for _, iface := range q {
			q0 := iface.(int)
			found[q0] = true
		}
		for _, iface := range r {
			r0 := iface.(int)
			found[r0] = true
		}
		totTrue := 0
		for i := range found {
			if found[i] {
				totTrue++
			}
		}
		cv.So(totTrue, cv.ShouldEqual, 5)
	})
}
// p is a tiny debug-print helper: it renders format with args a
// and writes the result to stdout framed by blank lines.
func p(format string, a ...interface{}) {
	msg := fmt.Sprintf(format, a...)
	fmt.Printf("\n%s\n", msg)
}

@ -0,0 +1,191 @@
package rbuf
import (
"bytes"
"fmt"
"io"
"testing"
cv "github.com/smartystreets/goconvey/convey"
)
// TestRingBufReadWrite covers the core FixedSizeRingBuf operations on a
// 5-byte ring: Write/Read/Bytes, short writes, wrap-around, WriteTo,
// ReadFrom, and the auto-advancing WriteAndMaybeOverwriteOldestData.
func TestRingBufReadWrite(t *testing.T) {
	b := NewFixedSizeRingBuf(5)
	data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	cv.Convey("Given a FixedSizeRingBuf of size 5", t, func() {
		cv.Convey("Write(), Bytes(), and Read() should put and get bytes", func() {
			n, err := b.Write(data[0:5])
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.Readable, cv.ShouldEqual, 5)
			if n != 5 {
				fmt.Printf("should have been able to write 5 bytes.\n")
			}
			if err != nil {
				panic(err)
			}
			cv.So(b.Bytes(), cv.ShouldResemble, data[0:5])
			sink := make([]byte, 3)
			n, err = b.Read(sink)
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(b.Bytes(), cv.ShouldResemble, data[3:5])
			cv.So(sink, cv.ShouldResemble, data[0:3])
		})
		cv.Convey("Write() more than 5 should give back ErrShortWrite", func() {
			b.Reset()
			cv.So(b.Readable, cv.ShouldEqual, 0)
			n, err := b.Write(data[0:10])
			// only the first 5 bytes fit; the rest are dropped
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, io.ErrShortWrite)
			cv.So(b.Readable, cv.ShouldEqual, 5)
			if n != 5 {
				fmt.Printf("should have been able to write 5 bytes.\n")
			}
			cv.So(b.Bytes(), cv.ShouldResemble, data[0:5])
			sink := make([]byte, 3)
			n, err = b.Read(sink)
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(b.Bytes(), cv.ShouldResemble, data[3:5])
			cv.So(sink, cv.ShouldResemble, data[0:3])
		})
		cv.Convey("we should be able to wrap data and then get it back in Bytes()", func() {
			b.Reset()
			n, err := b.Write(data[0:3])
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 3)
			n, err = b.Read(sink) // put b.beg at 3
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.Readable, cv.ShouldEqual, 0)
			n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
			cv.So(n, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			by := b.Bytes()
			cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
		})
		cv.Convey("FixedSizeRingBuf::WriteTo() should work with wrapped data", func() {
			b.Reset()
			n, err := b.Write(data[0:3])
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 3)
			n, err = b.Read(sink) // put b.beg at 3
			cv.So(n, cv.ShouldEqual, 3)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.Readable, cv.ShouldEqual, 0)
			n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
			var bb bytes.Buffer
			m, err := b.WriteTo(&bb)
			cv.So(m, cv.ShouldEqual, 5)
			cv.So(err, cv.ShouldEqual, nil)
			by := bb.Bytes()
			cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
		})
		cv.Convey("FixedSizeRingBuf::ReadFrom() should work with wrapped data", func() {
			b.Reset()
			var bb bytes.Buffer
			// reading from an empty buffer should be a clean no-op
			n, err := b.ReadFrom(&bb)
			cv.So(n, cv.ShouldEqual, 0)
			cv.So(err, cv.ShouldEqual, nil)
			// write 4, then read 4 bytes
			m, err := b.Write(data[0:4])
			cv.So(m, cv.ShouldEqual, 4)
			cv.So(err, cv.ShouldEqual, nil)
			sink := make([]byte, 4)
			k, err := b.Read(sink) // put b.beg at 4
			cv.So(k, cv.ShouldEqual, 4)
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(b.Readable, cv.ShouldEqual, 0)
			cv.So(b.Beg, cv.ShouldEqual, 4)
			bbread := bytes.NewBuffer(data[4:9])
			n, err = b.ReadFrom(bbread) // wrap 4 bytes around to the front, 5 bytes total.
			by := b.Bytes()
			cv.So(by, cv.ShouldResemble, data[4:9]) // but still get them back continguous from the ping-pong buffering
		})
		cv.Convey("FixedSizeRingBuf::WriteAndMaybeOverwriteOldestData() should auto advance", func() {
			b.Reset()
			n, err := b.WriteAndMaybeOverwriteOldestData(data[:5])
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(n, cv.ShouldEqual, 5)
			n, err = b.WriteAndMaybeOverwriteOldestData(data[5:7])
			cv.So(n, cv.ShouldEqual, 2)
			// the two oldest bytes were evicted to make room
			cv.So(b.Bytes(), cv.ShouldResemble, data[2:7])
			n, err = b.WriteAndMaybeOverwriteOldestData(data[0:9])
			cv.So(err, cv.ShouldEqual, nil)
			cv.So(n, cv.ShouldEqual, 9)
			cv.So(b.Bytes(), cv.ShouldResemble, data[4:9])
		})
	})
}
// TestNextPrev verifies Nextpos/Prevpos navigation over a partially
// filled ring for every possible begin offset: stepping past either
// end of the readable region must return a negative position.
func TestNextPrev(t *testing.T) {
	b := NewFixedSizeRingBuf(6)
	data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	cv.Convey("Given a FixedSizeRingBuf of size 6, filled with 4 elements at various begin points, then Nextpos() and Prev() should return the correct positions or <0 if done", t, func() {
		k := b.N
		// try every rotation of the begin pointer
		for i := 0; i < b.N; i++ {
			b.Reset()
			b.Beg = i
			_, err := b.Write(data[0:k])
			panicOn(err)
			// cannot go prev to first
			cv.So(b.Prevpos(i), cv.ShouldEqual, -1)
			// cannot go after last
			cv.So(b.Nextpos((i+k-1)%b.N), cv.ShouldEqual, -1)
			// in the middle we should be okay
			for j := 1; j < k-1; j++ {
				r := (i + j) % b.N
				prev := b.Prevpos(r)
				next := b.Nextpos(r)
				cv.So(prev >= 0, cv.ShouldBeTrue)
				cv.So(next >= 0, cv.ShouldBeTrue)
				// positions advance by one, wrapping at the array ends
				if next > r {
					cv.So(next, cv.ShouldEqual, r+1)
				} else {
					cv.So(next, cv.ShouldEqual, 0)
				}
				if prev < r {
					cv.So(prev, cv.ShouldEqual, r-1)
				} else {
					cv.So(prev, cv.ShouldEqual, b.N-1)
				}
			}
		}
	})
}
func panicOn(err error) {
if err != nil {
panic(err)
}
}

@ -42,12 +42,14 @@ The package API for yaml v2 will remain stable as described in [gopkg.in](https:
License
-------
The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
-------
Some more examples can be found in the "examples" folder.
```Go
package main

@ -120,7 +120,6 @@ func (p *parser) parse() *node {
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
panic("unreachable")
}
func (p *parser) node(kind int) *node {
@ -191,6 +190,7 @@ type decoder struct {
aliases map[string]bool
mapType reflect.Type
terrors []string
strict bool
}
var (
@ -200,8 +200,8 @@ var (
ifaceType = defaultMapType.Elem()
)
func newDecoder() *decoder {
d := &decoder{mapType: defaultMapType}
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[string]bool)
return d
}
@ -251,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@ -640,6 +640,8 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
}
}
return true

File diff suppressed because it is too large Load Diff

@ -666,7 +666,6 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_set_emitter_error(emitter,
"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
}
return false
}
// Expect ALIAS.
@ -995,7 +994,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
break_space = false
space_break = false
preceeded_by_whitespace = false
preceded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
@ -1017,7 +1016,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
flow_indicators = true
}
preceeded_by_whitespace = true
preceded_by_whitespace = true
for i, w := 0, 0; i < len(value); i += w {
w = width(value[i])
followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
@ -1048,7 +1047,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
block_indicators = true
}
case '#':
if preceeded_by_whitespace {
if preceded_by_whitespace {
flow_indicators = true
block_indicators = true
}
@ -1089,7 +1088,7 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
}
// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
preceeded_by_whitespace = is_blankz(value, i)
preceded_by_whitespace = is_blankz(value, i)
}
emitter.scalar_data.multiline = line_breaks

@ -0,0 +1,501 @@
package yaml_test
import (
"fmt"
"math"
"strconv"
"strings"
"time"
. "gopkg.in/check.v1"
"gopkg.in/yaml.v2"
"net"
"os"
)
// marshalIntTest exists so a test entry can marshal a pointer to an int.
var marshalIntTest = 123

// marshalTests drives TestMarshal: each entry pairs a Go value with the
// exact YAML text yaml.Marshal is expected to produce for it.
var marshalTests = []struct {
	value interface{}
	data  string
}{
	{
		nil,
		"null\n",
	}, {
		&struct{}{},
		"{}\n",
	}, {
		map[string]string{"v": "hi"},
		"v: hi\n",
	}, {
		map[string]interface{}{"v": "hi"},
		"v: hi\n",
	}, {
		map[string]string{"v": "true"},
		"v: \"true\"\n",
	}, {
		map[string]string{"v": "false"},
		"v: \"false\"\n",
	}, {
		map[string]interface{}{"v": true},
		"v: true\n",
	}, {
		map[string]interface{}{"v": false},
		"v: false\n",
	}, {
		map[string]interface{}{"v": 10},
		"v: 10\n",
	}, {
		map[string]interface{}{"v": -10},
		"v: -10\n",
	}, {
		map[string]uint{"v": 42},
		"v: 42\n",
	}, {
		map[string]interface{}{"v": int64(4294967296)},
		"v: 4294967296\n",
	}, {
		map[string]int64{"v": int64(4294967296)},
		"v: 4294967296\n",
	}, {
		map[string]uint64{"v": 4294967296},
		"v: 4294967296\n",
	}, {
		map[string]interface{}{"v": "10"},
		"v: \"10\"\n",
	}, {
		map[string]interface{}{"v": 0.1},
		"v: 0.1\n",
	}, {
		map[string]interface{}{"v": float64(0.1)},
		"v: 0.1\n",
	}, {
		map[string]interface{}{"v": -0.1},
		"v: -0.1\n",
	}, {
		map[string]interface{}{"v": math.Inf(+1)},
		"v: .inf\n",
	}, {
		map[string]interface{}{"v": math.Inf(-1)},
		"v: -.inf\n",
	}, {
		map[string]interface{}{"v": math.NaN()},
		"v: .nan\n",
	}, {
		map[string]interface{}{"v": nil},
		"v: null\n",
	}, {
		map[string]interface{}{"v": ""},
		"v: \"\"\n",
	}, {
		map[string][]string{"v": []string{"A", "B"}},
		"v:\n- A\n- B\n",
	}, {
		map[string][]string{"v": []string{"A", "B\nC"}},
		"v:\n- A\n- |-\n B\n C\n",
	}, {
		map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
		"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
	}, {
		map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
		"a:\n b: c\n",
	}, {
		map[string]interface{}{"a": "-"},
		"a: '-'\n",
	},

	// Simple values.
	{
		&marshalIntTest,
		"123\n",
	},

	// Structures
	{
		&struct{ Hello string }{"world"},
		"hello: world\n",
	}, {
		&struct {
			A struct {
				B string
			}
		}{struct{ B string }{"c"}},
		"a:\n b: c\n",
	}, {
		&struct {
			A *struct {
				B string
			}
		}{&struct{ B string }{"c"}},
		"a:\n b: c\n",
	}, {
		&struct {
			A *struct {
				B string
			}
		}{},
		"a: null\n",
	}, {
		&struct{ A int }{1},
		"a: 1\n",
	}, {
		&struct{ A []int }{[]int{1, 2}},
		"a:\n- 1\n- 2\n",
	}, {
		&struct {
			B int "a"
		}{1},
		"a: 1\n",
	}, {
		&struct{ A bool }{true},
		"a: true\n",
	},

	// Conditional flag
	{
		&struct {
			A int "a,omitempty"
			B int "b,omitempty"
		}{1, 0},
		"a: 1\n",
	}, {
		&struct {
			A int "a,omitempty"
			B int "b,omitempty"
		}{0, 0},
		"{}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{&struct{ X, y int }{1, 2}},
		"a: {x: 1}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{nil},
		"{}\n",
	}, {
		&struct {
			A *struct{ X, y int } "a,omitempty,flow"
		}{&struct{ X, y int }{}},
		"a: {x: 0}\n",
	}, {
		&struct {
			A struct{ X, y int } "a,omitempty,flow"
		}{struct{ X, y int }{1, 2}},
		"a: {x: 1}\n",
	}, {
		&struct {
			A struct{ X, y int } "a,omitempty,flow"
		}{struct{ X, y int }{0, 1}},
		"{}\n",
	}, {
		&struct {
			A float64 "a,omitempty"
			B float64 "b,omitempty"
		}{1, 0},
		"a: 1\n",
	},

	// Flow flag
	{
		&struct {
			A []int "a,flow"
		}{[]int{1, 2}},
		"a: [1, 2]\n",
	}, {
		&struct {
			A map[string]string "a,flow"
		}{map[string]string{"b": "c", "d": "e"}},
		"a: {b: c, d: e}\n",
	}, {
		&struct {
			A struct {
				B, D string
			} "a,flow"
		}{struct{ B, D string }{"c", "e"}},
		"a: {b: c, d: e}\n",
	},

	// Unexported field
	{
		&struct {
			u int
			A int
		}{0, 1},
		"a: 1\n",
	},

	// Ignored field
	{
		&struct {
			A int
			B int "-"
		}{1, 2},
		"a: 1\n",
	},

	// Struct inlining
	{
		&struct {
			A int
			C inlineB `yaml:",inline"`
		}{1, inlineB{2, inlineC{3}}},
		"a: 1\nb: 2\nc: 3\n",
	},

	// Map inlining
	{
		&struct {
			A int
			C map[string]int `yaml:",inline"`
		}{1, map[string]int{"b": 2, "c": 3}},
		"a: 1\nb: 2\nc: 3\n",
	},

	// Duration
	{
		map[string]time.Duration{"a": 3 * time.Second},
		"a: 3s\n",
	},

	// Issue #24: bug in map merging logic.
	{
		map[string]string{"a": "<foo>"},
		"a: <foo>\n",
	},

	// Issue #34: marshal unsupported base 60 floats quoted for compatibility
	// with old YAML 1.1 parsers.
	{
		map[string]string{"a": "1:1"},
		"a: \"1:1\"\n",
	},

	// Binary data.
	{
		map[string]string{"a": "\x00"},
		"a: \"\\0\"\n",
	}, {
		map[string]string{"a": "\x80\x81\x82"},
		"a: !!binary gIGC\n",
	}, {
		map[string]string{"a": strings.Repeat("\x90", 54)},
		"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
	},

	// Ordered maps.
	{
		&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
		"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
	},

	// Encode unicode as utf-8 rather than in escaped form.
	{
		map[string]string{"a": "你好"},
		"a: 你好\n",
	},

	// Support encoding.TextMarshaler.
	{
		map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
		"a: 1.2.3.4\n",
	},
	{
		map[string]time.Time{"a": time.Unix(1424801979, 0)},
		"a: 2015-02-24T18:19:39Z\n",
	},

	// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
	{
		map[string]string{"a": "b: c"},
		"a: 'b: c'\n",
	},

	// Containing hash mark ('#') in string should be quoted
	{
		map[string]string{"a": "Hello #comment"},
		"a: 'Hello #comment'\n",
	},
	{
		map[string]string{"a": "你好 #comment"},
		"a: '你好 #comment'\n",
	},
}
// TestMarshal runs every fixture in marshalTests through yaml.Marshal
// and compares the output byte-for-byte. TZ is pinned to UTC so the
// time.Time fixture serializes deterministically.
func (s *S) TestMarshal(c *C) {
	// note: os.Getenv runs now, so the defer restores the original TZ
	defer os.Setenv("TZ", os.Getenv("TZ"))
	os.Setenv("TZ", "UTC")
	for _, item := range marshalTests {
		data, err := yaml.Marshal(item.value)
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, item.data)
	}
}
// marshalErrorTests lists values whose marshaling must fail: either with
// a returned error matching `error`, or with a panic matching `panic`
// (exactly one of the two is set per entry).
var marshalErrorTests = []struct {
	value interface{}
	error string
	panic string
}{{
	value: &struct {
		B       int
		inlineB ",inline"
	}{1, inlineB{2, inlineC{3}}},
	panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
}, {
	value: &struct {
		A int
		B map[string]int ",inline"
	}{1, map[string]int{"a": 2}},
	panic: `Can't have key "a" in inlined map; conflicts with struct field`,
}}
// TestMarshalErrors checks each marshalErrorTests entry, asserting a
// panic when item.panic is set and a returned error otherwise.
func (s *S) TestMarshalErrors(c *C) {
	for _, item := range marshalErrorTests {
		if item.panic != "" {
			c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
		} else {
			_, err := yaml.Marshal(item.value)
			c.Assert(err, ErrorMatches, item.error)
		}
	}
}
// TestMarshalTypeCache guards against the type-info cache confusing two
// distinct local types that share the name T: the second Marshal must
// reflect T{B int}, not the cached layout of T{A int}.
func (s *S) TestMarshalTypeCache(c *C) {
	var data []byte
	var err error
	func() {
		type T struct{ A int }
		data, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}()
	func() {
		type T struct{ B int }
		data, err = yaml.Marshal(&T{})
		c.Assert(err, IsNil)
	}()
	c.Assert(string(data), Equals, "b: 0\n")
}
// marshalerTests maps the value a MarshalYAML implementation returns to
// the YAML expected under the fixed key "_".
var marshalerTests = []struct {
	data  string
	value interface{}
}{
	{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
	{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
	{"_: 10\n", 10},
	{"_: null\n", nil},
	{"_: BAR!\n", "BAR!"},
}

// marshalerType implements both MarshalYAML and MarshalText so the tests
// can prove MarshalYAML takes precedence.
type marshalerType struct {
	value interface{}
}

// MarshalText must never be reached when MarshalYAML is present.
func (o marshalerType) MarshalText() ([]byte, error) {
	panic("MarshalText called on type with MarshalYAML")
}

// MarshalYAML substitutes o.value for the receiver during encoding.
func (o marshalerType) MarshalYAML() (interface{}, error) {
	return o.value, nil
}

// marshalerValue wraps a marshalerType under the YAML key "_".
type marshalerValue struct {
	Field marshalerType "_"
}
// TestMarshaler verifies that a field's MarshalYAML result is encoded in
// place of the field value for each marshalerTests fixture.
func (s *S) TestMarshaler(c *C) {
	for _, item := range marshalerTests {
		obj := &marshalerValue{}
		obj.Field.value = item.value
		data, err := yaml.Marshal(obj)
		c.Assert(err, IsNil)
		c.Assert(string(data), Equals, string(item.data))
	}
}
// TestMarshalerWholeDocument checks MarshalYAML when the marshaler is
// the top-level document value rather than a struct field.
func (s *S) TestMarshalerWholeDocument(c *C) {
	obj := &marshalerType{}
	obj.value = map[string]string{"hello": "world!"}
	data, err := yaml.Marshal(obj)
	c.Assert(err, IsNil)
	c.Assert(string(data), Equals, "hello: world!\n")
}
// failingMarshaler is a MarshalYAML implementation that always fails
// with the package-level failingErr sentinel.
type failingMarshaler struct{}

func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
	return nil, failingErr
}

// TestMarshalerError asserts that an error from MarshalYAML is returned
// unwrapped from yaml.Marshal.
func (s *S) TestMarshalerError(c *C) {
	_, err := yaml.Marshal(&failingMarshaler{})
	c.Assert(err, Equals, failingErr)
}
// TestSortedOutput marshals a map whose keys span bools, ints, floats,
// and strings (including numeric-looking and embedded-number strings),
// then asserts every key appears in the output in the expected order.
func (s *S) TestSortedOutput(c *C) {
	// order lists the keys in exactly the order Marshal must emit them
	order := []interface{}{
		false,
		true,
		1,
		uint(1),
		1.0,
		1.1,
		1.2,
		2,
		uint(2),
		2.0,
		2.1,
		"",
		".1",
		".2",
		".a",
		"1",
		"2",
		"a!10",
		"a/2",
		"a/10",
		"a~10",
		"ab/1",
		"b/1",
		"b/01",
		"b/2",
		"b/02",
		"b/3",
		"b/03",
		"b1",
		"b01",
		"b3",
		"c2.10",
		"c10.2",
		"d1",
		"d12",
		"d12a",
	}
	m := make(map[interface{}]int)
	for _, k := range order {
		m[k] = 1
	}
	data, err := yaml.Marshal(m)
	c.Assert(err, IsNil)
	out := "\n" + string(data)
	last := 0
	for i, k := range order {
		repr := fmt.Sprint(k)
		if s, ok := k.(string); ok {
			// empty and float-parseable strings are emitted quoted
			if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
				repr = `"` + repr + `"`
			}
		}
		index := strings.Index(out, "\n"+repr+":")
		if index == -1 {
			c.Fatalf("%#v is not in the output: %#v", k, out)
		}
		// each key must appear at or after the previous key's position
		if index < last {
			c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
		}
		last = index
	}
}

@ -0,0 +1,41 @@
package yaml_test
import (
"fmt"
"log"
"gopkg.in/yaml.v2"
)
// An example showing how to unmarshal embedded
// structs from YAML.

// StructA holds the field populated from the YAML key "a".
type StructA struct {
	A string `yaml:"a"`
}

// StructB embeds StructA inline plus its own "b" field.
type StructB struct {
	// Embedded structs are not treated as embedded in YAML by default. To do that,
	// add the ",inline" annotation below
	StructA `yaml:",inline"`
	B       string `yaml:"b"`
}

// data is the YAML document the example decodes.
var data = `
a: a string from struct A
b: a string from struct B
`
// ExampleUnmarshal_embedded demonstrates decoding YAML into a struct
// with an inlined embedded struct: keys "a" and "b" land on StructA.A
// and StructB.B respectively.
func ExampleUnmarshal_embedded() {
	var b StructB

	err := yaml.Unmarshal([]byte(data), &b)
	if err != nil {
		// Fix: log.Fatal does not accept a format string; use Fatalf
		// so the %v verb is actually interpolated (flagged by go vet).
		log.Fatalf("cannot unmarshal data: %v", err)
	}
	fmt.Println(b.A)
	fmt.Println(b.B)
	// Output:
	// a string from struct A
	// a string from struct B
}

@ -166,7 +166,6 @@ func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool
default:
panic("invalid parser state")
}
return false
}
// Parse the production:

@ -3,6 +3,7 @@ package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"unicode/utf8"
@ -80,6 +81,8 @@ func resolvableTag(tag string) bool {
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
@ -135,9 +138,11 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
if err == nil {
return yaml_INT_TAG, uintv
}
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)

@ -9,7 +9,7 @@ import (
// ************
//
// The following notes assume that you are familiar with the YAML specification
// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
// some cases we are less restrictive that it requires.
//
// The process of transforming a YAML stream into a sequence of events is
@ -611,7 +611,7 @@ func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, co
if directive {
context = "while parsing a %TAG directive"
}
return yaml_parser_set_scanner_error(parser, context, context_mark, "did not find URI escaped octet")
return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
}
func trace(args ...interface{}) func() {
@ -1944,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
if directive && !(s[0] == '!' && s[1] == 0) {
if directive && string(s) != "!" {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@ -1959,6 +1959,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
hasTag := len(head) > 0
// Copy the head if needed.
//
@ -2000,10 +2001,10 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
hasTag = true
}
// Check if the tag is non-empty.
if len(s) == 0 {
if !hasTag {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false

@ -0,0 +1,12 @@
package yaml_test
import (
. "gopkg.in/check.v1"
"testing"
)
// Test hooks the gocheck suites into the standard "go test" runner.
func Test(t *testing.T) { TestingT(t) }

// S is the empty suite type all yaml package tests are methods of.
type S struct{}

var _ = Suite(&S{})

@ -77,8 +77,19 @@ type Marshaler interface {
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
	// non-strict mode: fields present in the data but absent from the
	// destination struct are silently ignored
	return unmarshal(in, out, false)
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
	// strict mode: unknown fields are reported by the decoder
	return unmarshal(in, out, true)
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder()
d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()

@ -508,7 +508,7 @@ type yaml_parser_t struct {
problem string // Error description.
// The byte about which the problem occured.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t

@ -0,0 +1,116 @@
package httpauth
import (
"encoding/base64"
"net/http"
"testing"
)
// TestBasicAuthAuthenticateWithFunc exercises authenticate when an
// AuthFunc is supplied: nil requests, missing/malformed headers, and
// both matching and non-matching credentials.
func TestBasicAuthAuthenticateWithFunc(t *testing.T) {
	requiredUser := "jqpublic"
	requiredPass := "secret.sauce"
	r := &http.Request{Method: "GET"}

	// Dumb test function: accepts only the fixed credentials, and only
	// for this exact request value.
	fn := func(u, p string, req *http.Request) bool {
		return u == requiredUser && p == requiredPass && req == r
	}

	// Provide a minimal test implementation.
	authOpts := AuthOptions{
		Realm:    "Restricted",
		AuthFunc: fn,
	}
	b := &basicAuth{opts: authOpts}

	if b.authenticate(nil) {
		t.Fatal("Should not succeed when http.Request is nil")
	}
	// Provide auth data, but no Authorization header
	if b.authenticate(r) != false {
		t.Fatal("No Authorization header supplied.")
	}

	// Initialise the map for HTTP headers
	r.Header = http.Header(make(map[string][]string))

	// Set a malformed/bad header
	r.Header.Set("Authorization", " Basic")
	if b.authenticate(r) != false {
		t.Fatal("Malformed Authorization header supplied.")
	}

	// Test correct credentials
	auth := base64.StdEncoding.EncodeToString([]byte("jqpublic:secret.sauce"))
	r.Header.Set("Authorization", "Basic "+auth)
	if b.authenticate(r) != true {
		t.Fatal("Failed on correct credentials")
	}

	// Test incorrect credentials
	auth = base64.StdEncoding.EncodeToString([]byte("jqpublic:hackydoo"))
	r.Header.Set("Authorization", "Basic "+auth)
	if b.authenticate(r) == true {
		t.Fatal("Success when expecting failure")
	}
}
// TestBasicAuthAuthenticate exercises authenticate with a static
// user/password pair: missing header, malformed header, and a valid
// base64-encoded "user:password" credential.
func TestBasicAuthAuthenticate(t *testing.T) {
	// Provide a minimal test implementation.
	authOpts := AuthOptions{
		Realm:    "Restricted",
		User:     "test-user",
		Password: "plain-text-password",
	}

	b := &basicAuth{
		opts: authOpts,
	}

	r := &http.Request{Method: "GET"}

	// Provide auth data, but no Authorization header
	if b.authenticate(r) != false {
		t.Fatal("No Authorization header supplied.")
	}

	// Initialise the map for HTTP headers
	r.Header = http.Header(make(map[string][]string))

	// Set a malformed/bad header
	r.Header.Set("Authorization", " Basic")
	if b.authenticate(r) != false {
		t.Fatal("Malformed Authorization header supplied.")
	}

	// Test correct credentials
	auth := base64.StdEncoding.EncodeToString([]byte(b.opts.User + ":" + b.opts.Password))
	r.Header.Set("Authorization", "Basic "+auth)
	if b.authenticate(r) != true {
		t.Fatal("Failed on correct credentials")
	}
}
// TestBasicAuthAuthenticateWithoutUserAndPass verifies that empty
// configured credentials never authenticate, even when the request's
// credentials are the matching empty pair.
func TestBasicAuthAuthenticateWithoutUserAndPass(t *testing.T) {
	b := basicAuth{opts: AuthOptions{}}

	r := &http.Request{Method: "GET"}

	// Provide auth data, but no Authorization header
	if b.authenticate(r) != false {
		t.Fatal("No Authorization header supplied.")
	}

	// Initialise the map for HTTP headers
	r.Header = http.Header(make(map[string][]string))

	// Test "correct" (empty:empty) credentials; they must still fail
	auth := base64.StdEncoding.EncodeToString([]byte(b.opts.User + ":" + b.opts.Password))
	r.Header.Set("Authorization", "Basic "+auth)
	if b.authenticate(r) != false {
		t.Fatal("Success when expecting failure")
	}
}

@ -0,0 +1,2 @@
/node-syscall/build
/node_modules

@ -1,16 +1,15 @@
License: BSD License
Copyright (c) 2013 Kenny Grant. All rights reserved.
Copyright (c) 2013 Richard Musiol. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* to endorse or promote products derived from this software
without specific prior written permission.
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ -22,4 +21,4 @@ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,140 @@
GopherJS - A compiler from Go to JavaScript
-------------------------------------------
[![Sourcegraph](https://sourcegraph.com/github.com/gopherjs/gopherjs/-/badge.svg)](https://sourcegraph.com/github.com/gopherjs/gopherjs?badge)
[![Circle CI](https://circleci.com/gh/gopherjs/gopherjs.svg?style=svg)](https://circleci.com/gh/gopherjs/gopherjs)
GopherJS compiles Go code ([golang.org](https://golang.org/)) to pure JavaScript code. Its main purpose is to give you the opportunity to write front-end code in Go which will still run in all browsers.
### Playground
Give GopherJS a try on the [GopherJS Playground](http://gopherjs.github.io/playground/).
### What is supported?
Nearly everything, including Goroutines ([compatibility table](https://github.com/gopherjs/gopherjs/blob/master/doc/packages.md)). Performance is quite good in most cases, see [HTML5 game engine benchmark](https://ajhager.github.io/engi/demos/botmark.html). Cgo is not supported. Using a vendored copy of GopherJS is currently not supported, see [#415](https://github.com/gopherjs/gopherjs/issues/415).
### Installation and Usage
Get or update GopherJS and dependencies with:
```
go get -u github.com/gopherjs/gopherjs
```
Now you can use `gopherjs build [package]`, `gopherjs build [files]` or `gopherjs install [package]` which behave similar to the `go` tool. For `main` packages, these commands create a `.js` file and `.js.map` source map in the current directory or in `$GOPATH/bin`. The generated JavaScript file can be used as usual in a website. Use `gopherjs help [command]` to get a list of possible command line flags, e.g. for minification and automatically watching for changes.
*Note: GopherJS will try to write compiled object files of the core packages to your $GOROOT/pkg directory. If that fails, it will fall back to $GOPATH/pkg.*
#### gopherjs run, gopherjs test
If you want to use `gopherjs run` or `gopherjs test` to run the generated code locally, install Node.js 4.x (or newer), and the `source-map-support` module:
```
npm install --global source-map-support
```
For system calls (file system access, etc.), see [this page](https://github.com/gopherjs/gopherjs/blob/master/doc/syscalls.md).
#### gopherjs serve
`gopherjs serve` is a useful command you can use during development. It will start an HTTP server serving on ":8080" by default, and dynamically compile Go packages with GopherJS and serve them.
For example, navigating to `http://localhost:8080/example.com/user/project/` should compile and run the Go package `example.com/user/project`. The generated JavaScript output will be served at `http://localhost:8080/example.com/user/project/project.js`. If the directory contains `index.html` it will be served, otherwise a minimal `index.html` that includes `<script src="{{base}}.js"></script>` will be provided, causing the JavaScript to be executed. All other static files will be served too.
Refreshing in the browser will rebuild the served files if needed. Compilation errors will be displayed in terminal, and in browser console. Additionally, it will serve $GOROOT and $GOPATH for sourcemaps.
If you include an argument, it will be the root from which everything is served. For example, if you run gopherjs serve github.com/user/project then the generated JavaScript for the package github.com/user/project/mypkg will be served at http://localhost:8080/mypkg/mypkg.js.
### Performance Tips
- Use the `-m` command line flag to generate minified code.
- Apply gzip compression (https://en.wikipedia.org/wiki/HTTP_compression).
- Use `int` instead of `(u)int8/16/32/64`.
- Use `float64` instead of `float32`.
### Community
- [#gopherjs Channel on Gophers Slack](https://gophers.slack.com/messages/gopherjs/) (invites to Gophers Slack are available [here](http://blog.gopheracademy.com/gophers-slack-community/#how-can-i-be-invited-to-join:2facdc921b2310f18cb851c36fa92369))
- [Bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings)
- [GopherJS Blog](https://medium.com/gopherjs)
- [GopherJS on Twitter](https://twitter.com/GopherJS)
### Getting started
#### Interacting with the DOM
The package `github.com/gopherjs/gopherjs/js` (see [documentation](https://godoc.org/github.com/gopherjs/gopherjs/js)) provides functions for interacting with native JavaScript APIs. For example the line
```js
document.write("Hello world!");
```
would look like this in Go:
```go
js.Global.Get("document").Call("write", "Hello world!")
```
You may also want to use the [DOM bindings](http://dominik.honnef.co/go/js/dom), the [jQuery bindings](https://github.com/gopherjs/jquery) (see [TodoMVC Example](https://github.com/gopherjs/todomvc)) or the [AngularJS bindings](https://github.com/wvell/go-angularjs). Those are some of the [bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings) by community members.
#### Providing library functions for use in other JavaScript code
Set a global variable to a map that contains the functions:
```go
package main
import "github.com/gopherjs/gopherjs/js"
func main() {
js.Global.Set("pet", map[string]interface{}{
"New": New,
})
}
type Pet struct {
name string
}
func New(name string) *js.Object {
return js.MakeWrapper(&Pet{name})
}
func (p *Pet) Name() string {
return p.name
}
func (p *Pet) SetName(name string) {
p.name = name
}
```
For more details see [Jason Stone's blog post](http://legacytotheedge.blogspot.de/2014/03/gopherjs-go-to-javascript-transpiler.html) about GopherJS.
### Architecture
#### General
GopherJS emulates a 32-bit environment. This means that `int`, `uint` and `uintptr` have a precision of 32 bits. However, the explicit 64-bit integer types `int64` and `uint64` are supported. The `GOARCH` value of GopherJS is "js". You may use it as a build constraint: `// +build js`.
#### Application Lifecycle
The `main` function is executed as usual after all `init` functions have run. JavaScript callbacks can also invoke Go functions, even after the `main` function has exited. Therefore the end of the `main` function should not be regarded as the end of the application and does not end the execution of other goroutines.
In the browser, calling `os.Exit` (e.g. indirectly by `log.Fatal`) also does not terminate the execution of the program. For convenience, it calls `runtime.Goexit` to immediately terminate the calling goroutine.
#### Goroutines
Goroutines are fully supported by GopherJS. The only restriction is that you need to start a new goroutine if you want to use blocking code called from external JavaScript:
```go
js.Global.Get("myButton").Call("addEventListener", "click", func() {
go func() {
[...]
someBlockingFunction()
[...]
}()
})
```
How it works:
JavaScript has no concept of concurrency (except web workers, but those are too strictly separated to be used for goroutines). Because of that, instructions in JavaScript are never blocking. A blocking call would effectively freeze the responsiveness of your web page, so calls with callback arguments are used instead.
GopherJS does some heavy lifting to work around this restriction: Whenever an instruction is blocking (e.g. communicating with a channel that isn't ready), the whole stack will unwind (= all functions return) and the goroutine will be put to sleep. Then another goroutine which is ready to resume gets picked and its stack with all local variables will be restored.
### GopherJS Development
If you're looking to make changes to the GopherJS compiler, see [Developer Guidelines](https://github.com/gopherjs/gopherjs/wiki/Developer-Guidelines) for additional developer information.

@ -0,0 +1,779 @@
package build
import (
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/fsnotify/fsnotify"
"github.com/gopherjs/gopherjs/compiler"
"github.com/gopherjs/gopherjs/compiler/natives"
"github.com/neelance/sourcemap"
)
// ImportCError is the error returned when a package imports "C" (cgo),
// which GopherJS cannot compile.
type ImportCError struct {
	pkgPath string // import path of the offending package
}

// Error implements the error interface.
func (e *ImportCError) Error() string {
	const reason = `: importing "C" is not supported by GopherJS`
	return e.pkgPath + reason
}
// NewBuildContext returns a build.Context configured for GopherJS output:
// GOARCH is "js", the "netgo" build tag is always added, and CgoEnabled is
// deliberately left on so that `import "C"` is detected and can be rejected
// with a proper error instead of being silently skipped.
func NewBuildContext(installSuffix string, buildTags []string) *build.Context {
	var ctx build.Context
	ctx.GOROOT = build.Default.GOROOT
	ctx.GOPATH = build.Default.GOPATH
	ctx.GOOS = build.Default.GOOS
	ctx.GOARCH = "js"
	ctx.InstallSuffix = installSuffix
	ctx.Compiler = "gc"
	ctx.BuildTags = append(buildTags, "netgo")
	ctx.ReleaseTags = build.Default.ReleaseTags
	ctx.CgoEnabled = true // detect `import "C"` to throw proper error
	return &ctx
}
// Import returns details about the Go package named by the import path. If the
// path is a local import path naming a package that can be imported using
// a standard import path, the returned package will set p.ImportPath to
// that path.
//
// In the directory containing the package, .go and .inc.js files are
// considered part of the package except for:
//
//    - .go files in package documentation
//    - files starting with _ or . (likely editor temporary files)
//    - files with build constraints not satisfied by the context
//
// If an error occurs, Import returns a non-nil error and a nil
// *PackageData.
func Import(path string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
	// Getwd may fail when running under GOARCH=js. That is tolerable: with an
	// empty working directory only relative import paths cannot be resolved.
	srcDir := ""
	if wd, err := os.Getwd(); err == nil {
		srcDir = wd
	}
	return importWithSrcDir(path, srcDir, mode, installSuffix, buildTags)
}
// importWithSrcDir resolves the import path relative to srcDir using a
// GopherJS build context, applies GopherJS-specific adjustments to the
// resulting package (cgo rejection, standard-library file trimming, GOPATH
// fallback for the object file), and collects accompanying .inc.js files.
func importWithSrcDir(path string, srcDir string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
	buildContext := NewBuildContext(installSuffix, buildTags)
	if path == "syscall" { // syscall needs to use a typical GOARCH like amd64 to pick up definitions for _Socklen, BpfInsn, IFNAMSIZ, Timeval, BpfStat, SYS_FCNTL, Flock_t, etc.
		buildContext.GOARCH = runtime.GOARCH
		buildContext.InstallSuffix = "js"
		if installSuffix != "" {
			buildContext.InstallSuffix += "_" + installSuffix
		}
	}
	pkg, err := buildContext.Import(path, srcDir, mode)
	if err != nil {
		return nil, err
	}
	// TODO: Resolve issue #415 and remove this temporary workaround.
	if strings.HasSuffix(pkg.ImportPath, "/vendor/github.com/gopherjs/gopherjs/js") {
		return nil, fmt.Errorf("vendoring github.com/gopherjs/gopherjs/js package is not supported, see https://github.com/gopherjs/gopherjs/issues/415")
	}
	// A few standard-library packages need their file lists trimmed so that
	// implementations unsupported under GOARCH=js are not compiled.
	switch path {
	case "os":
		pkg.GoFiles = stripExecutable(pkg.GoFiles) // Need to strip executable implementation files, because some of them contain package scope variables that perform (indirectly) syscalls on init.
	case "runtime":
		pkg.GoFiles = []string{"error.go"}
	case "runtime/internal/sys":
		pkg.GoFiles = []string{fmt.Sprintf("zgoos_%s.go", buildContext.GOOS), "zversion.go"}
	case "runtime/pprof":
		pkg.GoFiles = nil
	case "crypto/rand":
		pkg.GoFiles = []string{"rand.go", "util.go"}
	case "crypto/x509":
		pkg.CgoFiles = nil
	}
	// Any remaining cgo files mean the package cannot be compiled by GopherJS.
	if len(pkg.CgoFiles) > 0 {
		return nil, &ImportCError{path}
	}
	// Commands produce a .js file next to where the binary would normally go.
	if pkg.IsCommand() {
		pkg.PkgObj = filepath.Join(pkg.BinDir, filepath.Base(pkg.ImportPath)+".js")
	}
	if _, err := os.Stat(pkg.PkgObj); os.IsNotExist(err) && strings.HasPrefix(pkg.PkgObj, build.Default.GOROOT) {
		// fall back to GOPATH
		firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] // TODO: Need to check inside all GOPATH workspaces.
		gopathPkgObj := filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(build.Default.GOROOT):])
		if _, err := os.Stat(gopathPkgObj); err == nil {
			pkg.PkgObj = gopathPkgObj
		}
	}
	jsFiles, err := jsFilesFromDir(pkg.Dir)
	if err != nil {
		return nil, err
	}
	return &PackageData{Package: pkg, JSFiles: jsFiles}, nil
}
// stripExecutable filters out all executable implementation .go files,
// identified by their "executable_" name prefix. The input slice is not
// modified; a new slice (nil when nothing survives) is returned.
func stripExecutable(goFiles []string) []string {
	var kept []string
	for _, name := range goFiles {
		if strings.HasPrefix(name, "executable_") {
			continue
		}
		kept = append(kept, name)
	}
	return kept
}
// ImportDir is like Import but processes the Go package found in the named
// directory, additionally collecting any .inc.js files living alongside it.
func ImportDir(dir string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
	ctx := NewBuildContext(installSuffix, buildTags)
	pkg, err := ctx.ImportDir(dir, mode)
	if err != nil {
		return nil, err
	}
	js, err := jsFilesFromDir(pkg.Dir)
	if err != nil {
		return nil, err
	}
	return &PackageData{Package: pkg, JSFiles: js}, nil
}
// parseAndAugment parses and returns all .go files of given pkg.
// Standard Go library packages are augmented with files in compiler/natives folder.
// If isTest is true and pkg.ImportPath has no _test suffix, package is built for running internal tests.
// If isTest is true and pkg.ImportPath has _test suffix, package is built for running external tests.
//
// The native packages are augmented by the contents of natives.FS in the following way.
// The file names do not matter except the usual `_test` suffix. The files for
// native overrides get added to the package (even if they have the same name
// as an existing file from the standard library). For all identifiers that exist
// in the original AND the overrides, the original identifier in the AST gets
// replaced by `_`. New identifiers that don't exist in original package get added.
func parseAndAugment(pkg *build.Package, isTest bool, fileSet *token.FileSet) ([]*ast.File, error) {
	var files []*ast.File
	// Names of top-level declarations provided by the natives overrides;
	// matching declarations in the original package are renamed to `_` below.
	replacedDeclNames := make(map[string]bool)
	// funcName builds a key that is unique per (receiver type, method name),
	// so methods on different receivers do not collide with each other or
	// with plain functions.
	funcName := func(d *ast.FuncDecl) string {
		if d.Recv == nil || len(d.Recv.List) == 0 {
			return d.Name.Name
		}
		recv := d.Recv.List[0].Type
		if star, ok := recv.(*ast.StarExpr); ok {
			recv = star.X
		}
		return recv.(*ast.Ident).Name + "." + d.Name.Name
	}
	isXTest := strings.HasSuffix(pkg.ImportPath, "_test")
	importPath := pkg.ImportPath
	if isXTest {
		importPath = importPath[:len(importPath)-5] // drop the "_test" suffix
	}
	// nativesContext reads packages from the embedded natives.FS virtual
	// filesystem rather than from disk; its paths are always slash-separated,
	// hence the use of the `path` package instead of `filepath`.
	nativesContext := &build.Context{
		GOROOT:   "/",
		GOOS:     build.Default.GOOS,
		GOARCH:   "js",
		Compiler: "gc",
		JoinPath: path.Join,
		SplitPathList: func(list string) []string {
			if list == "" {
				return nil
			}
			return strings.Split(list, "/")
		},
		IsAbsPath: path.IsAbs,
		IsDir: func(name string) bool {
			dir, err := natives.FS.Open(name)
			if err != nil {
				return false
			}
			defer dir.Close()
			info, err := dir.Stat()
			if err != nil {
				return false
			}
			return info.IsDir()
		},
		HasSubdir: func(root, name string) (rel string, ok bool) {
			panic("not implemented")
		},
		ReadDir: func(name string) (fi []os.FileInfo, err error) {
			dir, err := natives.FS.Open(name)
			if err != nil {
				return nil, err
			}
			defer dir.Close()
			return dir.Readdir(0)
		},
		OpenFile: func(name string) (r io.ReadCloser, err error) {
			return natives.FS.Open(name)
		},
	}
	// Parse the native override files first (if any exist for this import
	// path) and record every top-level identifier they declare.
	if nativesPkg, err := nativesContext.Import(importPath, "", 0); err == nil {
		names := nativesPkg.GoFiles
		if isTest {
			names = append(names, nativesPkg.TestGoFiles...)
		}
		if isXTest {
			names = nativesPkg.XTestGoFiles
		}
		for _, name := range names {
			fullPath := path.Join(nativesPkg.Dir, name)
			r, err := nativesContext.OpenFile(fullPath)
			if err != nil {
				panic(err) // virtual FS listed the file; failure to open is a bug
			}
			file, err := parser.ParseFile(fileSet, fullPath, r, parser.ParseComments)
			if err != nil {
				panic(err) // natives are part of the compiler; they must parse
			}
			r.Close()
			for _, decl := range file.Decls {
				switch d := decl.(type) {
				case *ast.FuncDecl:
					replacedDeclNames[funcName(d)] = true
				case *ast.GenDecl:
					switch d.Tok {
					case token.TYPE:
						for _, spec := range d.Specs {
							replacedDeclNames[spec.(*ast.TypeSpec).Name.Name] = true
						}
					case token.VAR, token.CONST:
						for _, spec := range d.Specs {
							for _, name := range spec.(*ast.ValueSpec).Names {
								replacedDeclNames[name.Name] = true
							}
						}
					}
				}
			}
			files = append(files, file)
		}
	}
	// init functions accumulate; an override's init must not suppress the
	// original package's init.
	delete(replacedDeclNames, "init")
	var errList compiler.ErrorList
	// Now parse the package's own files, rewriting them as needed.
	for _, name := range pkg.GoFiles {
		if !filepath.IsAbs(name) {
			name = filepath.Join(pkg.Dir, name)
		}
		r, err := os.Open(name)
		if err != nil {
			return nil, err
		}
		file, err := parser.ParseFile(fileSet, name, r, parser.ParseComments)
		r.Close()
		if err != nil {
			if list, isList := err.(scanner.ErrorList); isList {
				// Cap the reported parse errors at 10 per file.
				if len(list) > 10 {
					list = append(list[:10], &scanner.Error{Pos: list[9].Pos, Msg: "too many errors"})
				}
				for _, entry := range list {
					errList = append(errList, entry)
				}
				continue
			}
			errList = append(errList, err)
			continue
		}
		// For selected std packages, redirect "sync" imports to the
		// cooperative nosync implementation.
		switch pkg.ImportPath {
		case "crypto/rand", "encoding/gob", "encoding/json", "expvar", "go/token", "log", "math/big", "math/rand", "regexp", "testing", "time":
			for _, spec := range file.Imports {
				path, _ := strconv.Unquote(spec.Path.Value)
				if path == "sync" {
					if spec.Name == nil {
						spec.Name = ast.NewIdent("sync")
					}
					spec.Path.Value = `"github.com/gopherjs/gopherjs/nosync"`
				}
			}
		}
		// Rename declarations that the natives override to `_` so the
		// override's definition wins without duplicate-symbol errors.
		for _, decl := range file.Decls {
			switch d := decl.(type) {
			case *ast.FuncDecl:
				if replacedDeclNames[funcName(d)] {
					d.Name = ast.NewIdent("_")
				}
			case *ast.GenDecl:
				switch d.Tok {
				case token.TYPE:
					for _, spec := range d.Specs {
						s := spec.(*ast.TypeSpec)
						if replacedDeclNames[s.Name.Name] {
							s.Name = ast.NewIdent("_")
						}
					}
				case token.VAR, token.CONST:
					for _, spec := range d.Specs {
						s := spec.(*ast.ValueSpec)
						for i, name := range s.Names {
							if replacedDeclNames[name.Name] {
								s.Names[i] = ast.NewIdent("_")
							}
						}
					}
				}
			}
		}
		files = append(files, file)
	}
	if errList != nil {
		return nil, errList
	}
	return files, nil
}
// Options holds the configuration for a build Session.
type Options struct {
	GOROOT         string   // Go root to compile against; defaults to build.Default.GOROOT
	GOPATH         string   // workspace list; defaults to build.Default.GOPATH
	Verbose        bool     // print each package's import path as it is compiled
	Quiet          bool
	Watch          bool     // rebuild on filesystem changes (implies Verbose)
	CreateMapFile  bool     // emit a .js.map source map next to the output
	MapToLocalDisk bool     // keep absolute local paths in the source map
	Minify         bool     // produce minified output
	Color          bool     // colorize PrintError/PrintSuccess output
	BuildTags      []string // additional build tags
}

// colorized wraps format in the given ANSI color code when Color is enabled;
// otherwise it returns format unchanged.
func (o *Options) colorized(code, format string) string {
	if !o.Color {
		return format
	}
	return "\x1B[" + code + "m" + format + "\x1B[39m"
}

// PrintError writes a formatted message to stderr, in red when Color is on.
func (o *Options) PrintError(format string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, o.colorized("31", format), a...)
}

// PrintSuccess writes a formatted message to stderr, in green when Color is on.
func (o *Options) PrintSuccess(format string, a ...interface{}) {
	fmt.Fprintf(os.Stderr, o.colorized("32", format), a...)
}
// PackageData wraps a go/build package with GopherJS-specific build state.
type PackageData struct {
	*build.Package
	JSFiles []string // .inc.js files found in the package directory
	IsTest  bool     // IsTest is true if the package is being built for running tests.
	// SrcModTime is the newest modification time found among the package's
	// sources, its dependencies, and the compiler binary (see BuildPackage).
	SrcModTime time.Time
	// UpToDate is set when the on-disk object file is at least as new as
	// SrcModTime, so no recompilation was needed.
	UpToDate bool
}
// Session carries the shared state of one build run: compiled archives,
// type-checked packages, and (in watch mode) the filesystem watcher.
type Session struct {
	options  *Options
	Archives map[string]*compiler.Archive // compiled archives, keyed by import path
	Types    map[string]*types.Package    // type-checked packages, keyed by import path
	Watcher  *fsnotify.Watcher            // non-nil only when options.Watch is set
}
// NewSession creates a build Session from options, filling in GOROOT/GOPATH
// defaults and, when watch mode is requested, setting up a filesystem watcher.
func NewSession(options *Options) *Session {
	if options.GOROOT == "" {
		options.GOROOT = build.Default.GOROOT
	}
	if options.GOPATH == "" {
		options.GOPATH = build.Default.GOPATH
	}
	// Watch mode implies verbose output.
	options.Verbose = options.Verbose || options.Watch

	sess := &Session{
		options:  options,
		Archives: make(map[string]*compiler.Archive),
		Types:    make(map[string]*types.Package),
	}
	if !options.Watch {
		return sess
	}
	// Watching many directories can exhaust the fd limit; warn up front when
	// it looks dangerously low.
	if out, err := exec.Command("ulimit", "-n").Output(); err == nil {
		if n, err := strconv.Atoi(strings.TrimSpace(string(out))); err == nil && n < 1024 {
			fmt.Printf("Warning: The maximum number of open file descriptors is very low (%d). Change it with 'ulimit -n 8192'.\n", n)
		}
	}
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	sess.Watcher = watcher
	return sess
}
// InstallSuffix returns the install suffix used for compiled object files:
// "min" when minification is enabled, empty otherwise.
func (s *Session) InstallSuffix() string {
	suffix := ""
	if s.options.Minify {
		suffix = "min"
	}
	return suffix
}
// BuildDir compiles the package located in packagePath. For command packages
// it additionally writes the generated JavaScript to pkgObj (defaulting to
// "<dir>.js" in the current directory) unless the output is already up to date.
func (s *Session) BuildDir(packagePath string, importPath string, pkgObj string) error {
	if s.Watcher != nil {
		s.Watcher.Add(packagePath)
	}
	ctx := NewBuildContext(s.InstallSuffix(), s.options.BuildTags)
	buildPkg, err := ctx.ImportDir(packagePath, 0)
	if err != nil {
		return err
	}
	pkg := &PackageData{Package: buildPkg}
	if pkg.JSFiles, err = jsFilesFromDir(pkg.Dir); err != nil {
		return err
	}
	archive, err := s.BuildPackage(pkg)
	if err != nil {
		return err
	}
	if pkgObj == "" {
		pkgObj = filepath.Base(packagePath) + ".js"
	}
	if !pkg.IsCommand() || pkg.UpToDate {
		return nil
	}
	return s.WriteCommandPackage(archive, pkgObj)
}
// BuildFiles compiles an ad-hoc "main" package assembled from the given
// files (rooted at packagePath) and writes the command output to pkgObj.
// Files ending in .inc.js are embedded raw; all others are treated as Go
// sources.
func (s *Session) BuildFiles(filenames []string, pkgObj string, packagePath string) error {
	pkg := &PackageData{
		Package: &build.Package{
			Name:       "main",
			ImportPath: "main",
			Dir:        packagePath,
		},
	}
	for _, name := range filenames {
		if strings.HasSuffix(name, ".inc.js") {
			pkg.JSFiles = append(pkg.JSFiles, name)
		} else {
			pkg.GoFiles = append(pkg.GoFiles, name)
		}
	}
	archive, err := s.BuildPackage(pkg)
	if err != nil {
		return err
	}
	// Only a real main package can be linked into a command.
	if s.Types["main"].Name() != "main" {
		return fmt.Errorf("cannot build/run non-main package")
	}
	return s.WriteCommandPackage(archive, pkgObj)
}
// BuildImportPath builds (or fetches from the session cache) the archive for
// the package named by the import path.
func (s *Session) BuildImportPath(path string) (*compiler.Archive, error) {
	_, archive, err := s.buildImportPathWithSrcDir(path, "")
	if err != nil {
		return nil, err
	}
	return archive, nil
}
// buildImportPathWithSrcDir resolves path relative to srcDir, builds the
// resulting package, and returns both the package metadata and its archive.
func (s *Session) buildImportPathWithSrcDir(path string, srcDir string) (*PackageData, *compiler.Archive, error) {
	pkg, importErr := importWithSrcDir(path, srcDir, 0, s.InstallSuffix(), s.options.BuildTags)
	// Register the directory with the watcher even when the import failed,
	// so that fixing the problem triggers a rebuild.
	if s.Watcher != nil && pkg != nil {
		s.Watcher.Add(pkg.Dir)
	}
	if importErr != nil {
		return nil, nil, importErr
	}
	arch, buildErr := s.BuildPackage(pkg)
	if buildErr != nil {
		return nil, nil, buildErr
	}
	return pkg, arch, nil
}
// BuildPackage returns the compiled archive for pkg, recompiling only when
// the on-disk object file is older than the package's sources, its
// dependencies, or the compiler binary itself. Results are memoized per
// import path in s.Archives for the lifetime of the session.
func (s *Session) BuildPackage(pkg *PackageData) (*compiler.Archive, error) {
	if archive, ok := s.Archives[pkg.ImportPath]; ok {
		return archive, nil
	}
	if pkg.PkgObj != "" {
		var fileInfo os.FileInfo
		// Seed SrcModTime with the compiler binary's own timestamp: a newer
		// compiler invalidates previously generated object files.
		gopherjsBinary, err := os.Executable()
		if err == nil {
			fileInfo, err = os.Stat(gopherjsBinary)
			if err == nil {
				pkg.SrcModTime = fileInfo.ModTime()
			}
		}
		if err != nil {
			os.Stderr.WriteString("Could not get GopherJS binary's modification timestamp. Please report issue.\n")
			pkg.SrcModTime = time.Now()
		}
		for _, importedPkgPath := range pkg.Imports {
			// Ignore all imports that aren't mentioned in import specs of pkg.
			// For example, this ignores imports such as runtime/internal/sys and runtime/internal/atomic.
			ignored := true
			for _, pos := range pkg.ImportPos[importedPkgPath] {
				importFile := filepath.Base(pos.Filename)
				for _, file := range pkg.GoFiles {
					if importFile == file {
						ignored = false
						break
					}
				}
				if !ignored {
					break
				}
			}
			if importedPkgPath == "unsafe" || ignored {
				continue
			}
			// Recursively build dependencies; a dependency newer than this
			// package's object also forces a rebuild.
			importedPkg, _, err := s.buildImportPathWithSrcDir(importedPkgPath, pkg.Dir)
			if err != nil {
				return nil, err
			}
			impModTime := importedPkg.SrcModTime
			if impModTime.After(pkg.SrcModTime) {
				pkg.SrcModTime = impModTime
			}
		}
		// Fold in the modification times of the package's own source files.
		for _, name := range append(pkg.GoFiles, pkg.JSFiles...) {
			fileInfo, err := os.Stat(filepath.Join(pkg.Dir, name))
			if err != nil {
				return nil, err
			}
			if fileInfo.ModTime().After(pkg.SrcModTime) {
				pkg.SrcModTime = fileInfo.ModTime()
			}
		}
		pkgObjFileInfo, err := os.Stat(pkg.PkgObj)
		if err == nil && !pkg.SrcModTime.After(pkgObjFileInfo.ModTime()) {
			// package object is up to date, load from disk if library
			pkg.UpToDate = true
			if pkg.IsCommand() {
				return nil, nil
			}
			objFile, err := os.Open(pkg.PkgObj)
			if err != nil {
				return nil, err
			}
			defer objFile.Close()
			archive, err := compiler.ReadArchive(pkg.PkgObj, pkg.ImportPath, objFile, s.Types)
			if err != nil {
				return nil, err
			}
			s.Archives[pkg.ImportPath] = archive
			return archive, err
		}
	}
	// Cache miss or stale object: parse (with natives augmentation) and compile.
	fileSet := token.NewFileSet()
	files, err := parseAndAugment(pkg.Package, pkg.IsTest, fileSet)
	if err != nil {
		return nil, err
	}
	localImportPathCache := make(map[string]*compiler.Archive)
	importContext := &compiler.ImportContext{
		Packages: s.Types,
		Import: func(path string) (*compiler.Archive, error) {
			if archive, ok := localImportPathCache[path]; ok {
				return archive, nil
			}
			_, archive, err := s.buildImportPathWithSrcDir(path, pkg.Dir)
			if err != nil {
				return nil, err
			}
			localImportPathCache[path] = archive
			return archive, nil
		},
	}
	archive, err := compiler.Compile(pkg.ImportPath, files, fileSet, importContext, s.options.Minify)
	if err != nil {
		return nil, err
	}
	// Append raw .inc.js files, each wrapped in a function scope invoked with
	// $global as the receiver.
	for _, jsFile := range pkg.JSFiles {
		code, err := ioutil.ReadFile(filepath.Join(pkg.Dir, jsFile))
		if err != nil {
			return nil, err
		}
		archive.IncJSCode = append(archive.IncJSCode, []byte("\t(function() {\n")...)
		archive.IncJSCode = append(archive.IncJSCode, code...)
		archive.IncJSCode = append(archive.IncJSCode, []byte("\n\t}).call($global);\n")...)
	}
	if s.options.Verbose {
		fmt.Println(pkg.ImportPath)
	}
	s.Archives[pkg.ImportPath] = archive
	if pkg.PkgObj == "" || pkg.IsCommand() {
		return archive, nil
	}
	// Persist the library archive; if writing under GOROOT fails (e.g. it is
	// read-only), retry in the first GOPATH workspace.
	if err := s.writeLibraryPackage(archive, pkg.PkgObj); err != nil {
		if strings.HasPrefix(pkg.PkgObj, s.options.GOROOT) {
			// fall back to first GOPATH workspace
			firstGopathWorkspace := filepath.SplitList(s.options.GOPATH)[0]
			if err := s.writeLibraryPackage(archive, filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(s.options.GOROOT):])); err != nil {
				return nil, err
			}
			return archive, nil
		}
		return nil, err
	}
	return archive, nil
}
// writeLibraryPackage serializes archive to pkgObj, creating any missing
// parent directories first.
func (s *Session) writeLibraryPackage(archive *compiler.Archive, pkgObj string) error {
	if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil {
		return err
	}
	f, err := os.Create(pkgObj)
	if err != nil {
		return err
	}
	defer f.Close()
	return compiler.WriteArchive(archive, f)
}
// WriteCommandPackage writes the JavaScript output for a main package's
// archive to pkgObj, optionally emitting a .map source map alongside it.
func (s *Session) WriteCommandPackage(archive *compiler.Archive, pkgObj string) error {
	if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil {
		return err
	}
	codeFile, err := os.Create(pkgObj)
	if err != nil {
		return err
	}
	defer codeFile.Close()
	sourceMapFilter := &compiler.SourceMapFilter{Writer: codeFile}
	if s.options.CreateMapFile {
		m := &sourcemap.Map{File: filepath.Base(pkgObj)}
		mapFile, err := os.Create(pkgObj + ".map")
		if err != nil {
			return err
		}
		// Deferred so the source map is flushed only after all code has been
		// generated, and the sourceMappingURL comment lands at the very end
		// of the generated .js file.
		defer func() {
			m.WriteTo(mapFile)
			mapFile.Close()
			fmt.Fprintf(codeFile, "//# sourceMappingURL=%s.map\n", filepath.Base(pkgObj))
		}()
		sourceMapFilter.MappingCallback = NewMappingCallback(m, s.options.GOROOT, s.options.GOPATH, s.options.MapToLocalDisk)
	}
	// Resolve the full dependency graph, building any archive not already
	// cached in this session.
	deps, err := compiler.ImportDependencies(archive, func(path string) (*compiler.Archive, error) {
		if archive, ok := s.Archives[path]; ok {
			return archive, nil
		}
		_, archive, err := s.buildImportPathWithSrcDir(path, "")
		return archive, err
	})
	if err != nil {
		return err
	}
	return compiler.WriteProgramCode(deps, sourceMapFilter)
}
// NewMappingCallback returns a callback that records one source-map mapping
// per generated position. File paths are rewritten relative to GOPATH or
// GOROOT (stripping the workspace prefix plus "/src"), reduced to the base
// name when outside both, or kept verbatim when localMap is set.
func NewMappingCallback(m *sourcemap.Map, goroot, gopath string, localMap bool) func(generatedLine, generatedColumn int, originalPos token.Position) {
	return func(genLine, genCol int, pos token.Position) {
		if !pos.IsValid() {
			m.AddMapping(&sourcemap.Mapping{GeneratedLine: genLine, GeneratedColumn: genCol})
			return
		}
		file := pos.Filename
		inGopath, prefixLen := hasGopathPrefix(file, gopath)
		if localMap {
			// Keep the absolute local path untouched.
		} else if inGopath {
			file = filepath.ToSlash(file[prefixLen+4:]) // drop workspace + "/src"
		} else if strings.HasPrefix(file, goroot) {
			file = filepath.ToSlash(file[len(goroot)+4:]) // drop GOROOT + "/src"
		} else {
			file = filepath.Base(file)
		}
		m.AddMapping(&sourcemap.Mapping{GeneratedLine: genLine, GeneratedColumn: genCol, OriginalFile: file, OriginalLine: pos.Line, OriginalColumn: pos.Column})
	}
}
func jsFilesFromDir(dir string) ([]string, error) {
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
var jsFiles []string
for _, file := range files {
if strings.HasSuffix(file.Name(), ".inc.js") && file.Name()[0] != '_' && file.Name()[0] != '.' {
jsFiles = append(jsFiles, file.Name())
}
}
return jsFiles, nil
}
// hasGopathPrefix returns true and the length of the matched GOPATH workspace,
// iff file has a prefix that matches one of the GOPATH workspaces.
func hasGopathPrefix(file, gopath string) (hasGopathPrefix bool, prefixLen int) {
gopathWorkspaces := filepath.SplitList(gopath)
for _, gopathWorkspace := range gopathWorkspaces {
gopathWorkspace = filepath.Clean(gopathWorkspace)
if strings.HasPrefix(file, gopathWorkspace) {
return true, len(gopathWorkspace)
}
}
return false, 0
}
// WaitForChange blocks until the watcher reports a relevant filesystem event
// (a .go or .inc.js file being created, written, removed, or renamed) or an
// error, then shuts the watcher down. It is used to drive rebuilds in watch
// mode.
func (s *Session) WaitForChange() {
	s.options.PrintSuccess("watching for changes...\n")
	for {
		select {
		case ev := <-s.Watcher.Events:
			// Skip uninteresting operations and dotfiles (likely editor
			// temporary files).
			if ev.Op&(fsnotify.Create|fsnotify.Write|fsnotify.Remove|fsnotify.Rename) == 0 || filepath.Base(ev.Name)[0] == '.' {
				continue
			}
			// Only Go sources and raw .inc.js includes trigger a rebuild.
			if !strings.HasSuffix(ev.Name, ".go") && !strings.HasSuffix(ev.Name, ".inc.js") {
				continue
			}
			s.options.PrintSuccess("change detected: %s\n", ev.Name)
		case err := <-s.Watcher.Errors:
			s.options.PrintError("watcher error: %s\n", err.Error())
		}
		// The first relevant event or error ends the wait.
		break
	}
	// Drain any further events in the background; otherwise Close below
	// could block on a full channel.
	go func() {
		for range s.Watcher.Events {
			// consume, else Close() may deadlock
		}
	}()
	s.Watcher.Close()
}

@ -0,0 +1,199 @@
package build
import (
"fmt"
gobuild "go/build"
"go/token"
"strconv"
"strings"
"testing"
"github.com/kisielk/gotool"
"github.com/shurcooL/go/importgraphutil"
)
// Natives augment the standard library with GopherJS-specific changes.
// This test ensures that none of the standard library packages are modified
// in a way that adds imports which the original upstream standard library package
// does not already import. Doing that can increase generated output size or cause
// other unexpected issues (since the cmd/go tool does not know about these extra imports),
// so it's best to avoid it.
//
// It checks all standard library packages. Each package is considered as a normal
// package, as a test package, and as an external test package.
func TestNativesDontImportExtraPackages(t *testing.T) {
// Calculate the forward import graph for all standard library packages.
// It's needed for populateImportSet.
stdOnly := gobuild.Default
stdOnly.GOPATH = "" // We only care about standard library, so skip all GOPATH packages.
forward, _, err := importgraphutil.BuildNoTests(&stdOnly)
if err != nil {
t.Fatalf("importgraphutil.BuildNoTests: %v", err)
}
// populateImportSet takes a slice of imports, and populates set with those
// imports, as well as their transitive dependencies. That way, the set can
// be quickly queried to check if a package is in the import graph of imports.
//
// Note, this does not include transitive imports of test/xtest packages,
// which could cause some false positives. It currently doesn't, but if it does,
// then support for that should be added here.
populateImportSet := func(imports []string, set *stringSet) {
for _, p := range imports {
(*set)[p] = struct{}{}
switch p {
case "sync":
(*set)["github.com/gopherjs/gopherjs/nosync"] = struct{}{}
}
transitiveImports := forward.Search(p)
for p := range transitiveImports {
(*set)[p] = struct{}{}
}
}
}
// Check all standard library packages.
//
// The general strategy is to first import each standard library package using the
// normal build.Import, which returns a *build.Package. That contains Imports, TestImports,
// and XTestImports values that are considered the "real imports".
//
// That list of direct imports is then expanded to the transitive closure by populateImportSet,
// meaning all packages that are indirectly imported are also added to the set.
//
// Then, github.com/gopherjs/gopherjs/build.parseAndAugment(*build.Package) returns []*ast.File.
// Those augmented parsed Go files of the package are checked, one file at a time, one import
// at a time. Each import is verified to belong in the set of allowed real imports.
for _, pkg := range gotool.ImportPaths([]string{"std"}) {
// Normal package.
{
// Import the real normal package, and populate its real import set.
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
if err != nil {
t.Fatalf("gobuild.Import: %v", err)
}
realImports := make(stringSet)
populateImportSet(bpkg.Imports, &realImports)
// Use parseAndAugment to get a list of augmented AST files.
fset := token.NewFileSet()
files, err := parseAndAugment(bpkg, false, fset)
if err != nil {
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
}
// Verify imports of normal augmented AST files.
for _, f := range files {
fileName := fset.File(f.Pos()).Name()
normalFile := !strings.HasSuffix(fileName, "_test.go")
if !normalFile {
continue
}
for _, imp := range f.Imports {
importPath, err := strconv.Unquote(imp.Path.Value)
if err != nil {
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
}
if importPath == "github.com/gopherjs/gopherjs/js" {
continue
}
if _, ok := realImports[importPath]; !ok {
t.Errorf("augmented normal package %q imports %q in file %v, but real %q doesn't:\nrealImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realImports)
}
}
}
}
// Test package.
{
// Import the real test package, and populate its real import set.
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
if err != nil {
t.Fatalf("gobuild.Import: %v", err)
}
realTestImports := make(stringSet)
populateImportSet(bpkg.TestImports, &realTestImports)
// Use parseAndAugment to get a list of augmented AST files.
fset := token.NewFileSet()
files, err := parseAndAugment(bpkg, true, fset)
if err != nil {
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
}
// Verify imports of test augmented AST files.
for _, f := range files {
fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
testFile := strings.HasSuffix(fileName, "_test.go") && !strings.HasSuffix(pkgName, "_test")
if !testFile {
continue
}
for _, imp := range f.Imports {
importPath, err := strconv.Unquote(imp.Path.Value)
if err != nil {
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
}
if importPath == "github.com/gopherjs/gopherjs/js" {
continue
}
if _, ok := realTestImports[importPath]; !ok {
t.Errorf("augmented test package %q imports %q in file %v, but real %q doesn't:\nrealTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realTestImports)
}
}
}
}
// External test package.
{
// Import the real external test package, and populate its real import set.
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
if err != nil {
t.Fatalf("gobuild.Import: %v", err)
}
realXTestImports := make(stringSet)
populateImportSet(bpkg.XTestImports, &realXTestImports)
// Add _test suffix to import path to cause parseAndAugment to use external test mode.
bpkg.ImportPath += "_test"
// Use parseAndAugment to get a list of augmented AST files, then check only the external test files.
fset := token.NewFileSet()
files, err := parseAndAugment(bpkg, true, fset)
if err != nil {
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
}
// Verify imports of external test augmented AST files.
for _, f := range files {
fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
xTestFile := strings.HasSuffix(fileName, "_test.go") && strings.HasSuffix(pkgName, "_test")
if !xTestFile {
continue
}
for _, imp := range f.Imports {
importPath, err := strconv.Unquote(imp.Path.Value)
if err != nil {
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
}
if importPath == "github.com/gopherjs/gopherjs/js" {
continue
}
if _, ok := realXTestImports[importPath]; !ok {
t.Errorf("augmented external test package %q imports %q in file %v, but real %q doesn't:\nrealXTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realXTestImports)
}
}
}
}
}
}
// stringSet is used to print a set of strings in a more readable way.
type stringSet map[string]struct{}

// String implements fmt.Stringer, rendering the set as a quoted slice of
// its elements (order follows Go's map iteration order).
func (m stringSet) String() string {
	elems := make([]string, 0, len(m))
	for elem := range m {
		elems = append(elems, elem)
	}
	return fmt.Sprintf("%q", elems)
}

@ -0,0 +1,129 @@
machine:
node:
version: 6.2.2
environment:
SOURCE_MAP_SUPPORT: false
dependencies:
pre:
- cd /usr/local && sudo rm -rf go && curl https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz | sudo tar -xz && sudo chmod a+w go/src/path/filepath
post:
- mv ./gopherjs $HOME/bin
- npm install --global node-gyp
- cd node-syscall && node-gyp rebuild && mkdir -p ~/.node_libraries/ && cp build/Release/syscall.node ~/.node_libraries/syscall.node
test:
override:
- diff -u <(echo -n) <(gofmt -d .)
- go tool vet *.go # Go package in root directory.
- for d in */; do echo $d; done | grep -v tests/ | grep -v third_party/ | xargs go tool vet # All subdirectories except "tests", "third_party".
- >
gopherjs test --short --minify
github.com/gopherjs/gopherjs/tests
github.com/gopherjs/gopherjs/tests/main
github.com/gopherjs/gopherjs/js
archive/tar
archive/zip
bufio
bytes
compress/bzip2
compress/flate
compress/gzip
compress/lzw
compress/zlib
container/heap
container/list
container/ring
crypto/aes
crypto/cipher
crypto/des
crypto/dsa
crypto/ecdsa
crypto/elliptic
crypto/hmac
crypto/md5
crypto/rand
crypto/rc4
crypto/rsa
crypto/sha1
crypto/sha256
crypto/sha512
crypto/subtle
crypto/x509
database/sql
database/sql/driver
debug/dwarf
debug/elf
debug/macho
debug/pe
encoding/ascii85
encoding/asn1
encoding/base32
encoding/base64
encoding/binary
encoding/csv
encoding/gob
encoding/hex
encoding/json
encoding/pem
encoding/xml
errors
expvar
flag
fmt
go/ast
go/constant
go/doc
go/format
go/parser
go/printer
go/scanner
go/token
hash/adler32
hash/crc32
hash/crc64
hash/fnv
html
html/template
image
image/color
image/draw
image/gif
image/jpeg
image/png
index/suffixarray
io
io/ioutil
math
math/big
math/cmplx
math/rand
mime
mime/multipart
mime/quotedprintable
net/http/cookiejar
net/http/fcgi
net/mail
net/rpc/jsonrpc
net/textproto
net/url
path
path/filepath
reflect
regexp
regexp/syntax
sort
strconv
strings
sync
sync/atomic
testing/quick
text/scanner
text/tabwriter
text/template
text/template/parse
time
unicode
unicode/utf16
unicode/utf8
- go test -v -race ./...

@ -0,0 +1,43 @@
package analysis
import (
"go/ast"
"go/constant"
"go/token"
"go/types"
)
func BoolValue(expr ast.Expr, info *types.Info) (bool, bool) {
v := info.Types[expr].Value
if v != nil && v.Kind() == constant.Bool {
return constant.BoolVal(v), true
}
switch e := expr.(type) {
case *ast.BinaryExpr:
switch e.Op {
case token.LAND:
if b, ok := BoolValue(e.X, info); ok {
if !b {
return false, true
}
return BoolValue(e.Y, info)
}
case token.LOR:
if b, ok := BoolValue(e.X, info); ok {
if b {
return true, true
}
return BoolValue(e.Y, info)
}
}
case *ast.UnaryExpr:
if e.Op == token.NOT {
if b, ok := BoolValue(e.X, info); ok {
return !b, true
}
}
case *ast.ParenExpr:
return BoolValue(e.X, info)
}
return false, false
}

@ -0,0 +1,32 @@
package analysis
import (
"go/ast"
"go/token"
)
func HasBreak(n ast.Node) bool {
v := hasBreakVisitor{}
ast.Walk(&v, n)
return v.hasBreak
}
type hasBreakVisitor struct {
hasBreak bool
}
func (v *hasBreakVisitor) Visit(node ast.Node) (w ast.Visitor) {
if v.hasBreak {
return nil
}
switch n := node.(type) {
case *ast.BranchStmt:
if n.Tok == token.BREAK && n.Label == nil {
v.hasBreak = true
return nil
}
case *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt, ast.Expr:
return nil
}
return v
}

@ -0,0 +1,70 @@
package analysis
import (
"go/ast"
"go/token"
"go/types"
)
// EscapingObjects returns the variables declared in the scope of n whose
// references may outlive that scope (address taken, or captured by a
// function literal) and therefore cannot be treated as simple locals.
func EscapingObjects(n ast.Node, info *types.Info) []*types.Var {
	v := escapeAnalysis{
		info:         info,
		escaping:     make(map[*types.Var]bool),
		topScope:     info.Scopes[n],
		bottomScopes: make(map[*types.Scope]bool),
	}
	ast.Walk(&v, n)
	var list []*types.Var
	for obj := range v.escaping {
		list = append(list, obj)
	}
	return list
}

// escapeAnalysis walks a subtree looking for escape points (&ident and
// function literals). Scopes recorded in bottomScopes act as boundaries:
// variables declared at or below them are not reported as escaping.
type escapeAnalysis struct {
	info         *types.Info
	escaping     map[*types.Var]bool
	topScope     *types.Scope
	bottomScopes map[*types.Scope]bool
}

func (v *escapeAnalysis) Visit(node ast.Node) (w ast.Visitor) {
	// huge overapproximation
	switch n := node.(type) {
	case *ast.UnaryExpr:
		if n.Op == token.AND {
			if _, ok := n.X.(*ast.Ident); ok {
				// Address-of an identifier: collect everything beneath it.
				return &escapingObjectCollector{v}
			}
		}
	case *ast.FuncLit:
		// Variables used inside a closure escape their declaring frame.
		v.bottomScopes[v.info.Scopes[n.Type]] = true
		return &escapingObjectCollector{v}
	case *ast.ForStmt:
		// NOTE(review): loop bodies are treated as scope boundaries —
		// presumably because each iteration gets its own frame; confirm.
		v.bottomScopes[v.info.Scopes[n.Body]] = true
	case *ast.RangeStmt:
		v.bottomScopes[v.info.Scopes[n.Body]] = true
	}
	return v
}

// escapingObjectCollector marks every *types.Var it encounters whose
// declaring scope chain reaches topScope before hitting a bottom scope.
type escapingObjectCollector struct {
	analysis *escapeAnalysis
}

func (v *escapingObjectCollector) Visit(node ast.Node) (w ast.Visitor) {
	if id, ok := node.(*ast.Ident); ok {
		if obj, ok := v.analysis.info.Uses[id].(*types.Var); ok {
			// Walk up from the variable's declaring scope: it escapes only
			// if topScope is reached before any recorded boundary scope.
			for s := obj.Parent(); s != nil; s = s.Parent() {
				if s == v.analysis.topScope {
					v.analysis.escaping[obj] = true
					break
				}
				if v.analysis.bottomScopes[s] {
					break
				}
			}
		}
	}
	return v
}

@ -0,0 +1,254 @@
package analysis
import (
"go/ast"
"go/token"
"go/types"
"github.com/gopherjs/gopherjs/compiler/astutil"
"github.com/gopherjs/gopherjs/compiler/typesutil"
)
// continueStmt records a continue statement targeting forStmt together
// with the analyze stack (chain of ancestor nodes) at the point it was
// seen. AnalyzePkg re-marks the chain as blocking if the loop's post
// statement turns out to block.
type continueStmt struct {
	forStmt      *ast.ForStmt
	analyzeStack []ast.Node
}

// Info holds the results of blocking/flattening analysis for one package.
type Info struct {
	*types.Info
	Pkg           *types.Package
	IsBlocking    func(*types.Func) bool // answers blocking-ness for functions outside Pkg
	HasPointer    map[*types.Var]bool    // variables whose address is taken (&x)
	FuncDeclInfos map[*types.Func]*FuncInfo
	FuncLitInfos  map[*ast.FuncLit]*FuncInfo
	InitFuncInfo  *FuncInfo // analysis state for package-level (init) code
	allInfos      []*FuncInfo
	comments      ast.CommentMap // merged comment maps of all files; not consulted in this chunk
}

// FuncInfo carries the per-function facts gathered by Visit.
type FuncInfo struct {
	HasDefer      bool
	Flattened     map[ast.Node]bool            // nodes that must be compiled in flattened (resumable) form
	Blocking      map[ast.Node]bool            // nodes on a path that may block
	GotoLabel     map[*types.Label]bool        // labels targeted by goto
	LocalCalls    map[*types.Func][][]ast.Node // same-package call sites, pending resolution in AnalyzePkg
	ContinueStmts []continueStmt
	p             *Info
	analyzeStack  []ast.Node // current ancestor chain during the walk
}
// newFuncInfo allocates a FuncInfo bound to info with all of its maps
// initialized, registers it in info.allInfos, and returns it.
func (info *Info) newFuncInfo() *FuncInfo {
	fi := &FuncInfo{
		p:          info,
		Flattened:  make(map[ast.Node]bool),
		Blocking:   make(map[ast.Node]bool),
		GotoLabel:  make(map[*types.Label]bool),
		LocalCalls: make(map[*types.Func][][]ast.Node),
	}
	info.allInfos = append(info.allInfos, fi)
	return fi
}
// AnalyzePkg runs the blocking/flattening analysis over a type-checked
// package: it walks every file collecting per-function facts, iterates to
// a fixed point so blocking-ness propagates through same-package calls,
// and finally re-marks continue statements whose loop's post statement
// turned out to block.
func AnalyzePkg(files []*ast.File, fileSet *token.FileSet, typesInfo *types.Info, typesPkg *types.Package, isBlocking func(*types.Func) bool) *Info {
	info := &Info{
		Info:          typesInfo,
		Pkg:           typesPkg,
		HasPointer:    make(map[*types.Var]bool),
		comments:      make(ast.CommentMap),
		IsBlocking:    isBlocking,
		FuncDeclInfos: make(map[*types.Func]*FuncInfo),
		FuncLitInfos:  make(map[*ast.FuncLit]*FuncInfo),
	}
	info.InitFuncInfo = info.newFuncInfo()

	for _, file := range files {
		// Merge each file's comment map into the package-wide one.
		for k, v := range ast.NewCommentMap(fileSet, file, file.Comments) {
			info.comments[k] = v
		}
		ast.Walk(info.InitFuncInfo, file)
	}

	// Fixed point: once a local callee is known to block, every recorded
	// call stack into it becomes blocking; repeat until nothing changes.
	for {
		done := true
		for _, funcInfo := range info.allInfos {
			for obj, calls := range funcInfo.LocalCalls {
				if len(info.FuncDeclInfos[obj].Blocking) != 0 {
					for _, call := range calls {
						funcInfo.markBlocking(call)
					}
					delete(funcInfo.LocalCalls, obj)
					done = false
				}
			}
		}
		if done {
			break
		}
	}

	// A continue jumps to its loop's post statement; if that statement
	// blocks, the continue's whole ancestor chain is blocking too.
	for _, funcInfo := range info.allInfos {
		for _, continueStmt := range funcInfo.ContinueStmts {
			if funcInfo.Blocking[continueStmt.forStmt.Post] {
				funcInfo.markBlocking(continueStmt.analyzeStack)
			}
		}
	}

	return info
}
// Visit implements ast.Visitor. It maintains analyzeStack (the chain of
// ancestor nodes being visited) and records, per function: blocking
// nodes, nodes requiring flattening, goto labels, deferred calls, and
// same-package call sites (resolved later by AnalyzePkg's fixed point).
func (c *FuncInfo) Visit(node ast.Node) ast.Visitor {
	if node == nil {
		// Post-visit callback: pop the finished node off the stack.
		if len(c.analyzeStack) != 0 {
			c.analyzeStack = c.analyzeStack[:len(c.analyzeStack)-1]
		}
		return nil
	}
	c.analyzeStack = append(c.analyzeStack, node)

	switch n := node.(type) {
	case *ast.FuncDecl:
		// Each declared function is analyzed with its own FuncInfo.
		newInfo := c.p.newFuncInfo()
		c.p.FuncDeclInfos[c.p.Defs[n.Name].(*types.Func)] = newInfo
		return newInfo
	case *ast.FuncLit:
		newInfo := c.p.newFuncInfo()
		c.p.FuncLitInfos[n] = newInfo
		return newInfo
	case *ast.BranchStmt:
		switch n.Tok {
		case token.GOTO:
			// goto forces flattening of every enclosing node.
			for _, n2 := range c.analyzeStack {
				c.Flattened[n2] = true
			}
			c.GotoLabel[c.p.Uses[n.Label].(*types.Label)] = true
		case token.CONTINUE:
			if n.Label != nil {
				// Labeled continue: find the labeled statement it targets.
				label := c.p.Uses[n.Label].(*types.Label)
				for i := len(c.analyzeStack) - 1; i >= 0; i-- {
					if labelStmt, ok := c.analyzeStack[i].(*ast.LabeledStmt); ok && c.p.Defs[labelStmt.Label] == label {
						if _, ok := labelStmt.Stmt.(*ast.RangeStmt); ok {
							// Range loops have no post statement to re-check.
							return nil
						}
						stack := make([]ast.Node, len(c.analyzeStack))
						copy(stack, c.analyzeStack)
						c.ContinueStmts = append(c.ContinueStmts, continueStmt{labelStmt.Stmt.(*ast.ForStmt), stack})
						return nil
					}
				}
				return nil
			}
			// Unlabeled continue: find the innermost enclosing loop.
			for i := len(c.analyzeStack) - 1; i >= 0; i-- {
				if _, ok := c.analyzeStack[i].(*ast.RangeStmt); ok {
					return nil
				}
				if forStmt, ok := c.analyzeStack[i].(*ast.ForStmt); ok {
					stack := make([]ast.Node, len(c.analyzeStack))
					copy(stack, c.analyzeStack)
					c.ContinueStmts = append(c.ContinueStmts, continueStmt{forStmt, stack})
					return nil
				}
			}
		}
	case *ast.CallExpr:
		// callTo classifies the callee object and records/marks accordingly.
		callTo := func(obj types.Object) {
			switch o := obj.(type) {
			case *types.Func:
				if recv := o.Type().(*types.Signature).Recv(); recv != nil {
					if _, ok := recv.Type().Underlying().(*types.Interface); ok {
						// Interface method: concrete callee unknown, assume blocking.
						c.markBlocking(c.analyzeStack)
						return
					}
				}
				if o.Pkg() != c.p.Pkg {
					// Cross-package call: consult the precomputed answer.
					if c.p.IsBlocking(o) {
						c.markBlocking(c.analyzeStack)
					}
					return
				}
				// Same-package call: record the call stack; the fixed point
				// in AnalyzePkg resolves its blocking-ness later.
				stack := make([]ast.Node, len(c.analyzeStack))
				copy(stack, c.analyzeStack)
				c.LocalCalls[o] = append(c.LocalCalls[o], stack)
			case *types.Var:
				// Call through a function-typed variable: assume blocking.
				c.markBlocking(c.analyzeStack)
			}
		}
		switch f := astutil.RemoveParens(n.Fun).(type) {
		case *ast.Ident:
			callTo(c.p.Uses[f])
		case *ast.SelectorExpr:
			if sel := c.p.Selections[f]; sel != nil && typesutil.IsJsObject(sel.Recv()) {
				// Method calls on js.Object do not block.
				break
			}
			callTo(c.p.Uses[f.Sel])
		case *ast.FuncLit:
			// Immediately-invoked literal: walk it, then inherit its blocking-ness.
			ast.Walk(c, n.Fun)
			for _, arg := range n.Args {
				ast.Walk(c, arg)
			}
			if len(c.p.FuncLitInfos[f].Blocking) != 0 {
				c.markBlocking(c.analyzeStack)
			}
			return nil
		default:
			// Anything that is not a type conversion is assumed blocking.
			if !astutil.IsTypeExpr(f, c.p.Info) {
				c.markBlocking(c.analyzeStack)
			}
		}
	case *ast.SendStmt:
		// Channel send blocks.
		c.markBlocking(c.analyzeStack)
	case *ast.UnaryExpr:
		switch n.Op {
		case token.AND:
			if id, ok := astutil.RemoveParens(n.X).(*ast.Ident); ok {
				c.p.HasPointer[c.p.Uses[id].(*types.Var)] = true
			}
		case token.ARROW:
			// Channel receive blocks.
			c.markBlocking(c.analyzeStack)
		}
	case *ast.RangeStmt:
		// Ranging over a channel blocks.
		if _, ok := c.p.TypeOf(n.X).Underlying().(*types.Chan); ok {
			c.markBlocking(c.analyzeStack)
		}
	case *ast.SelectStmt:
		for _, s := range n.Body.List {
			if s.(*ast.CommClause).Comm == nil { // default clause
				return c
			}
		}
		// No default clause: the select may block.
		c.markBlocking(c.analyzeStack)
	case *ast.CommClause:
		// Walk channel operands and bodies manually so the comm operation
		// itself is not re-counted as a free-standing send/receive.
		switch comm := n.Comm.(type) {
		case *ast.SendStmt:
			ast.Walk(c, comm.Chan)
			ast.Walk(c, comm.Value)
		case *ast.ExprStmt:
			ast.Walk(c, comm.X.(*ast.UnaryExpr).X)
		case *ast.AssignStmt:
			ast.Walk(c, comm.Rhs[0].(*ast.UnaryExpr).X)
		}
		for _, s := range n.Body {
			ast.Walk(c, s)
		}
		return nil
	case *ast.GoStmt:
		// The spawned call runs in another goroutine: only its function
		// expression and arguments are evaluated here.
		ast.Walk(c, n.Call.Fun)
		for _, arg := range n.Call.Args {
			ast.Walk(c, arg)
		}
		return nil
	case *ast.DeferStmt:
		c.HasDefer = true
		if funcLit, ok := n.Call.Fun.(*ast.FuncLit); ok {
			ast.Walk(c, funcLit.Body)
		}
	}
	return c
}
// markBlocking marks every node on the given analyze stack as both
// blocking and flattened.
func (c *FuncInfo) markBlocking(stack []ast.Node) {
	for i := range stack {
		c.Blocking[stack[i]] = true
		c.Flattened[stack[i]] = true
	}
}

@ -0,0 +1,37 @@
package analysis
import (
"go/ast"
"go/token"
"go/types"
)
func HasSideEffect(n ast.Node, info *types.Info) bool {
v := hasSideEffectVisitor{info: info}
ast.Walk(&v, n)
return v.hasSideEffect
}
type hasSideEffectVisitor struct {
info *types.Info
hasSideEffect bool
}
func (v *hasSideEffectVisitor) Visit(node ast.Node) (w ast.Visitor) {
if v.hasSideEffect {
return nil
}
switch n := node.(type) {
case *ast.CallExpr:
if _, isSig := v.info.TypeOf(n.Fun).(*types.Signature); isSig { // skip conversions
v.hasSideEffect = true
return nil
}
case *ast.UnaryExpr:
if n.Op == token.ARROW {
v.hasSideEffect = true
return nil
}
}
return v
}

@ -0,0 +1,48 @@
package astutil
import (
"go/ast"
"go/types"
)
func RemoveParens(e ast.Expr) ast.Expr {
for {
p, isParen := e.(*ast.ParenExpr)
if !isParen {
return e
}
e = p.X
}
}
func SetType(info *types.Info, t types.Type, e ast.Expr) ast.Expr {
info.Types[e] = types.TypeAndValue{Type: t}
return e
}
func NewIdent(name string, t types.Type, info *types.Info, pkg *types.Package) *ast.Ident {
ident := ast.NewIdent(name)
info.Types[ident] = types.TypeAndValue{Type: t}
obj := types.NewVar(0, pkg, name, t)
info.Uses[ident] = obj
return ident
}
func IsTypeExpr(expr ast.Expr, info *types.Info) bool {
switch e := expr.(type) {
case *ast.ArrayType, *ast.ChanType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.StructType:
return true
case *ast.StarExpr:
return IsTypeExpr(e.X, info)
case *ast.Ident:
_, ok := info.Uses[e].(*types.TypeName)
return ok
case *ast.SelectorExpr:
_, ok := info.Uses[e.Sel].(*types.TypeName)
return ok
case *ast.ParenExpr:
return IsTypeExpr(e.X, info)
default:
return false
}
}

@ -0,0 +1,293 @@
package compiler
import (
"bytes"
"encoding/binary"
"encoding/gob"
"encoding/json"
"fmt"
"go/token"
"go/types"
"io"
"strings"
"github.com/gopherjs/gopherjs/compiler/prelude"
"github.com/gopherjs/gopherjs/third_party/importer"
)
// sizes32 describes the generated code's data layout: 32-bit words with
// 8-byte maximum alignment.
var sizes32 = &types.StdSizes{WordSize: 4, MaxAlign: 8}

// reservedKeywords holds JavaScript reserved words that generated
// identifiers must not collide with; populated by init below.
var reservedKeywords = make(map[string]bool)

var _ = ___GOPHERJS_REQUIRES_GO_VERSION_1_8___ // Compile error on other Go versions, because they're not supported.

// init fills reservedKeywords with the JavaScript reserved-word list.
func init() {
	for _, keyword := range []string{"abstract", "arguments", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "eval", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "let", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "undefined", "var", "void", "volatile", "while", "with", "yield"} {
		reservedKeywords[keyword] = true
	}
}

// ErrorList aggregates multiple errors behind the error interface.
type ErrorList []error

// Error returns the first error's message only.
// NOTE(review): calling Error on an empty ErrorList panics with an
// index-out-of-range — confirm callers never construct an empty list.
func (err ErrorList) Error() string {
	return err[0].Error()
}

// Archive is a compiled package: the generated declarations plus the
// metadata needed to link it into a program and type-check against it.
type Archive struct {
	ImportPath   string   // canonical import path of the package
	Name         string   // package name
	Imports      []string // import paths of direct dependencies
	ExportData   []byte   // serialized type information consumed by importer.ImportData
	Declarations []*Decl
	IncJSCode    []byte // raw JavaScript written before the package wrapper — TODO(review): confirm origin
	FileSet      []byte // JSON-serialized token.FileSet, used for source maps
	Minified     bool   // whether the code was generated in minified mode
}

// Decl is one emitted declaration together with its dead-code-elimination
// filters and dependencies (consumed by WriteProgramCode).
type Decl struct {
	FullName        string
	Vars            []string // JS variable names this declaration introduces
	DeclCode        []byte
	MethodListCode  []byte
	TypeInitCode    []byte
	InitCode        []byte
	DceObjectFilter string
	DceMethodFilter string
	DceDeps         []string
	Blocking        bool
}

// Dependency identifies a package-level object or method.
// NOTE(review): not referenced within this part of the file; presumably
// used by callers elsewhere — confirm.
type Dependency struct {
	Pkg    string
	Type   string
	Method string
}
// ImportDependencies returns the transitive dependency closure of archive
// in topological order (each package after everything it imports), with
// the "runtime" package always included first and archive itself last.
// importPkg loads a dependency by import path.
func ImportDependencies(archive *Archive, importPkg func(string) (*Archive, error)) ([]*Archive, error) {
	var (
		deps []*Archive
		seen = make(map[string]bool)
	)
	var visit func(path string) error
	visit = func(path string) error {
		if seen[path] {
			return nil
		}
		dep, err := importPkg(path)
		if err != nil {
			return err
		}
		// Post-order: emit a package only after all of its imports.
		for _, imp := range dep.Imports {
			if err := visit(imp); err != nil {
				return err
			}
		}
		deps = append(deps, dep)
		seen[dep.ImportPath] = true
		return nil
	}

	if err := visit("runtime"); err != nil {
		return nil, err
	}
	for _, imp := range archive.Imports {
		if err := visit(imp); err != nil {
			return nil, err
		}
	}

	return append(deps, archive), nil
}
// dceInfo tracks a declaration's dead-code-elimination state: the
// (fully-qualified) object and method filters that must both be released
// before the declaration is selected for emission.
type dceInfo struct {
	decl         *Decl
	objectFilter string
	methodFilter string
}

// WriteProgramCode writes the JavaScript for a whole program to w: the
// prelude, then every package in pkgs (dependencies first, the main
// package last), emitting only declarations selected by dead-code
// elimination, followed by the program entry glue.
func WriteProgramCode(pkgs []*Archive, w *SourceMapFilter) error {
	mainPkg := pkgs[len(pkgs)-1]
	minify := mainPkg.Minified

	// Unfiltered declarations are unconditionally live and seed the
	// worklist; filtered ones are indexed by their qualified filter names.
	byFilter := make(map[string][]*dceInfo)
	var pendingDecls []*Decl
	for _, pkg := range pkgs {
		for _, d := range pkg.Declarations {
			if d.DceObjectFilter == "" && d.DceMethodFilter == "" {
				pendingDecls = append(pendingDecls, d)
				continue
			}
			info := &dceInfo{decl: d}
			if d.DceObjectFilter != "" {
				info.objectFilter = pkg.ImportPath + "." + d.DceObjectFilter
				byFilter[info.objectFilter] = append(byFilter[info.objectFilter], info)
			}
			if d.DceMethodFilter != "" {
				info.methodFilter = pkg.ImportPath + "." + d.DceMethodFilter
				byFilter[info.methodFilter] = append(byFilter[info.methodFilter], info)
			}
		}
	}

	// Mark phase: each selected declaration releases the filters it
	// depends on; a filtered declaration becomes selected (and is pushed
	// on the worklist) once all of its filters have been released.
	dceSelection := make(map[*Decl]struct{})
	for len(pendingDecls) != 0 {
		d := pendingDecls[len(pendingDecls)-1]
		pendingDecls = pendingDecls[:len(pendingDecls)-1]
		dceSelection[d] = struct{}{}
		for _, dep := range d.DceDeps {
			if infos, ok := byFilter[dep]; ok {
				delete(byFilter, dep)
				for _, info := range infos {
					if info.objectFilter == dep {
						info.objectFilter = ""
					}
					if info.methodFilter == dep {
						info.methodFilter = ""
					}
					if info.objectFilter == "" && info.methodFilter == "" {
						pendingDecls = append(pendingDecls, info.decl)
					}
				}
			}
		}
	}

	if _, err := w.Write([]byte("\"use strict\";\n(function() {\n\n")); err != nil {
		return err
	}
	if _, err := w.Write(removeWhitespace([]byte(prelude.Prelude), minify)); err != nil {
		return err
	}
	if _, err := w.Write([]byte("\n")); err != nil {
		return err
	}

	// write packages
	for _, pkg := range pkgs {
		if err := WritePkgCode(pkg, dceSelection, minify, w); err != nil {
			return err
		}
	}

	// Entry glue: synthesize methods, init runtime, start main, flush.
	if _, err := w.Write([]byte("$synthesizeMethods();\nvar $mainPkg = $packages[\"" + string(mainPkg.ImportPath) + "\"];\n$packages[\"runtime\"].$init();\n$go($mainPkg.$init, []);\n$flushConsole();\n\n}).call(this);\n")); err != nil {
		return err
	}
	return nil
}
// WritePkgCode writes the JavaScript for a single package: its IncJSCode,
// then a $packages["..."] = (function(){...})() wrapper containing only
// the declarations present in dceSelection, ending with the package's
// $init function emitted in resumable (goroutine-friendly) form.
func WritePkgCode(pkg *Archive, dceSelection map[*Decl]struct{}, minify bool, w *SourceMapFilter) error {
	if w.MappingCallback != nil && pkg.FileSet != nil {
		// Source maps requested: restore the package's token.FileSet.
		w.fileSet = token.NewFileSet()
		if err := w.fileSet.Read(json.NewDecoder(bytes.NewReader(pkg.FileSet)).Decode); err != nil {
			panic(err)
		}
	}
	if _, err := w.Write(pkg.IncJSCode); err != nil {
		return err
	}
	if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("$packages[\"%s\"] = (function() {\n", pkg.ImportPath)), minify)); err != nil {
		return err
	}
	// Collect the JS variables of all selected declarations; $pkg and
	// $init are always declared.
	vars := []string{"$pkg = {}", "$init"}
	var filteredDecls []*Decl
	for _, d := range pkg.Declarations {
		if _, ok := dceSelection[d]; ok {
			vars = append(vars, d.Vars...)
			filteredDecls = append(filteredDecls, d)
		}
	}
	if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("\tvar %s;\n", strings.Join(vars, ", "))), minify)); err != nil {
		return err
	}
	// Emission order: declarations, then method lists, then type inits.
	for _, d := range filteredDecls {
		if _, err := w.Write(d.DeclCode); err != nil {
			return err
		}
	}
	for _, d := range filteredDecls {
		if _, err := w.Write(d.MethodListCode); err != nil {
			return err
		}
	}
	for _, d := range filteredDecls {
		if _, err := w.Write(d.TypeInitCode); err != nil {
			return err
		}
	}

	// $init prologue: resumable-function scaffolding ($f/$c/$s/$r).
	if _, err := w.Write(removeWhitespace([]byte("\t$init = function() {\n\t\t$pkg.$init = function() {};\n\t\t/* */ var $f, $c = false, $s = 0, $r; if (this !== undefined && this.$blk !== undefined) { $f = this; $c = true; $s = $f.$s; $r = $f.$r; } s: while (true) { switch ($s) { case 0:\n"), minify)); err != nil {
		return err
	}
	for _, d := range filteredDecls {
		if _, err := w.Write(d.InitCode); err != nil {
			return err
		}
	}
	// $init epilogue: suspend/resume bookkeeping and package export.
	if _, err := w.Write(removeWhitespace([]byte("\t\t/* */ } return; } if ($f === undefined) { $f = { $blk: $init }; } $f.$s = $s; $f.$r = $r; return $f;\n\t};\n\t$pkg.$init = $init;\n\treturn $pkg;\n})();"), minify)); err != nil {
		return err
	}
	if _, err := w.Write([]byte("\n")); err != nil { // keep this \n even when minified
		return err
	}
	return nil
}
// ReadArchive decodes a compiled archive for the package at path from r.
// Its export data is imported into packages (keyed by import path) so the
// type checker can resolve references to this package. The filename
// parameter is currently unused.
func ReadArchive(filename, path string, r io.Reader, packages map[string]*types.Package) (*Archive, error) {
	var a Archive
	if err := gob.NewDecoder(r).Decode(&a); err != nil {
		return nil, err
	}

	var err error
	_, packages[path], err = importer.ImportData(packages, a.ExportData)
	if err != nil {
		return nil, err
	}

	return &a, nil
}

// WriteArchive gob-encodes the archive a to w.
func WriteArchive(a *Archive, w io.Writer) error {
	return gob.NewEncoder(w).Encode(a)
}

// SourceMapFilter wraps a writer, translating embedded position markers
// (see Write) into MappingCallback invocations while tracking the current
// generated line and column.
type SourceMapFilter struct {
	Writer          io.Writer
	MappingCallback func(generatedLine, generatedColumn int, originalPos token.Position)
	line            int             // generated lines fully written so far (0-based)
	column          int             // column within the current generated line
	fileSet         *token.FileSet  // resolves embedded token.Pos values; set by WritePkgCode
}
// Write implements io.Writer. The stream may contain '\b' markers, each
// followed by a 4-byte big-endian token.Pos. Markers are consumed rather
// than forwarded: each one is reported through MappingCallback together
// with the current generated line/column. Plain text passes through to
// the underlying writer while the line/column counters are maintained.
func (f *SourceMapFilter) Write(p []byte) (n int, err error) {
	var n2 int
	for {
		i := bytes.IndexByte(p, '\b')
		w := p
		if i != -1 {
			w = p[:i] // forward only the text before the next marker
		}
		n2, err = f.Writer.Write(w)
		n += n2
		// Update generated line/column over the chunk just written.
		for {
			i := bytes.IndexByte(w, '\n')
			if i == -1 {
				f.column += len(w)
				break
			}
			f.line++
			f.column = 0
			w = w[i+1:]
		}

		if err != nil || i == -1 {
			return
		}
		if f.MappingCallback != nil {
			// Decode the 4-byte original position following the marker.
			f.MappingCallback(f.line+1, f.column, f.fileSet.Position(token.Pos(binary.BigEndian.Uint32(p[i+1:i+5]))))
		}
		p = p[i+5:]
		n += 5 // the swallowed marker still counts toward bytes "written"
	}
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,106 @@
package filter
import (
"go/ast"
"go/token"
"go/types"
"github.com/gopherjs/gopherjs/compiler/astutil"
)
// Assign rewrites a compound assignment (x op= y) into a plain assignment
// (x = x op (y)). To keep the left-hand side's sub-expressions from being
// evaluated twice, each non-trivial operand (index base/index, selector
// base, pointer) is first captured into a temporary variable; the
// resulting statements are returned wrapped in a block. Plain assignments
// and non-assignment statements are returned unchanged.
func Assign(stmt ast.Stmt, info *types.Info, pkg *types.Package) ast.Stmt {
	if s, ok := stmt.(*ast.AssignStmt); ok && s.Tok != token.ASSIGN && s.Tok != token.DEFINE {
		// Map the compound token to its underlying binary operator.
		var op token.Token
		switch s.Tok {
		case token.ADD_ASSIGN:
			op = token.ADD
		case token.SUB_ASSIGN:
			op = token.SUB
		case token.MUL_ASSIGN:
			op = token.MUL
		case token.QUO_ASSIGN:
			op = token.QUO
		case token.REM_ASSIGN:
			op = token.REM
		case token.AND_ASSIGN:
			op = token.AND
		case token.OR_ASSIGN:
			op = token.OR
		case token.XOR_ASSIGN:
			op = token.XOR
		case token.SHL_ASSIGN:
			op = token.SHL
		case token.SHR_ASSIGN:
			op = token.SHR
		case token.AND_NOT_ASSIGN:
			op = token.AND_NOT
		default:
			panic(s.Tok)
		}

		var list []ast.Stmt
		// viaTmpVars rebuilds expr so each effectful sub-expression is
		// evaluated exactly once, appending ":=" statements to list.
		var viaTmpVars func(expr ast.Expr, name string) ast.Expr
		viaTmpVars = func(expr ast.Expr, name string) ast.Expr {
			switch e := astutil.RemoveParens(expr).(type) {
			case *ast.IndexExpr:
				return astutil.SetType(info, info.TypeOf(e), &ast.IndexExpr{
					X:     viaTmpVars(e.X, "_slice"),
					Index: viaTmpVars(e.Index, "_index"),
				})

			case *ast.SelectorExpr:
				sel, ok := info.Selections[e]
				if !ok {
					// qualified identifier
					return e
				}
				newSel := &ast.SelectorExpr{
					X:   viaTmpVars(e.X, "_struct"),
					Sel: e.Sel,
				}
				// Preserve the selection info for the rebuilt node.
				info.Selections[newSel] = sel
				return astutil.SetType(info, info.TypeOf(e), newSel)

			case *ast.StarExpr:
				return astutil.SetType(info, info.TypeOf(e), &ast.StarExpr{
					X: viaTmpVars(e.X, "_ptr"),
				})

			case *ast.Ident, *ast.BasicLit:
				// Side-effect free; safe to evaluate twice.
				return e

			default:
				// Anything else is captured in a fresh temporary.
				tmpVar := astutil.NewIdent(name, info.TypeOf(e), info, pkg)
				list = append(list, &ast.AssignStmt{
					Lhs: []ast.Expr{tmpVar},
					Tok: token.DEFINE,
					Rhs: []ast.Expr{e},
				})
				return tmpVar

			}
		}

		lhs := viaTmpVars(s.Lhs[0], "_val")

		// Emit: lhs = lhs op (rhs), parenthesizing rhs for precedence.
		list = append(list, &ast.AssignStmt{
			Lhs: []ast.Expr{lhs},
			Tok: token.ASSIGN,
			Rhs: []ast.Expr{
				astutil.SetType(info, info.TypeOf(s.Lhs[0]), &ast.BinaryExpr{
					X:  lhs,
					Op: op,
					Y: astutil.SetType(info, info.TypeOf(s.Rhs[0]), &ast.ParenExpr{
						X: s.Rhs[0],
					}),
				}),
			},
		})

		return &ast.BlockStmt{
			List: list,
		}
	}
	return stmt
}

@ -0,0 +1,39 @@
package filter
import (
"go/ast"
"go/constant"
"go/token"
"go/types"
)
func IncDecStmt(stmt ast.Stmt, info *types.Info) ast.Stmt {
if s, ok := stmt.(*ast.IncDecStmt); ok {
t := info.TypeOf(s.X)
if iExpr, isIExpr := s.X.(*ast.IndexExpr); isIExpr {
switch u := info.TypeOf(iExpr.X).Underlying().(type) {
case *types.Array:
t = u.Elem()
case *types.Slice:
t = u.Elem()
case *types.Map:
t = u.Elem()
}
}
tok := token.ADD_ASSIGN
if s.Tok == token.DEC {
tok = token.SUB_ASSIGN
}
one := &ast.BasicLit{Kind: token.INT}
info.Types[one] = types.TypeAndValue{Type: t, Value: constant.MakeInt64(1)}
return &ast.AssignStmt{
Lhs: []ast.Expr{s.X},
Tok: tok,
Rhs: []ast.Expr{one},
}
}
return stmt
}

@ -0,0 +1,8 @@
// Package natives provides native packages via a virtual filesystem.
//
// See documentation of parseAndAugment in github.com/gopherjs/gopherjs/build
// for explanation of behavior used to augment the native packages using the files
// in src subfolder.
package natives
//go:generate vfsgendev -source="github.com/gopherjs/gopherjs/compiler/natives".FS -tag=gopherjsdev

@ -0,0 +1,29 @@
// +build gopherjsdev
package natives
import (
"go/build"
"log"
"net/http"
"os"
"strings"
"github.com/shurcooL/httpfs/filter"
)
// importPathToDir resolves a Go import path to its directory on disk,
// terminating the process if the package cannot be located.
func importPathToDir(importPath string) string {
	p, err := build.Import(importPath, "", build.FindOnly)
	if err != nil {
		log.Fatalln(err)
	}
	return p.Dir
}

// FS is a virtual filesystem that contains native packages.
// Only the directory root and the src subtree are exposed.
var FS = filter.Keep(
	http.Dir(importPathToDir("github.com/gopherjs/gopherjs/compiler/natives")),
	func(path string, fi os.FileInfo) bool {
		return path == "/" || path == "/src" || strings.HasPrefix(path, "/src/")
	},
)

File diff suppressed because one or more lines are too long

@ -0,0 +1,43 @@
// +build js
package bytes
// IndexByte returns the index of the first occurrence of c in s,
// or -1 if c is not present.
func IndexByte(s []byte, c byte) int {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return i
		}
	}
	return -1
}
// Equal reports whether a and b have the same length and contents.
// A nil argument is equivalent to an empty slice.
func Equal(a, b []byte) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// Compare returns -1, 0, or 1 depending on whether a is
// lexicographically less than, equal to, or greater than b.
func Compare(a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	// Compare the common prefix byte by byte.
	for i := 0; i < n; i++ {
		switch {
		case a[i] < b[i]:
			return -1
		case a[i] > b[i]:
			return 1
		}
	}
	// Equal prefixes: the shorter slice sorts first.
	switch {
	case len(a) < len(b):
		return -1
	case len(a) > len(b):
		return 1
	}
	return 0
}

@ -0,0 +1,11 @@
// +build js
package bytes_test
import (
"testing"
)
// TestEqualNearPageBoundary is skipped under GopherJS — presumably
// because page-boundary memory layout does not apply; confirm.
func TestEqualNearPageBoundary(t *testing.T) {
	t.Skip()
}

@ -0,0 +1,49 @@
// +build js
package rand
import (
"errors"
"github.com/gopherjs/gopherjs/js"
)
func init() {
Reader = &rngReader{}
}
// rngReader implements crypto/rand's Reader using the environment's
// crypto API (browser or Node.js).
type rngReader struct{}

// Read fills b with random bytes from the environment. It tries the
// browser's crypto.getRandomValues (with msCrypto as fallback), then
// Node.js's crypto.randomBytes; if neither is available it returns an
// error. A browser read is capped at 65536 bytes per call, so n may be
// less than len(b).
func (r *rngReader) Read(b []byte) (n int, err error) {
	// Write straight into b's underlying typed array at its offset.
	array := js.InternalObject(b).Get("$array")
	offset := js.InternalObject(b).Get("$offset").Int()

	// browser
	crypto := js.Global.Get("crypto")
	if crypto == js.Undefined {
		crypto = js.Global.Get("msCrypto")
	}
	if crypto != js.Undefined {
		if crypto.Get("getRandomValues") != js.Undefined {
			n = len(b)
			if n > 65536 {
				// Avoid QuotaExceededError thrown by getRandomValues
				// when length is more than 65536, as specified in
				// http://www.w3.org/TR/WebCryptoAPI/#Crypto-method-getRandomValues
				n = 65536
			}
			crypto.Call("getRandomValues", array.Call("subarray", offset, offset+n))
			return n, nil
		}
	}

	// Node.js
	if require := js.Global.Get("require"); require != js.Undefined {
		if randomBytes := require.Invoke("crypto").Get("randomBytes"); randomBytes != js.Undefined {
			array.Call("set", randomBytes.Invoke(len(b)), offset)
			return len(b), nil
		}
	}

	return 0, errors.New("crypto/rand not available in this environment")
}

@ -0,0 +1,14 @@
// +build js
package x509
import "os"
// loadSystemRoots returns an empty pool: there is no platform certificate
// store to read from in this environment.
func loadSystemRoots() (*CertPool, error) {
	// no system roots
	return NewCertPool(), nil
}

// execSecurityRoots is unsupported; callers receive os.ErrNotExist.
func execSecurityRoots() (*CertPool, error) {
	return nil, os.ErrNotExist
}

@ -0,0 +1,17 @@
// +build js
package x509
import "testing"
// TestSystemRoots is skipped: no system certificate roots exist here.
func TestSystemRoots(t *testing.T) {
	t.Skip("no system roots")
}

// TestSystemVerify is skipped: system verification is unavailable.
func TestSystemVerify(t *testing.T) {
	t.Skip("no system")
}

// TestImports is skipped: system facilities are unavailable.
func TestImports(t *testing.T) {
	t.Skip("no system")
}

@ -0,0 +1,32 @@
// +build js
package driver
// valueConverterTests overrides the upstream driver test table for the js
// build. Two DefaultParameterConverter cases are commented out pending
// fixes under GopherJS (see the TODOs below).
var valueConverterTests = []valueConverterTest{
	{Bool, "true", true, ""},
	{Bool, "True", true, ""},
	{Bool, []byte("t"), true, ""},
	{Bool, true, true, ""},
	{Bool, "1", true, ""},
	{Bool, 1, true, ""},
	{Bool, int64(1), true, ""},
	{Bool, uint16(1), true, ""},
	{Bool, "false", false, ""},
	{Bool, false, false, ""},
	{Bool, "0", false, ""},
	{Bool, 0, false, ""},
	{Bool, int64(0), false, ""},
	{Bool, uint16(0), false, ""},
	{c: Bool, in: "foo", err: "sql/driver: couldn't convert \"foo\" into type bool"},
	{c: Bool, in: 2, err: "sql/driver: couldn't convert 2 into type bool"},
	{DefaultParameterConverter, now, now, ""},
	{DefaultParameterConverter, (*int64)(nil), nil, ""},
	{DefaultParameterConverter, &answer, answer, ""},
	{DefaultParameterConverter, &now, now, ""},
	//{DefaultParameterConverter, i(9), int64(9), ""}, // TODO: Fix.
	{DefaultParameterConverter, f(0.1), float64(0.1), ""},
	{DefaultParameterConverter, b(true), true, ""},
	//{DefaultParameterConverter, bs{1}, []byte{1}, ""}, // TODO: Fix.
	{DefaultParameterConverter, s("a"), "a", ""},
	{DefaultParameterConverter, is{1}, nil, "unsupported type driver.is, a slice of int"},
}

@ -0,0 +1,9 @@
// +build js
package elf
import "testing"
// TestNoSectionOverlaps is skipped; per the skip message it depends on
// binaries produced by the Go ("6l") linker, which is unavailable here.
func TestNoSectionOverlaps(t *testing.T) {
	t.Skip("not 6l")
}

@ -0,0 +1,9 @@
// +build js
package json
import "testing"
// TestHTTPDecoding is skipped: it requires network access, which
// GopherJS does not support.
func TestHTTPDecoding(t *testing.T) {
	t.Skip("network access is not supported by GopherJS")
}

@ -0,0 +1,5 @@
// +build js
package fmt_test
// intCount is the iteration count used by fmt's tests; overridden for the
// js build — presumably lowered to keep the tests fast, confirm upstream.
const intCount = 100

@ -0,0 +1,11 @@
// +build js
package token
import (
"testing"
)
// TestFileSetRace is skipped under GopherJS — no reason is given;
// NOTE(review): likely relies on real parallelism, confirm.
func TestFileSetRace(t *testing.T) {
	t.Skip()
}

@ -0,0 +1,26 @@
// +build js
package testenv
import (
"runtime"
"strings"
)
// HasExec reports whether the current system can start new processes
// using os.StartProcess or (more commonly) exec.Command.
func HasExec() bool {
	if runtime.GOOS == "nacl" {
		return false
	}
	// darwin/arm* cannot spawn processes.
	if runtime.GOOS == "darwin" && strings.HasPrefix(runtime.GOARCH, "arm") {
		return false
	}
	if runtime.GOARCH == "js" {
		return false
	}
	return true
}

@ -0,0 +1,19 @@
// +build js
package io_test
import (
"testing"
)
// TestMultiWriter_WriteStringSingleAlloc is skipped under GopherJS —
// presumably because allocation counting is not meaningful; confirm.
func TestMultiWriter_WriteStringSingleAlloc(t *testing.T) {
	t.Skip()
}

// TestMultiReaderFlatten is skipped under GopherJS; no reason given.
func TestMultiReaderFlatten(t *testing.T) {
	t.Skip()
}

// TestMultiReaderFreesExhaustedReaders is skipped: it relies on
// runtime.SetFinalizer, which GopherJS does not implement.
func TestMultiReaderFreesExhaustedReaders(t *testing.T) {
	t.Skip("test relies on runtime.SetFinalizer, which GopherJS does not implement")
}

@ -0,0 +1,40 @@
// +build js
package big
// Under GopherJS the assembly implementations of math/big's arithmetic
// kernels are unavailable, so each primitive forwards to its portable
// pure-Go fallback (the _g variants declared elsewhere in this package).

func mulWW(x, y Word) (z1, z0 Word) {
	return mulWW_g(x, y)
}

func divWW(x1, x0, y Word) (q, r Word) {
	return divWW_g(x1, x0, y)
}

func addVV(z, x, y []Word) (c Word) {
	return addVV_g(z, x, y)
}

func subVV(z, x, y []Word) (c Word) {
	return subVV_g(z, x, y)
}

func addVW(z, x []Word, y Word) (c Word) {
	return addVW_g(z, x, y)
}

func subVW(z, x []Word, y Word) (c Word) {
	return subVW_g(z, x, y)
}

func shlVU(z, x []Word, s uint) (c Word) {
	return shlVU_g(z, x, s)
}

func shrVU(z, x []Word, s uint) (c Word) {
	return shrVU_g(z, x, s)
}

func mulAddVWW(z, x []Word, y, r Word) (c Word) {
	return mulAddVWW_g(z, x, y, r)
}

func addMulVVW(z, x []Word, y Word) (c Word) {
	return addMulVVW_g(z, x, y)
}

func divWVW(z []Word, xn Word, x []Word, y Word) (r Word) {
	return divWVW_g(z, xn, x, y)
}

func bitLen(x Word) (n int) {
	return bitLen_g(x)
}

@ -0,0 +1,13 @@
// +build js
package big
import "testing"
// TestBytes is skipped under GopherJS; marked broken.
func TestBytes(t *testing.T) {
	t.Skip("broken")
}

// TestModSqrt is skipped under GopherJS; too slow.
func TestModSqrt(t *testing.T) {
	t.Skip("slow")
}

@ -0,0 +1,238 @@
// +build js
package math
import (
"github.com/gopherjs/gopherjs/js"
)
// math is the JavaScript Math object; most functions below delegate to it.
var math = js.Global.Get("Math")

// Dividing by a float64 zero at runtime yields the IEEE special values.
var zero float64 = 0
var posInf = 1 / zero
var negInf = -1 / zero
var nan = 0 / zero

// Acos delegates to JavaScript Math.acos.
func Acos(x float64) float64 {
	return math.Call("acos", x).Float()
}

// Asin delegates to JavaScript Math.asin.
func Asin(x float64) float64 {
	return math.Call("asin", x).Float()
}

// Atan delegates to JavaScript Math.atan.
func Atan(x float64) float64 {
	return math.Call("atan", x).Float()
}

// Atan2 delegates to JavaScript Math.atan2.
func Atan2(y, x float64) float64 {
	return math.Call("atan2", y, x).Float()
}

// Ceil delegates to JavaScript Math.ceil.
func Ceil(x float64) float64 {
	return math.Call("ceil", x).Float()
}
func Copysign(x, y float64) float64 {
if (x < 0 || 1/x == negInf) != (y < 0 || 1/y == negInf) {
return -x
}
return x
}
func Cos(x float64) float64 {
return math.Call("cos", x).Float()
}
func Cosh(x float64) float64 {
return math.Call("cosh", x).Float()
}
func Dim(x, y float64) float64 {
return dim(x, y)
}
func Exp(x float64) float64 {
return math.Call("exp", x).Float()
}
func Exp2(x float64) float64 {
return math.Call("pow", 2, x).Float()
}
func Expm1(x float64) float64 {
return expm1(x)
}
func Floor(x float64) float64 {
return math.Call("floor", x).Float()
}
func Frexp(f float64) (frac float64, exp int) {
return frexp(f)
}
func Hypot(p, q float64) float64 {
return hypot(p, q)
}
func Inf(sign int) float64 {
switch {
case sign >= 0:
return posInf
default:
return negInf
}
}
func IsInf(f float64, sign int) bool {
if f == posInf {
return sign >= 0
}
if f == negInf {
return sign <= 0
}
return false
}
// IsNaN reports whether f is an IEEE 754 "not-a-number" value.
// NaN is the only floating-point value that does not compare equal to
// itself, so a self-comparison is the whole test.
func IsNaN(f float64) (is bool) {
	is = f != f
	return
}
// Ldexp returns frac × 2**exp. Exponents at or beyond ±1024 are applied
// in two pow steps because a single Math.pow call would overflow to Inf
// (or underflow to 0) before the multiplication by frac can compensate.
func Ldexp(frac float64, exp int) float64 {
	if frac == 0 {
		return frac
	}
	if exp >= 1024 {
		return frac * math.Call("pow", 2, 1023).Float() * math.Call("pow", 2, exp-1023).Float()
	}
	if exp <= -1024 {
		return frac * math.Call("pow", 2, -1023).Float() * math.Call("pow", 2, exp+1023).Float()
	}
	return frac * math.Call("pow", 2, exp).Float()
}
func Log(x float64) float64 {
	if x != x { // workaround for optimizer bug in V8, remove at some point
		return nan
	}
	return math.Call("log", x).Float()
}
// Log10, Log1p, Log2, Max, Min and Remainder delegate to the portable
// pure-Go implementations from the upstream math package.
func Log10(x float64) float64 {
	return log10(x)
}
func Log1p(x float64) float64 {
	return log1p(x)
}
func Log2(x float64) float64 {
	return log2(x)
}
func Max(x, y float64) float64 {
	return max(x, y)
}
func Min(x, y float64) float64 {
	return min(x, y)
}
// Mod uses $mod, the helper the GopherJS compiler emits for Go's %
// operator, rather than JavaScript's own % semantics.
func Mod(x, y float64) float64 {
	return js.Global.Call("$mod", x, y).Float()
}
// Modf splits f into integer and fractional parts that share f's sign.
func Modf(f float64) (float64, float64) {
	if f == posInf || f == negInf {
		return f, nan
	}
	// 1/-0 == -Inf: preserve negative zero in both results.
	if 1/f == negInf {
		return f, f
	}
	frac := Mod(f, 1)
	return f - frac, frac
}
func NaN() float64 {
	return nan
}
// Pow special-cases inputs where Go and JavaScript disagree: Go defines
// Pow(1, y) = 1 and Pow(-1, ±Inf) = 1, while Math.pow returns NaN for
// pow(±1, ±Infinity).
func Pow(x, y float64) float64 {
	if x == 1 || (x == -1 && (y == posInf || y == negInf)) {
		return 1
	}
	return math.Call("pow", x, y).Float()
}
func Remainder(x, y float64) float64 {
	return remainder(x, y)
}
// Signbit reports whether x is negative, including negative zero
// (detected via 1/-0 == -Inf).
func Signbit(x float64) bool {
	return x < 0 || 1/x == negInf
}
func Sin(x float64) float64 {
	return math.Call("sin", x).Float()
}
func Sinh(x float64) float64 {
	return math.Call("sinh", x).Float()
}
func Sincos(x float64) (sin, cos float64) {
	return Sin(x), Cos(x)
}
func Sqrt(x float64) float64 {
	return math.Call("sqrt", x).Float()
}
func Tan(x float64) float64 {
	return math.Call("tan", x).Float()
}
func Tanh(x float64) float64 {
	return math.Call("tanh", x).Float()
}
// Trunc drops the fractional part; infinities, NaN and negative zero
// pass through unchanged. NOTE(review): the int conversion assumes the
// finite value fits in int — presumably fine under GopherJS's number
// representation, but worth confirming for very large magnitudes.
func Trunc(x float64) float64 {
	if x == posInf || x == negInf || x != x || 1/x == negInf {
		return x
	}
	return float64(int(x))
}
// buf exposes three typed-array views over one shared 8-byte
// ArrayBuffer (wired up in init below). Writing through one view and
// reading through another reinterprets the raw bits, which is how the
// Float32/Float64 bit-cast functions below work.
var buf struct {
	uint32array [2]uint32
	float32array [2]float32
	float64array [1]float64
}
// init replaces buf's Go arrays with typed-array views that all alias
// the same ArrayBuffer.
func init() {
	ab := js.Global.Get("ArrayBuffer").New(8)
	js.InternalObject(buf).Set("uint32array", js.Global.Get("Uint32Array").New(ab))
	js.InternalObject(buf).Set("float32array", js.Global.Get("Float32Array").New(ab))
	js.InternalObject(buf).Set("float64array", js.Global.Get("Float64Array").New(ab))
}
func Float32bits(f float32) uint32 {
	buf.float32array[0] = f
	return buf.uint32array[0]
}
func Float32frombits(b uint32) float32 {
	buf.uint32array[0] = b
	return buf.float32array[0]
}
// Float64bits reassembles the two 32-bit halves of the float64 view,
// low word at index 0. NOTE(review): this assumes the host's typed
// arrays are little-endian, which mainstream JS engines are in
// practice; it would break on a big-endian host.
func Float64bits(f float64) uint64 {
	buf.float64array[0] = f
	return uint64(buf.uint32array[1])<<32 + uint64(buf.uint32array[0])
}
func Float64frombits(b uint64) float64 {
	buf.uint32array[0] = uint32(b)
	buf.uint32array[1] = uint32(b >> 32)
	return buf.float64array[0]
}

@ -0,0 +1,12 @@
// +build js
package math_test
// Slightly higher tolerances than upstream, otherwise TestGamma fails.
// TODO: Is there a better way to fix TestGamma? It's weird that only one test
// requires increasing tolerances. Perhaps there's a better fix? Maybe we
// should override TestGamma specifically and not the package-wide tolerances,
// because this will cause many other tests to be less accurate. Or maybe this
// is fine?
// close and veryclose replace the upstream helpers of the same names,
// widening the package-wide tolerances to 4e-14 and 6e-15 respectively.
func close(a, b float64) bool { return tolerance(a, b, 4e-14) }
func veryclose(a, b float64) bool { return tolerance(a, b, 6e-15) }

@ -0,0 +1,13 @@
// +build js
package rand
import "testing"
// TestFloat32 is skipped as too slow to run under GopherJS.
func TestFloat32(t *testing.T) {
	t.Skip("slow")
}
// TestConcurrent is skipped: GopherJS substitutes the nosync package,
// so there is no real concurrency to exercise.
func TestConcurrent(t *testing.T) {
	t.Skip("using nosync")
}

@ -0,0 +1,14 @@
// +build js
package cookiejar_test
import "fmt"
// ExampleNew cannot perform real requests: it needs network access and
// httptest.NewServer, neither of which GopherJS supports. It prints the
// expected output verbatim so the example's output check still passes.
func ExampleNew() {
	// network access not supported by GopherJS, and this test depends on httptest.NewServer
	fmt.Println(`After 1st request:
  Flavor: Chocolate Chip
After 2nd request:
  Flavor: Oatmeal Raisin`)
}

@ -0,0 +1,134 @@
// +build js
package http
import (
"errors"
"fmt"
"io"
"io/ioutil"
"strconv"
"github.com/gopherjs/gopherjs/js"
)
// streamReader implements an io.ReadCloser wrapper for ReadableStream of https://fetch.spec.whatwg.org/.
type streamReader struct {
	pending []byte // bytes received from the stream but not yet handed to Read
	stream *js.Object // the ReadableStream reader obtained via getReader()
}
// Read serves any buffered bytes first; otherwise it blocks until the
// stream's read() promise resolves with the next chunk, rejects, or
// signals end of stream (reported as io.EOF).
func (r *streamReader) Read(p []byte) (n int, err error) {
	if len(r.pending) == 0 {
		var (
			bCh = make(chan []byte)
			errCh = make(chan error)
		)
		// The promise callbacks run on the JS event loop and deliver
		// their result back to this goroutine over the channels.
		r.stream.Call("read").Call("then",
			func(result *js.Object) {
				if result.Get("done").Bool() {
					errCh <- io.EOF
					return
				}
				bCh <- result.Get("value").Interface().([]byte)
			},
			func(reason *js.Object) {
				// Assumes it's a DOMException.
				errCh <- errors.New(reason.Get("message").String())
			},
		)
		select {
		case b := <-bCh:
			r.pending = b
		case err := <-errCh:
			return 0, err
		}
	}
	// Copy as much of the buffered chunk as fits in p; the remainder
	// stays pending for the next call.
	n = copy(p, r.pending)
	r.pending = r.pending[n:]
	return n, nil
}
// Close cancels the underlying stream.
func (r *streamReader) Close() error {
	// This ignores any error returned from cancel method. So far, I did not encounter any concrete
	// situation where reporting the error is meaningful. Most users ignore error from resp.Body.Close().
	// If there's a need to report error here, it can be implemented and tested when that need comes up.
	r.stream.Call("cancel")
	return nil
}
// fetchTransport is a RoundTripper that is implemented using Fetch API. It supports streaming
// response bodies.
type fetchTransport struct{}
// RoundTrip performs req via fetch(). It returns once response headers
// arrive (the body is then streamed through streamReader), the fetch is
// rejected, or the request's context is canceled.
func (t *fetchTransport) RoundTrip(req *Request) (*Response, error) {
	// Copy the Go header map into a JS Headers object.
	headers := js.Global.Get("Headers").New()
	for key, values := range req.Header {
		for _, value := range values {
			headers.Call("append", key, value)
		}
	}
	opt := map[string]interface{}{
		"method": req.Method,
		"headers": headers,
		"credentials": "same-origin",
	}
	if req.Body != nil {
		// TODO: Find out if request body can be streamed into the fetch request rather than in advance here.
		// See BufferSource at https://fetch.spec.whatwg.org/#body-mixin.
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			req.Body.Close() // RoundTrip must always close the body, including on errors.
			return nil, err
		}
		req.Body.Close()
		opt["body"] = body
	}
	respPromise := js.Global.Call("fetch", req.URL.String(), opt)
	var (
		respCh = make(chan *Response)
		errCh = make(chan error)
	)
	respPromise.Call("then",
		func(result *js.Object) {
			// Fulfillment callback: translate the JS Response into an
			// http.Response once headers are available.
			header := Header{}
			result.Get("headers").Call("forEach", func(value, key *js.Object) {
				ck := CanonicalHeaderKey(key.String())
				header[ck] = append(header[ck], value.String())
			})
			contentLength := int64(-1)
			if cl, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64); err == nil {
				contentLength = cl
			}
			// The context case keeps this callback from blocking forever
			// when RoundTrip has already returned due to cancelation.
			select {
			case respCh <- &Response{
				Status: result.Get("status").String() + " " + StatusText(result.Get("status").Int()),
				StatusCode: result.Get("status").Int(),
				Header: header,
				ContentLength: contentLength,
				Body: &streamReader{stream: result.Get("body").Call("getReader")},
				Request: req,
			}:
			case <-req.Context().Done():
			}
		},
		func(reason *js.Object) {
			// Rejection callback: a network-level failure.
			select {
			case errCh <- fmt.Errorf("net/http: fetch() failed: %s", reason.String()):
			case <-req.Context().Done():
			}
		},
	)
	select {
	case <-req.Context().Done():
		// TODO: Abort request if possible using Fetch API.
		return nil, errors.New("net/http: request canceled")
	case resp := <-respCh:
		return resp, nil
	case err := <-errCh:
		return nil, err
	}
}

@ -0,0 +1,113 @@
// +build js
package http
import (
"bufio"
"bytes"
"errors"
"io/ioutil"
"net/textproto"
"strconv"
"github.com/gopherjs/gopherjs/js"
)
// DefaultTransport is chosen once at package init based on what the
// host environment provides: the streaming Fetch API when available,
// then XMLHttpRequest, and finally a transport that always fails.
var DefaultTransport = func() RoundTripper {
	switch {
	case js.Global.Get("fetch") != js.Undefined && js.Global.Get("ReadableStream") != js.Undefined: // ReadableStream is used as a check for support of streaming response bodies, see https://fetch.spec.whatwg.org/#streams.
		return &fetchTransport{}
	case js.Global.Get("XMLHttpRequest") != js.Undefined:
		return &XHRTransport{}
	default:
		return noTransport{}
	}
}()
// noTransport is used when neither Fetch API nor XMLHttpRequest API are available. It always fails.
type noTransport struct{}
// RoundTrip always reports an error: no HTTP mechanism exists in this
// environment.
func (noTransport) RoundTrip(req *Request) (*Response, error) {
	return nil, errors.New("net/http: neither of Fetch nor XMLHttpRequest APIs is available")
}
// XHRTransport is a RoundTripper backed by the XMLHttpRequest API.
// Unlike fetchTransport it buffers the whole response in memory.
type XHRTransport struct {
	inflight map[*Request]*js.Object // live XHR objects, so CancelRequest can abort them
}
// RoundTrip performs the request via XMLHttpRequest and blocks until it
// loads, errors, or is aborted.
func (t *XHRTransport) RoundTrip(req *Request) (*Response, error) {
	xhr := js.Global.Get("XMLHttpRequest").New()
	if t.inflight == nil {
		t.inflight = map[*Request]*js.Object{}
	}
	t.inflight[req] = xhr
	defer delete(t.inflight, req)
	respCh := make(chan *Response)
	errCh := make(chan error)
	xhr.Set("onload", func() {
		// getAllResponseHeaders yields "Key: Value" lines; a trailing
		// newline is appended so textproto sees a terminated block.
		header, _ := textproto.NewReader(bufio.NewReader(bytes.NewReader([]byte(xhr.Call("getAllResponseHeaders").String() + "\n")))).ReadMIMEHeader()
		body := js.Global.Get("Uint8Array").New(xhr.Get("response")).Interface().([]byte)
		contentLength := int64(-1)
		switch req.Method {
		case "HEAD":
			// HEAD responses carry no body; trust the header instead.
			if l, err := strconv.ParseInt(header.Get("Content-Length"), 10, 64); err == nil {
				contentLength = l
			}
		default:
			contentLength = int64(len(body))
		}
		respCh <- &Response{
			Status: xhr.Get("status").String() + " " + xhr.Get("statusText").String(),
			StatusCode: xhr.Get("status").Int(),
			Header: Header(header),
			ContentLength: contentLength,
			Body: ioutil.NopCloser(bytes.NewReader(body)),
			Request: req,
		}
	})
	xhr.Set("onerror", func(e *js.Object) {
		errCh <- errors.New("net/http: XMLHttpRequest failed")
	})
	xhr.Set("onabort", func(e *js.Object) {
		errCh <- errors.New("net/http: request canceled")
	})
	xhr.Call("open", req.Method, req.URL.String())
	xhr.Set("responseType", "arraybuffer") // has to be after "open" until https://bugzilla.mozilla.org/show_bug.cgi?id=1110761 is resolved
	for key, values := range req.Header {
		for _, value := range values {
			xhr.Call("setRequestHeader", key, value)
		}
	}
	if req.Body == nil {
		xhr.Call("send")
	} else {
		// The request body must be fully buffered before sending.
		body, err := ioutil.ReadAll(req.Body)
		if err != nil {
			req.Body.Close() // RoundTrip must always close the body, including on errors.
			return nil, err
		}
		req.Body.Close()
		xhr.Call("send", body)
	}
	select {
	case resp := <-respCh:
		return resp, nil
	case err := <-errCh:
		return nil, err
	}
}
// CancelRequest aborts the in-flight XHR for req, if any; the abort
// surfaces in RoundTrip through the onabort handler above.
func (t *XHRTransport) CancelRequest(req *Request) {
	if xhr, ok := t.inflight[req]; ok {
		xhr.Call("abort")
	}
}

@ -0,0 +1,41 @@
// +build js
package net
import (
"errors"
"syscall"
"github.com/gopherjs/gopherjs/js"
)
// byteIndex returns the index of the first instance of byte c in s, or
// -1 if absent, via JavaScript's String.indexOf.
func byteIndex(s string, c byte) int {
	return js.InternalObject(s).Call("indexOf", js.Global.Get("String").Call("fromCharCode", c)).Int()
}
// Listen is unsupported under GopherJS. NOTE(review): it panics instead
// of returning the error, so the error result is never actually used.
func Listen(net, laddr string) (Listener, error) {
	panic(errors.New("network access is not supported by GopherJS"))
}
// Dial is unsupported under GopherJS and panics like Listen.
func (d *Dialer) Dial(network, address string) (Conn, error) {
	panic(errors.New("network access is not supported by GopherJS"))
}
// sysInit has nothing to set up without an OS network stack.
func sysInit() {
}
// The probe functions report that no IP stack of any kind is available.
func probeIPv4Stack() bool {
	return false
}
func probeIPv6Stack() (supportsIPv6, supportsIPv4map bool) {
	return false, false
}
func probeWindowsIPStack() (supportsVistaIP bool) {
	return false
}
// maxListenerBacklog reports the generic SOMAXCONN constant; in
// practice it is moot since Listen panics.
func maxListenerBacklog() int {
	return syscall.SOMAXCONN
}

@ -0,0 +1,32 @@
// +build js
package os
import (
"errors"
"github.com/gopherjs/gopherjs/js"
)
// runtime_args returns the package-level Args slice.
func runtime_args() []string { // not called on Windows
	return Args
}
// init fills Args from Node's process.argv when available: argv[0] is
// the node binary itself, so it is dropped and Args[0] becomes the
// script path, matching the usual os.Args convention. In a browser
// (no process object) Args remains empty and falls back to "?".
func init() {
	if process := js.Global.Get("process"); process != js.Undefined {
		argv := process.Get("argv")
		Args = make([]string, argv.Length()-1)
		for i := 0; i < argv.Length()-1; i++ {
			Args[i] = argv.Index(i + 1).String()
		}
	}
	if len(Args) == 0 {
		Args = []string{"?"}
	}
}
// runtime_beforeExit has no cleanup to perform under GopherJS.
func runtime_beforeExit() {}
// executable is unsupported; os.Executable surfaces this error.
func executable() (string, error) {
	return "", errors.New("Executable not implemented for GOARCH=js")
}

@ -0,0 +1,14 @@
// +build js
package reflect_test
import "fmt"
// ExampleStructOf cannot run for real (see the note below); it prints
// the expected output verbatim so the example's output comparison
// still passes.
func ExampleStructOf() {
	// GopherJS does not implement reflect.addReflectOff needed for this test.
	// See https://github.com/gopherjs/gopherjs/issues/499
	fmt.Println(`value: &{Height:0.4 Age:2}
json: {"height":0.4,"age":2}
value: &{Height:1.5 Age:10}`)
}

File diff suppressed because it is too large Load Diff

@ -0,0 +1,154 @@
// +build js
package reflect_test
import (
	"math"
	"reflect"
	"testing"
)
// The tests below exercise runtime internals — memory layout, GC bits,
// finalizers, typelinks, allocation counts — that have no equivalent
// under GopherJS, so they are skipped wholesale.
func TestAlignment(t *testing.T) {
	t.Skip()
}
func TestSliceOverflow(t *testing.T) {
	t.Skip()
}
func TestFuncLayout(t *testing.T) {
	t.Skip()
}
func TestArrayOfDirectIface(t *testing.T) {
	t.Skip()
}
func TestTypelinksSorted(t *testing.T) {
	t.Skip()
}
func TestGCBits(t *testing.T) {
	t.Skip()
}
func TestChanAlloc(t *testing.T) {
	t.Skip()
}
func TestNameBytesAreAligned(t *testing.T) {
	t.Skip()
}
func TestOffsetLock(t *testing.T) {
	t.Skip()
}
// TestSelectOnInvalid checks only that Select tolerates zero (invalid)
// channel Values alongside a default case without panicking.
func TestSelectOnInvalid(t *testing.T) {
	reflect.Select([]reflect.SelectCase{
		{
			Dir: reflect.SelectRecv,
			Chan: reflect.Value{},
		}, {
			Dir: reflect.SelectSend,
			Chan: reflect.Value{},
			Send: reflect.ValueOf(1),
		}, {
			Dir: reflect.SelectDefault,
		},
	})
}
// reflect.StructOf is not implemented by GopherJS, so every StructOf
// test is skipped.
func TestStructOf(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfExportRules(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfGC(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfAlg(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfGenericAlg(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfDirectIface(t *testing.T) {
	t.Skip("StructOf")
}
func TestStructOfWithInterface(t *testing.T) {
	t.Skip("StructOf")
}
// deepEqualTests mirrors the upstream DeepEqual test table; the
// commented-out entries are cyclic-structure comparisons GopherJS
// cannot yet handle (see the TODOs below).
var deepEqualTests = []DeepEqualTest{
	// Equalities
	{nil, nil, true},
	{1, 1, true},
	{int32(1), int32(1), true},
	{0.5, 0.5, true},
	{float32(0.5), float32(0.5), true},
	{"hello", "hello", true},
	{make([]int, 10), make([]int, 10), true},
	{&[3]int{1, 2, 3}, &[3]int{1, 2, 3}, true},
	{Basic{1, 0.5}, Basic{1, 0.5}, true},
	{error(nil), error(nil), true},
	{map[int]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, true},
	{fn1, fn2, true},
	// Inequalities
	{1, 2, false},
	{int32(1), int32(2), false},
	{0.5, 0.6, false},
	{float32(0.5), float32(0.6), false},
	{"hello", "hey", false},
	{make([]int, 10), make([]int, 11), false},
	{&[3]int{1, 2, 3}, &[3]int{1, 2, 4}, false},
	{Basic{1, 0.5}, Basic{1, 0.6}, false},
	{Basic{1, 0}, Basic{2, 0}, false},
	{map[int]string{1: "one", 3: "two"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{1: "one", 2: "txo"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{1: "one"}, map[int]string{2: "two", 1: "one"}, false},
	{map[int]string{2: "two", 1: "one"}, map[int]string{1: "one"}, false},
	{nil, 1, false},
	{1, nil, false},
	{fn1, fn3, false},
	{fn3, fn3, false},
	{[][]int{{1}}, [][]int{{2}}, false},
	{math.NaN(), math.NaN(), false},
	{&[1]float64{math.NaN()}, &[1]float64{math.NaN()}, false},
	{&[1]float64{math.NaN()}, self{}, true},
	{[]float64{math.NaN()}, []float64{math.NaN()}, false},
	{[]float64{math.NaN()}, self{}, true},
	{map[float64]float64{math.NaN(): 1}, map[float64]float64{1: 2}, false},
	{map[float64]float64{math.NaN(): 1}, self{}, true},
	// Nil vs empty: not the same.
	{[]int{}, []int(nil), false},
	{[]int{}, []int{}, true},
	{[]int(nil), []int(nil), true},
	{map[int]int{}, map[int]int(nil), false},
	{map[int]int{}, map[int]int{}, true},
	{map[int]int(nil), map[int]int(nil), true},
	// Mismatched types
	{1, 1.0, false},
	{int32(1), int64(1), false},
	{0.5, "hello", false},
	{[]int{1, 2, 3}, [3]int{1, 2, 3}, false},
	{&[3]interface{}{1, 2, 4}, &[3]interface{}{1, 2, "s"}, false},
	{Basic{1, 0.5}, NotBasic{1, 0.5}, false},
	{map[uint]string{1: "one", 2: "two"}, map[int]string{2: "two", 1: "one"}, false},
	// Possible loops.
	{&loop1, &loop1, true},
	//{&loop1, &loop2, true}, // TODO: Fix.
	{&loopy1, &loopy1, true},
	//{&loopy1, &loopy2, true}, // TODO: Fix.
}

@ -0,0 +1,29 @@
// +build js
package reflect
// Swapper returns a function that swaps the elements at indices i and j
// of the given slice. It panics if the argument is not a slice.
func Swapper(slice interface{}) func(i, j int) {
	sv := ValueOf(slice)
	if sv.Kind() != Slice {
		panic(&ValueError{Method: "Swapper", Kind: sv.Kind()})
	}
	// Slices of length 0 and 1 need no scratch element: any in-range
	// swap is either impossible or a no-op, so only validate indices.
	if n := sv.Len(); n == 0 {
		return func(i, j int) { panic("reflect: slice index out of range") }
	} else if n == 1 {
		return func(i, j int) {
			if i != 0 || j != 0 {
				panic("reflect: slice index out of range")
			}
		}
	}
	// One scratch element of the slice's element type, reused by every
	// swap the returned closure performs.
	scratch := New(sv.Type().Elem()).Elem()
	return func(i, j int) {
		a, b := sv.Index(i), sv.Index(j)
		scratch.Set(a)
		a.Set(b)
		b.Set(scratch)
	}
}

@ -0,0 +1,11 @@
// +build js
package regexp
import (
	"testing"
)
// TestOnePassCutoff recurses deeply enough to exhaust the JavaScript
// call stack, so it is skipped under GopherJS.
func TestOnePassCutoff(t *testing.T) {
	t.Skip() // "Maximum call stack size exceeded" on V8
}

@ -0,0 +1,14 @@
// +build js
package debug
// setGCPercent is a stub: the garbage collector is not configurable
// under GopherJS, so the call is a no-op that reports the default.
func setGCPercent(int32) int32 {
	const defaultGCPercent = 100
	return defaultGCPercent
}

// setMaxStack is a stub: the stack limit cannot be changed under
// GopherJS. It reports the documented 32-bit default of 250 MB.
func setMaxStack(bytes int) int {
	const defaultMaxStack = 250000000
	return defaultMaxStack
}

@ -0,0 +1,49 @@
// +build js
package pprof
import (
"io"
"sync"
)
// Profile is an inert stand-in for runtime/pprof's profile registry:
// GopherJS collects no profiling data, so every operation is a no-op.
// The fields mirror the upstream struct so code constructing a Profile
// still compiles; none of them are ever consulted.
type Profile struct {
	name string
	mu sync.Mutex
	m map[interface{}][]uintptr
	count func() int
	write func(io.Writer, int) error
}

// WriteTo writes nothing and reports success.
func (p *Profile) WriteTo(w io.Writer, debug int) error { return nil }

// Count reports zero: no samples are ever recorded.
func (p *Profile) Count() int { return 0 }

// Name reports the empty string regardless of the name field.
func (p *Profile) Name() string { return "" }

// Add is a no-op; values are never tracked.
func (p *Profile) Add(value interface{}, skip int) {}

// Remove is a no-op.
func (p *Profile) Remove(value interface{}) {}

// StartCPUProfile accepts the writer but never produces output.
func StartCPUProfile(w io.Writer) error { return nil }

// StopCPUProfile is a no-op.
func StopCPUProfile() {}

// WriteHeapProfile writes nothing and reports success.
func WriteHeapProfile(w io.Writer) error { return nil }

// Lookup always reports that no profile with the given name exists.
func Lookup(name string) *Profile { return nil }

@ -0,0 +1,201 @@
// +build js
package runtime
import (
"runtime/internal/sys"
"github.com/gopherjs/gopherjs/js"
)
const GOOS = sys.GOOS
const GOARCH = "js"
const Compiler = "gopherjs"
// fake for error.go
type eface struct {
	_type *_type
}
type _type struct {
}
func (t *_type) string() string {
	return ""
}
// init wires runtime support into the GopherJS-generated globals: it
// exposes the js package's Object/Error pointer types and installs the
// $throwRuntimeError hook used by compiler-generated checks.
func init() {
	jsPkg := js.Global.Get("$packages").Get("github.com/gopherjs/gopherjs/js")
	js.Global.Set("$jsObjectPtr", jsPkg.Get("Object").Get("ptr"))
	js.Global.Set("$jsErrorPtr", jsPkg.Get("Error").Get("ptr"))
	js.Global.Set("$throwRuntimeError", js.InternalObject(func(msg string) {
		panic(errorString(msg))
	}))
	// avoid dead code elimination
	var e error
	e = &TypeAssertionError{}
	_ = e
}
// GOROOT reads the GOROOT environment variable under Node.js; in a
// browser (no process object) it reports "/".
func GOROOT() string {
	process := js.Global.Get("process")
	if process == js.Undefined {
		return "/"
	}
	goroot := process.Get("env").Get("GOROOT")
	if goroot != js.Undefined {
		return goroot.String()
	}
	return sys.DefaultGoroot
}
// Breakpoint triggers the JavaScript debugger statement.
func Breakpoint() {
	js.Debugger()
}
// Caller reports file and line by parsing the engine's stack trace;
// pc is always 0 since GopherJS has no program counters.
// NOTE(review): the "(file:line:col)" parsing assumes V8-style frames —
// confirm behavior on other engines.
func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
	info := js.Global.Get("Error").New().Get("stack").Call("split", "\n").Index(skip + 2)
	if info == js.Undefined {
		return 0, "", 0, false
	}
	parts := info.Call("substring", info.Call("indexOf", "(").Int()+1, info.Call("indexOf", ")").Int()).Call("split", ":")
	return 0, parts.Index(0).String(), parts.Index(1).Int(), true
}
// Callers is unsupported and always reports zero frames.
func Callers(skip int, pc []uintptr) int {
	return 0
}
// GC is a no-op; collection is left to the JavaScript engine.
func GC() {
}
// Goexit marks the current goroutine as exited and unwinds it by
// throwing through the GopherJS scheduler.
func Goexit() {
	js.Global.Get("$curGoroutine").Set("exit", true)
	js.Global.Call("$throw", nil)
}
// GOMAXPROCS reports 1 and ignores n: JavaScript is single-threaded.
func GOMAXPROCS(n int) int {
	return 1
}
// Gosched yields via a zero-delay setTimeout, letting other goroutines
// (and browser events) run before this one resumes.
func Gosched() {
	c := make(chan struct{})
	js.Global.Call("$setTimeout", js.InternalObject(func() { close(c) }), 0)
	<-c
}
func NumCPU() int {
	return 1
}
// NumGoroutine reads the scheduler's live goroutine counter.
func NumGoroutine() int {
	return js.Global.Get("$totalGoroutines").Int()
}
// MemStats mirrors the upstream runtime.MemStats layout so code reading
// the fields compiles; ReadMemStats never fills it in under GopherJS.
type MemStats struct {
	// General statistics.
	Alloc uint64 // bytes allocated and still in use
	TotalAlloc uint64 // bytes allocated (even if freed)
	Sys uint64 // bytes obtained from system (sum of XxxSys below)
	Lookups uint64 // number of pointer lookups
	Mallocs uint64 // number of mallocs
	Frees uint64 // number of frees
	// Main allocation heap statistics.
	HeapAlloc uint64 // bytes allocated and still in use
	HeapSys uint64 // bytes obtained from system
	HeapIdle uint64 // bytes in idle spans
	HeapInuse uint64 // bytes in non-idle span
	HeapReleased uint64 // bytes released to the OS
	HeapObjects uint64 // total number of allocated objects
	// Low-level fixed-size structure allocator statistics.
	// Inuse is bytes used now.
	// Sys is bytes obtained from system.
	StackInuse uint64 // bytes used by stack allocator
	StackSys uint64
	MSpanInuse uint64 // mspan structures
	MSpanSys uint64
	MCacheInuse uint64 // mcache structures
	MCacheSys uint64
	BuckHashSys uint64 // profiling bucket hash table
	GCSys uint64 // GC metadata
	OtherSys uint64 // other system allocations
	// Garbage collector statistics.
	NextGC uint64 // next collection will happen when HeapAlloc ≥ this amount
	LastGC uint64 // end time of last collection (nanoseconds since 1970)
	PauseTotalNs uint64
	PauseNs [256]uint64 // circular buffer of recent GC pause durations, most recent at [(NumGC+255)%256]
	PauseEnd [256]uint64 // circular buffer of recent GC pause end times
	NumGC uint32
	GCCPUFraction float64 // fraction of CPU time used by GC
	EnableGC bool
	DebugGC bool
	// Per-size allocation statistics.
	// 61 is NumSizeClasses in the C code.
	BySize [61]struct {
		Size uint32
		Mallocs uint64
		Frees uint64
	}
}
// ReadMemStats is a no-op: no allocation statistics are tracked, so m
// is left unchanged.
func ReadMemStats(m *MemStats) {
}
// SetFinalizer is a no-op; GopherJS cannot observe garbage collection.
func SetFinalizer(x, f interface{}) {
}
type Func struct {
	opaque struct{} // unexported field to disallow conversions
}
// Func methods report zero values: there is no symbol table to query.
func (_ *Func) Entry() uintptr { return 0 }
func (_ *Func) FileLine(pc uintptr) (file string, line int) { return "", 0 }
func (_ *Func) Name() string { return "" }
func FuncForPC(pc uintptr) *Func {
	return nil
}
var MemProfileRate int = 512 * 1024
// Profiling knobs are accepted and ignored.
func SetBlockProfileRate(rate int) {
}
func SetMutexProfileFraction(rate int) int {
	// TODO: Investigate this. If it's possible to implement, consider doing so, otherwise remove this comment.
	return 0
}
// Stack copies the current JS stack trace — minus its first line, the
// synthesized Error message — into buf and returns the bytes written.
func Stack(buf []byte, all bool) int {
	s := js.Global.Get("Error").New().Get("stack")
	if s == js.Undefined {
		return 0
	}
	return copy(buf, s.Call("substr", s.Call("indexOf", "\n").Int()+1).String())
}
func LockOSThread() {}
func UnlockOSThread() {}
func Version() string {
	return sys.TheVersion
}
// Execution tracing is unsupported; Start/Stop succeed trivially.
func StartTrace() error { return nil }
func StopTrace() {}
// ReadTrace is declared without a body; calling it will fail at runtime.
func ReadTrace() []byte
// We fake a cgo environment to catch errors. Therefore we have to implement this and always return 0.
func NumCgoCall() int64 {
	return 0
}
func efaceOf(ep *interface{}) *eface {
	panic("efaceOf: not supported")
}
// KeepAlive is a no-op: without real GC observation it has no effect.
func KeepAlive(interface{}) {}

@ -0,0 +1,47 @@
// +build js
package strings
import (
"unicode/utf8"
"github.com/gopherjs/gopherjs/js"
)
// IndexByte returns the index of the first instance of c in s, or -1,
// via JavaScript's String.indexOf.
func IndexByte(s string, c byte) int {
	return js.InternalObject(s).Call("indexOf", js.Global.Get("String").Call("fromCharCode", c)).Int()
}
// Index returns the index of the first instance of sep in s, or -1.
func Index(s, sep string) int {
	return js.InternalObject(s).Call("indexOf", js.InternalObject(sep)).Int()
}
// LastIndex returns the index of the last instance of sep in s, or -1.
func LastIndex(s, sep string) int {
	return js.InternalObject(s).Call("lastIndexOf", js.InternalObject(sep)).Int()
}
// Count counts non-overlapping instances of sep in s. An empty sep
// yields 1 + the number of runes in s, matching the standard library.
func Count(s, sep string) int {
	n := 0
	// special cases
	switch {
	case len(sep) == 0:
		return utf8.RuneCountInString(s) + 1
	case len(sep) > len(s):
		return 0
	case len(sep) == len(s):
		if sep == s {
			return 1
		}
		return 0
	}
	// Repeatedly locate the next occurrence and skip past it.
	for {
		pos := Index(s, sep)
		if pos == -1 {
			break
		}
		n++
		s = s[pos+len(sep):]
	}
	return n
}

@ -0,0 +1,185 @@
// +build js
package atomic
import (
"unsafe"
"github.com/gopherjs/gopherjs/js"
)
// GopherJS executes all goroutines on the single JavaScript thread, so
// plain reads and writes can never be interleaved mid-operation. Each
// "atomic" primitive below is therefore an ordinary memory access that
// merely preserves the sync/atomic signatures.

func SwapInt32(addr *int32, new int32) (old int32) {
	old, *addr = *addr, new
	return
}

func SwapInt64(addr *int64, new int64) (old int64) {
	old, *addr = *addr, new
	return
}

func SwapUint32(addr *uint32, new uint32) (old uint32) {
	old, *addr = *addr, new
	return
}

func SwapUint64(addr *uint64, new uint64) (old uint64) {
	old, *addr = *addr, new
	return
}

func SwapUintptr(addr *uintptr, new uintptr) (old uintptr) {
	old, *addr = *addr, new
	return
}

func SwapPointer(addr *unsafe.Pointer, new unsafe.Pointer) (old unsafe.Pointer) {
	old, *addr = *addr, new
	return
}

// Each CompareAndSwap writes new and reports true only when *addr still
// holds the expected old value.

func CompareAndSwapInt32(addr *int32, old, new int32) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

func CompareAndSwapInt64(addr *int64, old, new int64) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

func CompareAndSwapUint32(addr *uint32, old, new uint32) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

func CompareAndSwapUint64(addr *uint64, old, new uint64) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

func CompareAndSwapUintptr(addr *uintptr, old, new uintptr) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

func CompareAndSwapPointer(addr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	if *addr != old {
		return false
	}
	*addr = new
	return true
}

// Each Add stores and returns the updated sum.

func AddInt32(addr *int32, delta int32) int32 {
	*addr += delta
	return *addr
}

func AddUint32(addr *uint32, delta uint32) uint32 {
	*addr += delta
	return *addr
}

func AddInt64(addr *int64, delta int64) int64 {
	*addr += delta
	return *addr
}

func AddUint64(addr *uint64, delta uint64) uint64 {
	*addr += delta
	return *addr
}

func AddUintptr(addr *uintptr, delta uintptr) uintptr {
	*addr += delta
	return *addr
}

// Loads and stores are direct memory accesses.

func LoadInt32(addr *int32) int32 { return *addr }

func LoadInt64(addr *int64) int64 { return *addr }

func LoadUint32(addr *uint32) uint32 { return *addr }

func LoadUint64(addr *uint64) uint64 { return *addr }

func LoadUintptr(addr *uintptr) uintptr { return *addr }

func LoadPointer(addr *unsafe.Pointer) unsafe.Pointer { return *addr }

func StoreInt32(addr *int32, val int32) { *addr = val }

func StoreInt64(addr *int64, val int64) { *addr = val }

func StoreUint32(addr *uint32, val uint32) { *addr = val }

func StoreUint64(addr *uint64, val uint64) { *addr = val }

func StoreUintptr(addr *uintptr, val uintptr) { *addr = val }

func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer) { *addr = val }
// Load returns the value set by the most recent Store, or nil if none.
func (v *Value) Load() (x interface{}) {
	return v.v
}
// Store sets the Value. As in sync/atomic, storing nil panics, as does
// storing a value whose concrete type differs from a previous Store's.
// NOTE(review): the type check compares the values' JavaScript
// constructors — presumably equivalent to comparing Go dynamic types
// under GopherJS's representation; confirm for edge cases.
func (v *Value) Store(x interface{}) {
	if x == nil {
		panic("sync/atomic: store of nil value into Value")
	}
	if v.v != nil && js.InternalObject(x).Get("constructor") != js.InternalObject(v.v).Get("constructor") {
		panic("sync/atomic: store of inconsistently typed value into Value")
	}
	v.v = x
}

@ -0,0 +1,9 @@
// +build js
package atomic_test
import "testing"
// TestHammerStoreLoad depends on package unsafe in ways GopherJS does
// not support, so it is skipped (see the skip message).
func TestHammerStoreLoad(t *testing.T) {
	t.Skip("use of unsafe")
}

@ -0,0 +1,41 @@
// +build js
package sync
// Cond replaces sync.Cond for GopherJS's single-threaded scheduler:
// waiters block on an unbuffered channel instead of the runtime's
// notify list. The upstream fields are kept so the type layout stays
// source-compatible, but only L, n and ch are used here.
type Cond struct {
	// fields used by vanilla implementation
	noCopy noCopy
	L Locker
	notify notifyList
	checker copyChecker
	// fields used by new implementation
	n int
	ch chan bool
}
// Wait releases c.L and suspends the goroutine until Signal or
// Broadcast delivers on c.ch, then reacquires c.L before returning.
func (c *Cond) Wait() {
	c.n++
	// Lazily create the channel on first use so a zero Cond works.
	if c.ch == nil {
		c.ch = make(chan bool)
	}
	c.L.Unlock()
	<-c.ch
	c.L.Lock()
}
// Signal wakes one waiter; it is a no-op when nobody is waiting.
func (c *Cond) Signal() {
	if c.n == 0 {
		return
	}
	c.n--
	c.ch <- true
}
// Broadcast wakes every goroutine currently counted in c.n.
func (c *Cond) Broadcast() {
	n := c.n
	c.n = 0
	for i := 0; i < n; i++ {
		c.ch <- true
	}
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save