parent
343bc45061
commit
4d7526736a
@ -1,16 +0,0 @@
|
||||
desc: Auto generated by fswatch [gosuv]
|
||||
triggers:
|
||||
- name: ""
|
||||
pattens:
|
||||
- '**/*.go'
|
||||
- '**/*.c'
|
||||
- '**/*.py'
|
||||
env:
|
||||
DEBUG: "1"
|
||||
cmd: sh ./build.sh
|
||||
shell: true
|
||||
delay: 100ms
|
||||
signal: TERM
|
||||
watch_paths:
|
||||
- .
|
||||
watch_depth: 0
|
@ -1,8 +0,0 @@
|
||||
author: codeskyblue
|
||||
description: Port of python supervisor
|
||||
os: darwin linux
|
||||
includes: []
|
||||
excludes:
|
||||
- \.git
|
||||
script:
|
||||
- go build -tags bindata -ldflags "-X main.Version=$(git describe --tags --dirty --always)"
|
@ -1,18 +0,0 @@
|
||||
builds:
|
||||
-
|
||||
goos:
|
||||
- linux
|
||||
- darwin
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- arm
|
||||
goarm:
|
||||
- 6
|
||||
- 7
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: arm
|
||||
flags: -tags vfs
|
||||
hooks:
|
||||
pre: go generate
|
@ -1,10 +0,0 @@
|
||||
language: go
|
||||
sudo: false
|
||||
go:
|
||||
- 1.7
|
||||
install:
|
||||
- go get -v
|
||||
script:
|
||||
- go test -v
|
||||
after_success:
|
||||
- test -n "$TRAVIS_TAG" && curl -sL https://git.io/goreleaser | bash
|
@ -1,70 +0,0 @@
|
||||
hash: ad7063d34838040bf683183342f3814298179801e2e055f29bca796456c88300
|
||||
updated: 2017-08-11T19:08:31.193345249+08:00
|
||||
imports:
|
||||
- name: github.com/codeskyblue/kexec
|
||||
version: 863094f94c7fb7c235764bf8f0f79cccea78c8eb
|
||||
- name: github.com/equinox-io/equinox
|
||||
version: 6f97d0d3970881d3e53dd6f547a41109eb055e54
|
||||
subpackages:
|
||||
- internal/go-update
|
||||
- internal/go-update/internal/binarydist
|
||||
- internal/go-update/internal/osext
|
||||
- internal/osext
|
||||
- proto
|
||||
- name: github.com/franela/goreq
|
||||
version: b5b0f5eb2d16f20345cce0a544a75163579c0b00
|
||||
- name: github.com/glycerine/rbuf
|
||||
version: 96ad00d7fa74f7dd9857f2b6068451062b4ebc5d
|
||||
- name: github.com/go-yaml/yaml
|
||||
version: 25c4ec802a7d637f88d584ab26798e94ad14c13b
|
||||
- name: github.com/goji/httpauth
|
||||
version: 2da839ab0f4df05a6db5eb277995589dadbd4fb9
|
||||
- name: github.com/gorilla/context
|
||||
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
|
||||
- name: github.com/gorilla/mux
|
||||
version: ac112f7d75a0714af1bd86ab17749b31f7809640
|
||||
- name: github.com/gorilla/websocket
|
||||
version: a69d9f6de432e2c6b296a947d8a5ee88f68522cf
|
||||
- name: github.com/kennygrant/sanitize
|
||||
version: 6a0bfdde8629a3a3a7418a7eae45c54154692514
|
||||
- name: github.com/mitchellh/go-ps
|
||||
version: 4fdf99ab29366514c69ccccddab5dc58b8d84062
|
||||
- name: github.com/qiniu/log
|
||||
version: a304a74568d6982c5b89de1c68ac8fca3add196a
|
||||
- name: github.com/shurcooL/httpfs
|
||||
version: bc35257962c2dea93e81c976b72c7c6eac45fd8a
|
||||
subpackages:
|
||||
- vfsutil
|
||||
- name: github.com/shurcooL/vfsgen
|
||||
version: 385e5833a54aaba5860ca26036b8e8b72135ab96
|
||||
- name: github.com/urfave/cli
|
||||
version: cfb38830724cc34fedffe9a2a29fb54fa9169cd1
|
||||
- name: golang.org/x/net
|
||||
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
|
||||
subpackages:
|
||||
- html
|
||||
- html/atom
|
||||
- name: golang.org/x/tools
|
||||
version: 5831d16d18029819d39f99bdc2060b8eff410b6b
|
||||
subpackages:
|
||||
- godoc/vfs
|
||||
testImports:
|
||||
- name: github.com/gopherjs/gopherjs
|
||||
version: 2b1d432c8a82c9bff0b0baffaeb3ec6e92974112
|
||||
subpackages:
|
||||
- js
|
||||
- name: github.com/jtolds/gls
|
||||
version: 77f18212c9c7edc9bd6a33d383a7b545ce62f064
|
||||
- name: github.com/smartystreets/assertions
|
||||
version: 1540c14c9f1bd1abeba90f29762a4c6e50582303
|
||||
subpackages:
|
||||
- internal/go-render/render
|
||||
- internal/oglematchers
|
||||
- name: github.com/smartystreets/goconvey
|
||||
version: 9e8dc3f972df6c8fcc0375ef492c24d0bb204857
|
||||
subpackages:
|
||||
- convey
|
||||
- convey/gotest
|
||||
- convey/reporting
|
||||
- name: github.com/smartystreets/logging
|
||||
version: ac3a674540761aa0b4382094ba4795f917e85c7f
|
@ -1,19 +0,0 @@
|
||||
package: github.com/codeskyblue/gosuv
|
||||
import:
|
||||
- package: github.com/codeskyblue/kexec
|
||||
- package: github.com/equinox-io/equinox
|
||||
- package: github.com/franela/goreq
|
||||
- package: github.com/glycerine/rbuf
|
||||
- package: github.com/go-yaml/yaml
|
||||
- package: github.com/goji/httpauth
|
||||
- package: github.com/gorilla/mux
|
||||
- package: github.com/gorilla/websocket
|
||||
- package: github.com/kennygrant/sanitize
|
||||
- package: github.com/mitchellh/go-ps
|
||||
- package: github.com/qiniu/log
|
||||
- package: github.com/shurcooL/vfsgen
|
||||
- package: github.com/urfave/cli
|
||||
testImport:
|
||||
- package: github.com/smartystreets/goconvey
|
||||
subpackages:
|
||||
- convey
|
@ -0,0 +1,91 @@
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
|
||||
github.com/GeertJohan/go.rice v1.0.2 h1:PtRw+Tg3oa3HYwiDBZyvOJ8LdIyf6lAovJJtr7YOAYk=
|
||||
github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4=
|
||||
github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c=
|
||||
github.com/codeskyblue/gosuv v0.0.0-20180605125048-3744b1b28ce4 h1:xk+Ju5zwiEjq5IiMn7J/UyqBRm6U2Lx43kQR2ikI4yk=
|
||||
github.com/codeskyblue/gosuv v0.0.0-20180605125048-3744b1b28ce4/go.mod h1:BgyIM5LuI5LANtp7WnEFgp+TM5x9q7E6BjiZ9TiPjIE=
|
||||
github.com/codeskyblue/kexec v0.0.0-20180119015717-5a4bed90d99a h1:sh6+bBCba9tb/h88RgfYj4k3uG987X8gxLASw8eJLvc=
|
||||
github.com/codeskyblue/kexec v0.0.0-20180119015717-5a4bed90d99a/go.mod h1:6m1GKzdd6CW8W+GUW7u4I+2LEd4QEhsYn6nU429YI+Q=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
|
||||
github.com/daaku/go.zipexe v1.0.1 h1:wV4zMsDOI2SZ2m7Tdz1Ps96Zrx+TzaK15VbUaGozw0M=
|
||||
github.com/daaku/go.zipexe v1.0.1/go.mod h1:5xWogtqlYnfBXkSB1o9xysukNP9GTvaNkqzUZbt3Bw8=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/equinox-io/equinox v1.2.0 h1:bBS7Ou+Y7Jwgmy8TWSYxEh85WctuFn7FPlgbUzX4DBA=
|
||||
github.com/equinox-io/equinox v1.2.0/go.mod h1:6s3HJB0PYUNgs0mxmI8fHdfVl3TQ25ieA/PVfr+eyVo=
|
||||
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf h1:NrF81UtW8gG2LBGkXFQFqlfNnvMt9WdB46sfdJY4oqc=
|
||||
github.com/franela/goblin v0.0.0-20211003143422-0a4f594942bf/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54=
|
||||
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/glycerine/rbuf v0.0.0-20190314090850-75b78581bebe h1:S7HF/JKUdDrsd66htKdBOt/t3WvhU3l8EXe0U3WxEDA=
|
||||
github.com/glycerine/rbuf v0.0.0-20190314090850-75b78581bebe/go.mod h1:BOGkN1CszB3i4g9xn96RH4t5uXnxJjnC5/RWJ1Wx7GM=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o=
|
||||
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d h1:lBXNCxVENCipq4D1Is42JVOP4eQjlB8TQ6H69Yx5J9Q=
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20220410123724-9e86199038b0 h1:fWY+zXdWhvWndXqnMj4SyC/vi8sK508OjhGCtMzsA9M=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20220410123724-9e86199038b0/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o=
|
||||
github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak=
|
||||
github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
|
||||
github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
|
||||
github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc=
|
||||
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
|
||||
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
|
||||
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
|
||||
github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
|
||||
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
|
||||
github.com/smartystreets/assertions v1.2.1 h1:bKNHfEv7tSIjZ8JbKaFjzFINljxG4lzZvmHUnElzOIg=
|
||||
github.com/smartystreets/assertions v1.2.1/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8=
|
||||
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
|
||||
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
|
||||
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/urfave/cli v1.22.5 h1:lNq9sAHXK2qfdI8W+GRItjCEkI+2oR4d+MEHy1CKXoU=
|
||||
github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
|
||||
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
@ -0,0 +1,7 @@
|
||||
/example/example
|
||||
/example/example.exe
|
||||
/rice/rice
|
||||
/rice/rice.exe
|
||||
|
||||
*.rice-box.go
|
||||
.wercker
|
@ -0,0 +1,16 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- master
|
||||
- 1.11.x
|
||||
- 1.10.x
|
||||
- 1.9.x
|
||||
- 1.8.x
|
||||
|
||||
install:
|
||||
- go get -t ./...
|
||||
- env
|
||||
script:
|
||||
- go build -x ./...
|
||||
- go test -cover ./...
|
||||
- go vet ./...
|
@ -0,0 +1,4 @@
|
||||
Geert-Johan Riemer <geertjohan@geertjohan.net>
|
||||
Paul Maddox <paul.maddox@gmail.com>
|
||||
Vincent Petithory <vincent.petithory@gmail.com>
|
||||
|
@ -0,0 +1,22 @@
|
||||
Copyright (c) 2013, Geert-Johan Riemer
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,129 @@
|
||||
# go.rice
|
||||
|
||||
[![Build Status](https://travis-ci.org/GeertJohan/go.rice.png)](https://travis-ci.org/GeertJohan/go.rice)
|
||||
[![Godoc](https://img.shields.io/badge/godoc-go.rice-blue.svg?style=flat-square)](https://godoc.org/github.com/GeertJohan/go.rice)
|
||||
|
||||
go.rice is a [Go](http://golang.org) package that makes working with resources such as html,js,css,images and templates easy. During development `go.rice` will load required files directly from disk. Upon deployment it's easy to add all resource files to a executable using the `rice` tool, without changing the source code for your package. go.rice provides methods to add resources to a binary in different scenarios.
|
||||
|
||||
## What does it do
|
||||
|
||||
The first thing go.rice does is finding the correct absolute path for your resource files. Say you are executing a binary in your home directory, but your `html-files` are in `$GOPATH/src/yourApplication/html-files`. `go.rice` will lookup the correct path for that directory (relative to the location of yourApplication). All you have to do is include the resources using `rice.FindBox("html-files")`.
|
||||
|
||||
This works fine when the source is available to the machine executing the binary, which is the case when installing the executable with `go get` or `go install`. But it does not work when you wish to provide a single binary without source. This is where the `rice` tool comes in. It analyses source code and finds call's to `rice.FindBox(..)`. Then it adds the required directories to the executable binary, There are two strategies to do this. You can 'embed' the assets by generating go source code and then compile them into the executable binary, or you can 'append' the assets to the executable binary after compiling. In both cases the `rice.FindBox(..)` call detects the embedded or appended resources and load those, instead of looking up files from disk.
|
||||
|
||||
## Installation
|
||||
|
||||
Use `go get` to install the package the `rice` tool.
|
||||
|
||||
```bash
|
||||
go get github.com/GeertJohan/go.rice
|
||||
go get github.com/GeertJohan/go.rice/rice
|
||||
```
|
||||
|
||||
## Package usage
|
||||
|
||||
Import the package: `import "github.com/GeertJohan/go.rice"`
|
||||
|
||||
Serving a static content folder over HTTP with a rice Box:
|
||||
|
||||
```go
|
||||
http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox()))
|
||||
http.ListenAndServe(":8080", nil)
|
||||
```
|
||||
|
||||
Serve a static content folder over HTTP at a non-root location:
|
||||
|
||||
```go
|
||||
box := rice.MustFindBox("cssfiles")
|
||||
cssFileServer := http.StripPrefix("/css/", http.FileServer(box.HTTPBox()))
|
||||
http.Handle("/css/", cssFileServer)
|
||||
http.ListenAndServe(":8080", nil)
|
||||
```
|
||||
|
||||
Note the *trailing slash* in `/css/` in both the call to
|
||||
`http.StripPrefix` and `http.Handle`.
|
||||
|
||||
Loading a template:
|
||||
|
||||
```go
|
||||
// find a rice.Box
|
||||
templateBox, err := rice.FindBox("example-templates")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// get file contents as string
|
||||
templateString, err := templateBox.String("message.tmpl")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
// parse and execute the template
|
||||
tmplMessage, err := template.New("message").Parse(templateString)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
tmplMessage.Execute(os.Stdout, map[string]string{"Message": "Hello, world!"})
|
||||
|
||||
```
|
||||
|
||||
Never call `FindBox()` or `MustFindBox()` from an `init()` function, as there is no guarantee the boxes are loaded at that time.
|
||||
|
||||
### Calling FindBox and MustFindBox
|
||||
|
||||
Always call `FindBox()` or `MustFindBox()` with string literals e.g. `FindBox("example")`. Do not use string constants or variables. This will prevent the rice tool to fail with error `Error: found call to rice.FindBox, but argument must be a string literal.`.
|
||||
|
||||
## Tool usage
|
||||
|
||||
The `rice` tool lets you add the resources to a binary executable so the files are not loaded from the filesystem anymore. This creates a 'standalone' executable. There are multiple strategies to add the resources and assets to a binary, each has pro's and con's but all will work without requiring changes to the way you load the resources.
|
||||
|
||||
### `rice embed-go`: Embed resources by generating Go source code
|
||||
|
||||
Execute this method before building. It generates a single Go source file called *rice-box.go* for each package. The generated go file contains all assets. The Go tool compiles this into the binary.
|
||||
|
||||
The downside with this option is that the generated go source file can become large, which may slow down compilation and requires more memory to compile.
|
||||
|
||||
Execute the following commands:
|
||||
|
||||
```bash
|
||||
rice embed-go
|
||||
go build
|
||||
```
|
||||
|
||||
*A Note on Symbolic Links*: `embed-go` uses the `os.Walk` function from the standard library. The `os.Walk` function does **not** follow symbolic links. When creating a box, be aware that any symbolic links inside your box's directory are not followed. When the box itself is a symbolic link, the rice tool resolves its actual location before adding the contents.
|
||||
|
||||
### `rice append`: Append resources to executable as zip file
|
||||
|
||||
This method changes an already built executable. It appends the resources as zip file to the binary. It makes compilation a lot faster. Using the append method works great for adding large assets to an executable binary.
|
||||
|
||||
A downside for appending is that it does not provide a working Seek method.
|
||||
|
||||
Run the following commands to create a standalone executable.
|
||||
|
||||
```bash
|
||||
go build -o example
|
||||
rice append --exec example
|
||||
```
|
||||
|
||||
## Help information
|
||||
|
||||
Run `rice --help` for information about all flags and subcommands.
|
||||
|
||||
You can use the `--help` flag on each sub-command. For example: `rice append --help`.
|
||||
|
||||
## Order of precedence
|
||||
|
||||
When opening a new box, the `rice.FindBox(..)` tries to locate the resources in the following order:
|
||||
|
||||
- embedded (generated as `rice-box.go`)
|
||||
- appended (appended to the binary executable after compiling)
|
||||
- 'live' from filesystem
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under a Simplified BSD license. Please read the [LICENSE file][license].
|
||||
|
||||
## Package documentation
|
||||
|
||||
You will find package documentation at [godoc.org/github.com/GeertJohan/go.rice][godoc].
|
||||
|
||||
[license]: https://github.com/GeertJohan/go.rice/blob/master/LICENSE
|
||||
[godoc]: http://godoc.org/github.com/GeertJohan/go.rice
|
@ -0,0 +1,142 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/daaku/go.zipexe"
|
||||
)
|
||||
|
||||
// appendedBox defines an appended box
|
||||
type appendedBox struct {
|
||||
Name string // box name
|
||||
Files map[string]*appendedFile // appended files (*zip.File) by full path
|
||||
Time time.Time
|
||||
}
|
||||
|
||||
type appendedFile struct {
|
||||
zipFile *zip.File
|
||||
dir bool
|
||||
dirInfo *appendedDirInfo
|
||||
children []*appendedFile
|
||||
content []byte
|
||||
}
|
||||
|
||||
// appendedBoxes is a public register of appendes boxes
|
||||
var appendedBoxes = make(map[string]*appendedBox)
|
||||
|
||||
func init() {
|
||||
// find if exec is appended
|
||||
thisFile, err := os.Executable()
|
||||
if err != nil {
|
||||
return // not appended or cant find self executable
|
||||
}
|
||||
thisFile, err = filepath.EvalSymlinks(thisFile)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
closer, rd, err := zipexe.OpenCloser(thisFile)
|
||||
if err != nil {
|
||||
return // not appended
|
||||
}
|
||||
defer closer.Close()
|
||||
|
||||
for _, f := range rd.File {
|
||||
// get box and file name from f.Name
|
||||
fileParts := strings.SplitN(strings.TrimLeft(filepath.ToSlash(f.Name), "/"), "/", 2)
|
||||
boxName := fileParts[0]
|
||||
var fileName string
|
||||
if len(fileParts) > 1 {
|
||||
fileName = fileParts[1]
|
||||
}
|
||||
|
||||
// find box or create new one if doesn't exist
|
||||
box := appendedBoxes[boxName]
|
||||
if box == nil {
|
||||
box = &appendedBox{
|
||||
Name: boxName,
|
||||
Files: make(map[string]*appendedFile),
|
||||
Time: f.ModTime(),
|
||||
}
|
||||
appendedBoxes[boxName] = box
|
||||
}
|
||||
|
||||
// create and add file to box
|
||||
af := &appendedFile{
|
||||
zipFile: f,
|
||||
}
|
||||
if f.Comment == "dir" {
|
||||
af.dir = true
|
||||
af.dirInfo = &appendedDirInfo{
|
||||
name: filepath.Base(af.zipFile.Name),
|
||||
time: af.zipFile.ModTime(),
|
||||
}
|
||||
} else {
|
||||
// this is a file, we need it's contents so we can create a bytes.Reader when the file is opened
|
||||
// make a new byteslice
|
||||
af.content = make([]byte, af.zipFile.FileInfo().Size())
|
||||
// ignore reading empty files from zip (empty file still is a valid file to be read though!)
|
||||
if len(af.content) > 0 {
|
||||
// open io.ReadCloser
|
||||
rc, err := af.zipFile.Open()
|
||||
if err != nil {
|
||||
af.content = nil // this will cause an error when the file is being opened or seeked (which is good)
|
||||
// TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet..
|
||||
log.Printf("error opening appended file %s: %v", af.zipFile.Name, err)
|
||||
} else {
|
||||
_, err = rc.Read(af.content)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
af.content = nil // this will cause an error when the file is being opened or seeked (which is good)
|
||||
// TODO: it's quite blunt to just log this stuff. but this is in init, so rice.Debug can't be changed yet..
|
||||
log.Printf("error reading data for appended file %s: %v", af.zipFile.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add appendedFile to box file list
|
||||
box.Files[fileName] = af
|
||||
|
||||
// add to parent dir (if any)
|
||||
dirName := filepath.Dir(fileName)
|
||||
if dirName == "." {
|
||||
dirName = ""
|
||||
}
|
||||
if fileName != "" { // don't make box root dir a child of itself
|
||||
if dir := box.Files[dirName]; dir != nil {
|
||||
dir.children = append(dir.children, af)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// implements os.FileInfo.
|
||||
// used for Readdir()
|
||||
type appendedDirInfo struct {
|
||||
name string
|
||||
time time.Time
|
||||
}
|
||||
|
||||
func (adi *appendedDirInfo) Name() string {
|
||||
return adi.name
|
||||
}
|
||||
func (adi *appendedDirInfo) Size() int64 {
|
||||
return 0
|
||||
}
|
||||
func (adi *appendedDirInfo) Mode() os.FileMode {
|
||||
return os.ModeDir
|
||||
}
|
||||
func (adi *appendedDirInfo) ModTime() time.Time {
|
||||
return adi.time
|
||||
}
|
||||
func (adi *appendedDirInfo) IsDir() bool {
|
||||
return true
|
||||
}
|
||||
func (adi *appendedDirInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
@ -0,0 +1,339 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/GeertJohan/go.rice/embedded"
|
||||
)
|
||||
|
||||
// Box abstracts a directory for resources/files.
|
||||
// It can either load files from disk, or from embedded code (when `rice --embed` was ran).
|
||||
type Box struct {
|
||||
name string
|
||||
absolutePath string
|
||||
embed *embedded.EmbeddedBox
|
||||
appendd *appendedBox
|
||||
}
|
||||
|
||||
var defaultLocateOrder = []LocateMethod{LocateEmbedded, LocateAppended, LocateFS}
|
||||
|
||||
func findBox(name string, order []LocateMethod) (*Box, error) {
|
||||
b := &Box{name: name}
|
||||
|
||||
// no support for absolute paths since gopath can be different on different machines.
|
||||
// therefore, required box must be located relative to package requiring it.
|
||||
if filepath.IsAbs(name) {
|
||||
return nil, errors.New("given name/path is absolute")
|
||||
}
|
||||
|
||||
var err error
|
||||
for _, method := range order {
|
||||
switch method {
|
||||
case LocateEmbedded:
|
||||
if embed := embedded.EmbeddedBoxes[name]; embed != nil {
|
||||
b.embed = embed
|
||||
return b, nil
|
||||
}
|
||||
|
||||
case LocateAppended:
|
||||
appendedBoxName := strings.Replace(name, `/`, `-`, -1)
|
||||
if appendd := appendedBoxes[appendedBoxName]; appendd != nil {
|
||||
b.appendd = appendd
|
||||
return b, nil
|
||||
}
|
||||
|
||||
case LocateFS:
|
||||
// resolve absolute directory path
|
||||
err := b.resolveAbsolutePathFromCaller()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// check if absolutePath exists on filesystem
|
||||
info, err := os.Stat(b.absolutePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// check if absolutePath is actually a directory
|
||||
if !info.IsDir() {
|
||||
err = errors.New("given name/path is not a directory")
|
||||
continue
|
||||
}
|
||||
return b, nil
|
||||
case LocateWorkingDirectory:
|
||||
// resolve absolute directory path
|
||||
err := b.resolveAbsolutePathFromWorkingDirectory()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// check if absolutePath exists on filesystem
|
||||
info, err := os.Stat(b.absolutePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// check if absolutePath is actually a directory
|
||||
if !info.IsDir() {
|
||||
err = errors.New("given name/path is not a directory")
|
||||
continue
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
err = fmt.Errorf("could not locate box %q", name)
|
||||
}
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// FindBox returns a Box instance for given name.
|
||||
// When the given name is a relative path, it's base path will be the calling pkg/cmd's source root.
|
||||
// When the given name is absolute, it's absolute. derp.
|
||||
// Make sure the path doesn't contain any sensitive information as it might be placed into generated go source (embedded).
|
||||
func FindBox(name string) (*Box, error) {
|
||||
return findBox(name, defaultLocateOrder)
|
||||
}
|
||||
|
||||
// MustFindBox returns a Box instance for given name, like FindBox does.
|
||||
// It does not return an error, instead it panics when an error occurs.
|
||||
func MustFindBox(name string) *Box {
|
||||
box, err := findBox(name, defaultLocateOrder)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return box
|
||||
}
|
||||
|
||||
// This is injected as a mutable function literal so that we can mock it out in
|
||||
// tests and return a fixed test file.
|
||||
var resolveAbsolutePathFromCaller = func(name string, nStackFrames int) (string, error) {
|
||||
_, callingGoFile, _, ok := runtime.Caller(nStackFrames)
|
||||
if !ok {
|
||||
return "", errors.New("couldn't find caller on stack")
|
||||
}
|
||||
|
||||
// resolve to proper path
|
||||
pkgDir := filepath.Dir(callingGoFile)
|
||||
// fix for go cover
|
||||
const coverPath = "_test/_obj_test"
|
||||
if !filepath.IsAbs(pkgDir) {
|
||||
if i := strings.Index(pkgDir, coverPath); i >= 0 {
|
||||
pkgDir = pkgDir[:i] + pkgDir[i+len(coverPath):] // remove coverPath
|
||||
pkgDir = filepath.Join(os.Getenv("GOPATH"), "src", pkgDir) // make absolute
|
||||
}
|
||||
}
|
||||
return filepath.Join(pkgDir, name), nil
|
||||
}
|
||||
|
||||
func (b *Box) resolveAbsolutePathFromCaller() error {
|
||||
path, err := resolveAbsolutePathFromCaller(b.name, 4)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.absolutePath = path
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (b *Box) resolveAbsolutePathFromWorkingDirectory() error {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.absolutePath = filepath.Join(path, b.name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsEmbedded indicates whether this box was embedded into the application
// (b.embed is set by the embedded locate method).
func (b *Box) IsEmbedded() bool {
	return b.embed != nil
}

// IsAppended indicates whether this box was appended to the application
// executable (b.appendd is set by the appended locate method).
func (b *Box) IsAppended() bool {
	return b.appendd != nil
}
|
||||
|
||||
// Time returns how actual the box is.
|
||||
// When the box is embedded, it's value is saved in the embedding code.
|
||||
// When the box is live, this methods returns time.Now()
|
||||
func (b *Box) Time() time.Time {
|
||||
if b.IsEmbedded() {
|
||||
return b.embed.Time
|
||||
}
|
||||
|
||||
if b.IsAppended() {
|
||||
return b.appendd.Time
|
||||
}
|
||||
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// Open opens a File from the box
|
||||
// If there is an error, it will be of type *os.PathError.
|
||||
func (b *Box) Open(name string) (*File, error) {
|
||||
if Debug {
|
||||
fmt.Printf("Open(%s)\n", name)
|
||||
}
|
||||
|
||||
if b.IsEmbedded() {
|
||||
if Debug {
|
||||
fmt.Println("Box is embedded")
|
||||
}
|
||||
|
||||
// trim prefix (paths are relative to box)
|
||||
name = strings.TrimLeft(name, "/")
|
||||
if Debug {
|
||||
fmt.Printf("Trying %s\n", name)
|
||||
}
|
||||
|
||||
// search for file
|
||||
ef := b.embed.Files[name]
|
||||
if ef == nil {
|
||||
if Debug {
|
||||
fmt.Println("Didn't find file in embed")
|
||||
}
|
||||
// file not found, try dir
|
||||
ed := b.embed.Dirs[name]
|
||||
if ed == nil {
|
||||
if Debug {
|
||||
fmt.Println("Didn't find dir in embed")
|
||||
}
|
||||
// dir not found, error out
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: name,
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
if Debug {
|
||||
fmt.Println("Found dir. Returning virtual dir")
|
||||
}
|
||||
vd := newVirtualDir(ed)
|
||||
return &File{virtualD: vd}, nil
|
||||
}
|
||||
|
||||
// box is embedded
|
||||
if Debug {
|
||||
fmt.Println("Found file. Returning virtual file")
|
||||
}
|
||||
vf := newVirtualFile(ef)
|
||||
return &File{virtualF: vf}, nil
|
||||
}
|
||||
|
||||
if b.IsAppended() {
|
||||
// trim prefix (paths are relative to box)
|
||||
name = strings.TrimLeft(name, "/")
|
||||
|
||||
// search for file
|
||||
appendedFile := b.appendd.Files[name]
|
||||
if appendedFile == nil {
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: name,
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
|
||||
// create new file
|
||||
f := &File{
|
||||
appendedF: appendedFile,
|
||||
}
|
||||
|
||||
// if this file is a directory, we want to be able to read and seek
|
||||
if !appendedFile.dir {
|
||||
// looks like malformed data in zip, error now
|
||||
if appendedFile.content == nil {
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: "name",
|
||||
Err: errors.New("error reading data from zip file"),
|
||||
}
|
||||
}
|
||||
// create new bytes.Reader
|
||||
f.appendedFileReader = bytes.NewReader(appendedFile.content)
|
||||
}
|
||||
|
||||
// all done
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// perform os open
|
||||
if Debug {
|
||||
fmt.Printf("Using os.Open(%s)", filepath.Join(b.absolutePath, name))
|
||||
}
|
||||
file, err := os.Open(filepath.Join(b.absolutePath, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &File{realF: file}, nil
|
||||
}
|
||||
|
||||
// Bytes returns the content of the file with given name as []byte.
|
||||
func (b *Box) Bytes(name string) ([]byte, error) {
|
||||
file, err := b.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
content, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return content, nil
|
||||
}
|
||||
|
||||
// MustBytes returns the content of the file with given name as []byte.
|
||||
// panic's on error.
|
||||
func (b *Box) MustBytes(name string) []byte {
|
||||
bts, err := b.Bytes(name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return bts
|
||||
}
|
||||
|
||||
// String returns the content of the file with given name as string.
|
||||
func (b *Box) String(name string) (string, error) {
|
||||
// check if box is embedded, optimized fast path
|
||||
if b.IsEmbedded() {
|
||||
// find file in embed
|
||||
ef := b.embed.Files[name]
|
||||
if ef == nil {
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
// return as string
|
||||
return ef.Content, nil
|
||||
}
|
||||
|
||||
bts, err := b.Bytes(name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(bts), nil
|
||||
}
|
||||
|
||||
// MustString returns the content of the file with given name as string.
|
||||
// panic's on error.
|
||||
func (b *Box) MustString(name string) string {
|
||||
str, err := b.String(name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return str
|
||||
}
|
||||
|
||||
// Name returns the name the box was created with.
func (b *Box) Name() string {
	return b.name
}
|
@ -0,0 +1,39 @@
|
||||
package rice
|
||||
|
||||
// LocateMethod defines how a box is located.
type LocateMethod int

// The available locate methods, in the order findBox tries them when listed.
const (
	LocateFS = LocateMethod(iota) // Locate on the filesystem according to package path.
	LocateAppended                // Locate boxes appended to the executable.
	LocateEmbedded                // Locate embedded boxes.
	LocateWorkingDirectory        // Locate on the binary's working directory.
)

// Config allows customizing the box lookup behavior.
type Config struct {
	// LocateOrder defines the priority order that boxes are searched for. By
	// default, the package global FindBox searches for embedded boxes first,
	// then appended boxes, and then finally boxes on the filesystem. That
	// search order may be customized by providing the ordered list here. Leaving
	// out a particular method will omit that from the search space. For
	// example, []LocateMethod{LocateEmbedded, LocateAppended} will never search
	// the filesystem for boxes.
	LocateOrder []LocateMethod
}
|
||||
|
||||
// FindBox searches for boxes using the LocateOrder of the config.
// An empty LocateOrder finds nothing and yields a "could not locate box" error.
func (c *Config) FindBox(boxName string) (*Box, error) {
	return findBox(boxName, c.LocateOrder)
}

// MustFindBox searches for boxes using the LocateOrder of the config, like
// FindBox does. It does not return an error, instead it panics when an error
// occurs.
func (c *Config) MustFindBox(boxName string) *Box {
	box, err := findBox(boxName, c.LocateOrder)
	if err != nil {
		panic(err)
	}
	return box
}
|
@ -0,0 +1,4 @@
|
||||
package rice
|
||||
|
||||
// Debug can be set to true to enable debugging
// (e.g. Box.Open prints its lookup trace to stdout).
var Debug = false
|
@ -0,0 +1,90 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/GeertJohan/go.rice/embedded"
|
||||
)
|
||||
|
||||
// re-type to make exported methods invisible to user (godoc);
// they're not required for the user.
// embeddedDirInfo implements os.FileInfo for an embedded directory.
type embeddedDirInfo embedded.EmbeddedDir

// Name returns the base name of the directory
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) Name() string {
	return ed.Filename
}

// Size always returns 0, directories have no content length here
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) Size() int64 {
	return 0
}

// Mode returns the file mode bits: read/execute for everyone, plus ModeDir
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) Mode() os.FileMode {
	return os.FileMode(0555 | os.ModeDir) // dr-xr-xr-x
}

// ModTime returns the modification time recorded at embed time
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) ModTime() time.Time {
	return ed.DirModTime
}

// IsDir returns the abbreviation for Mode().IsDir() (always true)
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) IsDir() bool {
	return true
}

// Sys returns the underlying data source (always nil)
// (implementing os.FileInfo)
func (ed *embeddedDirInfo) Sys() interface{} {
	return nil
}
|
||||
|
||||
// re-type to make exported methods invisible to user (godoc);
// they're not required for the user.
// embeddedFileInfo implements os.FileInfo for an embedded file.
type embeddedFileInfo embedded.EmbeddedFile

// Name returns the base name of the file
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) Name() string {
	return ef.Filename
}

// Size returns the length in bytes of the embedded content
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) Size() int64 {
	return int64(len(ef.Content))
}

// Mode returns the file mode bits: read/execute for everyone
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) Mode() os.FileMode {
	return os.FileMode(0555) // r-xr-xr-x
}

// ModTime returns the modification time recorded at embed time
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) ModTime() time.Time {
	return ef.FileModTime
}

// IsDir returns the abbreviation for Mode().IsDir() (always false)
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) IsDir() bool {
	return false
}

// Sys returns the underlying data source (always nil)
// (implementing os.FileInfo)
func (ef *embeddedFileInfo) Sys() interface{} {
	return nil
}
|
@ -0,0 +1,82 @@
|
||||
// Package embedded defines embedded data types that are shared between the go.rice package and generated code.
|
||||
package embedded
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
	// EmbedTypeGo identifies boxes embedded as generated Go source.
	EmbedTypeGo = 0
)

// EmbeddedBox defines an embedded box
type EmbeddedBox struct {
	Name      string                   // box name
	Time      time.Time                // embed time
	EmbedType int                      // kind of embedding (EmbedTypeGo, ...)
	Files     map[string]*EmbeddedFile // ALL embedded files by full path
	Dirs      map[string]*EmbeddedDir  // ALL embedded dirs by full path
}
|
||||
|
||||
// Link creates the ChildDirs and ChildFiles links in all EmbeddedDir's
|
||||
func (e *EmbeddedBox) Link() {
|
||||
for _, ed := range e.Dirs {
|
||||
ed.ChildDirs = make([]*EmbeddedDir, 0)
|
||||
ed.ChildFiles = make([]*EmbeddedFile, 0)
|
||||
}
|
||||
for path, ed := range e.Dirs {
|
||||
// skip for root, it'll create a recursion
|
||||
if path == "" {
|
||||
continue
|
||||
}
|
||||
parentDirpath, _ := filepath.Split(path)
|
||||
if strings.HasSuffix(parentDirpath, "/") {
|
||||
parentDirpath = parentDirpath[:len(parentDirpath)-1]
|
||||
}
|
||||
parentDir := e.Dirs[parentDirpath]
|
||||
if parentDir == nil {
|
||||
panic("parentDir `" + parentDirpath + "` is missing in embedded box")
|
||||
}
|
||||
parentDir.ChildDirs = append(parentDir.ChildDirs, ed)
|
||||
}
|
||||
for path, ef := range e.Files {
|
||||
dirpath, _ := filepath.Split(path)
|
||||
if strings.HasSuffix(dirpath, "/") {
|
||||
dirpath = dirpath[:len(dirpath)-1]
|
||||
}
|
||||
dir := e.Dirs[dirpath]
|
||||
if dir == nil {
|
||||
panic("dir `" + dirpath + "` is missing in embedded box")
|
||||
}
|
||||
dir.ChildFiles = append(dir.ChildFiles, ef)
|
||||
}
|
||||
}
|
||||
|
||||
// EmbeddedDir is instanced in the code generated by the rice tool and contains all necessary information about an embedded directory
type EmbeddedDir struct {
	Filename   string
	DirModTime time.Time
	ChildDirs  []*EmbeddedDir  // direct children, as returned by virtualDir.Readdir()
	ChildFiles []*EmbeddedFile // direct children, as returned by virtualDir.Readdir()
}

// EmbeddedFile is instanced in the code generated by the rice tool and contains all necessary information about an embedded file
type EmbeddedFile struct {
	Filename    string // filename
	FileModTime time.Time
	Content     string
}
|
||||
|
||||
// EmbeddedBoxes is a public register of embedded boxes, keyed by box name.
var EmbeddedBoxes = make(map[string]*EmbeddedBox)

// RegisterEmbeddedBox registers an EmbeddedBox under its name.
// It panics when a box with the same name was already registered.
func RegisterEmbeddedBox(name string, box *EmbeddedBox) {
	if _, exists := EmbeddedBoxes[name]; exists {
		panic(fmt.Sprintf("EmbeddedBox with name `%s` exists already", name))
	}
	EmbeddedBoxes[name] = box
}
|
@ -0,0 +1,171 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// File implements the io.Reader, io.Seeker, io.Closer and http.File interfaces
type File struct {
	// File abstracts file methods so the user doesn't see the difference between rice.virtualFile, rice.virtualDir and os.File
	// TODO: maybe use internal File interface and four implementations: *os.File, appendedFile, virtualFile, virtualDir
	// Exactly one of the backends below is set, depending on how the box was located.

	// real file on disk (live filesystem box)
	realF *os.File

	// when embedded (go)
	virtualF *virtualFile
	virtualD *virtualDir

	// when appended (zip); appendedFileReader is nil for directories and
	// after Close, which is how "closed" is represented for this backend
	appendedF          *appendedFile
	appendedFileReader *bytes.Reader
	// TODO: is appendedFileReader subject of races? Might need a lock here..
}
|
||||
|
||||
// Close is like (*os.File).Close()
|
||||
// Visit http://golang.org/pkg/os/#File.Close for more information
|
||||
func (f *File) Close() error {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedFileReader == nil {
|
||||
return errors.New("already closed")
|
||||
}
|
||||
f.appendedFileReader = nil
|
||||
return nil
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.close()
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.close()
|
||||
}
|
||||
return f.realF.Close()
|
||||
}
|
||||
|
||||
// Stat is like (*os.File).Stat()
|
||||
// Visit http://golang.org/pkg/os/#File.Stat for more information
|
||||
func (f *File) Stat() (os.FileInfo, error) {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedF.dir {
|
||||
return f.appendedF.dirInfo, nil
|
||||
}
|
||||
if f.appendedFileReader == nil {
|
||||
return nil, errors.New("file is closed")
|
||||
}
|
||||
return f.appendedF.zipFile.FileInfo(), nil
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.stat()
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.stat()
|
||||
}
|
||||
return f.realF.Stat()
|
||||
}
|
||||
|
||||
// Readdir is like (*os.File).Readdir()
|
||||
// Visit http://golang.org/pkg/os/#File.Readdir for more information
|
||||
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedF.dir {
|
||||
fi := make([]os.FileInfo, 0, len(f.appendedF.children))
|
||||
for _, childAppendedFile := range f.appendedF.children {
|
||||
if childAppendedFile.dir {
|
||||
fi = append(fi, childAppendedFile.dirInfo)
|
||||
} else {
|
||||
fi = append(fi, childAppendedFile.zipFile.FileInfo())
|
||||
}
|
||||
}
|
||||
return fi, nil
|
||||
}
|
||||
//++ TODO: is os.ErrInvalid the correct error for Readdir on file?
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.readdir(count)
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.readdir(count)
|
||||
}
|
||||
return f.realF.Readdir(count)
|
||||
}
|
||||
|
||||
// Readdirnames is like (*os.File).Readdirnames()
|
||||
// Visit http://golang.org/pkg/os/#File.Readdirnames for more information
|
||||
func (f *File) Readdirnames(count int) ([]string, error) {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedF.dir {
|
||||
names := make([]string, 0, len(f.appendedF.children))
|
||||
for _, childAppendedFile := range f.appendedF.children {
|
||||
if childAppendedFile.dir {
|
||||
names = append(names, childAppendedFile.dirInfo.name)
|
||||
} else {
|
||||
names = append(names, childAppendedFile.zipFile.FileInfo().Name())
|
||||
}
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
// os.ErrInvalid to match the os.SyscallError (readdirent: invalid argument) that os.File returns
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.readdirnames(count)
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.readdirnames(count)
|
||||
}
|
||||
return f.realF.Readdirnames(count)
|
||||
}
|
||||
|
||||
// Read is like (*os.File).Read()
|
||||
// Visit http://golang.org/pkg/os/#File.Read for more information
|
||||
func (f *File) Read(bts []byte) (int, error) {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedFileReader == nil {
|
||||
return 0, &os.PathError{
|
||||
Op: "read",
|
||||
Path: filepath.Base(f.appendedF.zipFile.Name),
|
||||
Err: errors.New("file is closed"),
|
||||
}
|
||||
}
|
||||
if f.appendedF.dir {
|
||||
return 0, &os.PathError{
|
||||
Op: "read",
|
||||
Path: filepath.Base(f.appendedF.zipFile.Name),
|
||||
Err: errors.New("is a directory"),
|
||||
}
|
||||
}
|
||||
return f.appendedFileReader.Read(bts)
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.read(bts)
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.read(bts)
|
||||
}
|
||||
return f.realF.Read(bts)
|
||||
}
|
||||
|
||||
// Seek is like (*os.File).Seek()
|
||||
// Visit http://golang.org/pkg/os/#File.Seek for more information
|
||||
func (f *File) Seek(offset int64, whence int) (int64, error) {
|
||||
if f.appendedF != nil {
|
||||
if f.appendedFileReader == nil {
|
||||
return 0, &os.PathError{
|
||||
Op: "seek",
|
||||
Path: filepath.Base(f.appendedF.zipFile.Name),
|
||||
Err: errors.New("file is closed"),
|
||||
}
|
||||
}
|
||||
return f.appendedFileReader.Seek(offset, whence)
|
||||
}
|
||||
if f.virtualF != nil {
|
||||
return f.virtualF.seek(offset, whence)
|
||||
}
|
||||
if f.virtualD != nil {
|
||||
return f.virtualD.seek(offset, whence)
|
||||
}
|
||||
return f.realF.Seek(offset, whence)
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HTTPBox implements http.FileSystem which allows the use of Box with a http.FileServer.
// e.g.: http.Handle("/", http.FileServer(rice.MustFindBox("http-files").HTTPBox()))
type HTTPBox struct {
	*Box
}

// HTTPBox creates a new HTTPBox from an existing Box
func (b *Box) HTTPBox() *HTTPBox {
	return &HTTPBox{b}
}

// Open returns a File using the http.File interface
// (delegates to Box.Open; satisfies http.FileSystem).
func (hb *HTTPBox) Open(name string) (http.File, error) {
	return hb.Box.Open(name)
}
|
@ -0,0 +1,19 @@
|
||||
package rice
|
||||
|
||||
import "os"
|
||||
|
||||
// SortByName allows an array of os.FileInfo objects
// to be easily sorted by filename using sort.Sort(SortByName(array))
type SortByName []os.FileInfo

func (f SortByName) Len() int           { return len(f) }
func (f SortByName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f SortByName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }

// SortByModified allows an array of os.FileInfo objects
// to be easily sorted by modified date using sort.Sort(SortByModified(array)).
// Note: sorts newest-first and compares at second precision (Unix()).
type SortByModified []os.FileInfo

func (f SortByModified) Len() int           { return len(f) }
func (f SortByModified) Less(i, j int) bool { return f[i].ModTime().Unix() > f[j].ModTime().Unix() }
func (f SortByModified) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
|
@ -0,0 +1,304 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/GeertJohan/go.rice/embedded"
|
||||
)
|
||||
|
||||
//++ TODO: IDEA: merge virtualFile and virtualDir, this decreases work done by rice.File
|
||||
|
||||
// virtualFile is a 'stateful' virtual file.
// virtualFile wraps an *EmbeddedFile for a call to Box.Open() and virtualizes 'read cursor' (offset) and 'closing'.
// virtualFile is only internally visible and should be exposed through rice.File
type virtualFile struct {
	*embedded.EmbeddedFile       // the actual embedded file, embedded to obtain methods
	offset                 int64 // read position on the virtual file
	closed                 bool  // closed when true
}

// newVirtualFile creates a new virtualFile for given EmbeddedFile,
// with the read cursor at the start and the file open.
func newVirtualFile(ef *embedded.EmbeddedFile) *virtualFile {
	vf := &virtualFile{
		EmbeddedFile: ef,
		offset:       0,
		closed:       false,
	}
	return vf
}
|
||||
|
||||
//++ TODO check for nil pointers in all these methods. When so: return os.PathError with Err: os.ErrInvalid
|
||||
|
||||
func (vf *virtualFile) close() error {
|
||||
if vf.closed {
|
||||
return &os.PathError{
|
||||
Op: "close",
|
||||
Path: vf.EmbeddedFile.Filename,
|
||||
Err: errors.New("already closed"),
|
||||
}
|
||||
}
|
||||
vf.EmbeddedFile = nil
|
||||
vf.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (vf *virtualFile) stat() (os.FileInfo, error) {
|
||||
if vf.closed {
|
||||
return nil, &os.PathError{
|
||||
Op: "stat",
|
||||
Path: vf.EmbeddedFile.Filename,
|
||||
Err: errors.New("bad file descriptor"),
|
||||
}
|
||||
}
|
||||
return (*embeddedFileInfo)(vf.EmbeddedFile), nil
|
||||
}
|
||||
|
||||
func (vf *virtualFile) readdir(count int) ([]os.FileInfo, error) {
|
||||
if vf.closed {
|
||||
return nil, &os.PathError{
|
||||
Op: "readdir",
|
||||
Path: vf.EmbeddedFile.Filename,
|
||||
Err: errors.New("bad file descriptor"),
|
||||
}
|
||||
}
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
|
||||
func (vf *virtualFile) readdirnames(count int) ([]string, error) {
|
||||
if vf.closed {
|
||||
return nil, &os.PathError{
|
||||
Op: "readdirnames",
|
||||
Path: vf.EmbeddedFile.Filename,
|
||||
Err: errors.New("bad file descriptor"),
|
||||
}
|
||||
}
|
||||
return nil, os.ErrInvalid
|
||||
}
|
||||
|
||||
// read copies up to len(bts) bytes of the embedded content into bts, starting
// at the virtual read cursor (vf.offset).
func (vf *virtualFile) read(bts []byte) (int, error) {
	if vf.closed {
		return 0, &os.PathError{
			Op:   "read",
			Path: vf.EmbeddedFile.Filename,
			Err:  errors.New("bad file descriptor"),
		}
	}

	end := vf.offset + int64(len(bts))

	if end >= int64(len(vf.Content)) {
		// end of file, so return what we have + EOF
		// NOTE(review): the cursor is rewound to 0 here, so a Read after EOF
		// restarts from the beginning instead of keeping returning (0, io.EOF)
		// like *os.File does — confirm callers rely on this before changing.
		n := copy(bts, vf.Content[vf.offset:])
		vf.offset = 0
		return n, io.EOF
	}

	n := copy(bts, vf.Content[vf.offset:end])
	vf.offset += int64(n)
	return n, nil

}
|
||||
|
||||
// seek moves the virtual read cursor, interpreting whence as the os.File
// SEEK_* constants. No bounds checking is performed (see TODOs below).
func (vf *virtualFile) seek(offset int64, whence int) (int64, error) {
	if vf.closed {
		return 0, &os.PathError{
			Op:   "seek",
			Path: vf.EmbeddedFile.Filename,
			Err:  errors.New("bad file descriptor"),
		}
	}
	var e error

	//++ TODO: check if this is correct implementation for seek
	switch whence {
	case os.SEEK_SET:
		//++ check if new offset isn't out of bounds, set e when it is, then break out of switch
		vf.offset = offset
	case os.SEEK_CUR:
		//++ check if new offset isn't out of bounds, set e when it is, then break out of switch
		vf.offset += offset
	case os.SEEK_END:
		//++ check if new offset isn't out of bounds, set e when it is, then break out of switch
		// NOTE(review): io.Seeker defines seek-from-end as size+offset (offset
		// normally negative); this computes size-offset, which only matches
		// the contract when offset == 0 — confirm before relying on it.
		vf.offset = int64(len(vf.EmbeddedFile.Content)) - offset
	}

	// e is never assigned above (bounds checks are still TODO), so this
	// branch is currently unreachable.
	if e != nil {
		return 0, &os.PathError{
			Op:   "seek",
			Path: vf.Filename,
			Err:  e,
		}
	}

	return vf.offset, nil
}
|
||||
|
||||
// virtualDir is a 'stateful' virtual directory.
// virtualDir wraps an *EmbeddedDir for a call to Box.Open() and virtualizes 'closing'.
// virtualDir is only internally visible and should be exposed through rice.File
type virtualDir struct {
	*embedded.EmbeddedDir
	offset int // readdir position on the directory
	closed bool
}

// newVirtualDir creates a new virtualDir for given EmbeddedDir,
// with the readdir cursor at the start and the directory open.
func newVirtualDir(ed *embedded.EmbeddedDir) *virtualDir {
	vd := &virtualDir{
		EmbeddedDir: ed,
		offset:      0,
		closed:      false,
	}
	return vd
}
|
||||
|
||||
// close marks the virtual directory as closed.
// Closing an already-closed directory returns an *os.PathError.
func (vd *virtualDir) close() error {
	//++ TODO: needs sync mutex?
	if vd.closed {
		return &os.PathError{
			Op:   "close",
			Path: vd.EmbeddedDir.Filename,
			Err:  errors.New("already closed"),
		}
	}
	vd.closed = true
	return nil
}

// stat returns the embedded directory's metadata as an os.FileInfo,
// or an error when the directory was closed.
func (vd *virtualDir) stat() (os.FileInfo, error) {
	if vd.closed {
		return nil, &os.PathError{
			Op:   "stat",
			Path: vd.EmbeddedDir.Filename,
			Err:  errors.New("bad file descriptor"),
		}
	}
	return (*embeddedDirInfo)(vd.EmbeddedDir), nil
}
|
||||
|
||||
// readdir returns FileInfo entries for this directory's children, sorted by
// name. With n <= 0 all entries are returned and the cursor resets; with
// n > 0 it pages through the entries, returning io.EOF on the final page.
func (vd *virtualDir) readdir(n int) ([]os.FileInfo, error) {

	if vd.closed {
		return nil, &os.PathError{
			Op:   "readdir",
			Path: vd.EmbeddedDir.Filename,
			Err:  errors.New("bad file descriptor"),
		}
	}

	// Build up the array of our contents
	var files []os.FileInfo

	// Add the child directories
	// NOTE(review): this rewrites child.Filename in place to its base name,
	// mutating the shared embedded data. filepath.Base is idempotent so
	// repeated calls are safe, but it is still a side effect on global state.
	for _, child := range vd.ChildDirs {
		child.Filename = filepath.Base(child.Filename)
		files = append(files, (*embeddedDirInfo)(child))
	}

	// Add the child files
	for _, child := range vd.ChildFiles {
		child.Filename = filepath.Base(child.Filename)
		files = append(files, (*embeddedFileInfo)(child))
	}

	// Sort it by filename (lexical order)
	sort.Sort(SortByName(files))

	// Return all contents if that's what is requested
	if n <= 0 {
		vd.offset = 0
		return files, nil
	}

	// If user has requested past the end of our list
	// return what we can and send an EOF; the cursor resets to the start
	if vd.offset+n >= len(files) {
		offset := vd.offset
		vd.offset = 0
		return files[offset:], io.EOF
	}

	offset := vd.offset
	vd.offset += n
	return files[offset : offset+n], nil

}
|
||||
|
||||
// readdirnames returns the base names of this directory's children, sorted.
// Paging semantics mirror readdir: n <= 0 returns everything, n > 0 pages and
// ends with io.EOF.
func (vd *virtualDir) readdirnames(n int) ([]string, error) {

	if vd.closed {
		return nil, &os.PathError{
			// NOTE(review): Op is "readdir" here, not "readdirnames" — looks
			// copied from readdir; confirm before depending on the Op value.
			Op:   "readdir",
			Path: vd.EmbeddedDir.Filename,
			Err:  errors.New("bad file descriptor"),
		}
	}

	// Build up the array of our contents
	var files []string

	// Add the child directories
	for _, child := range vd.ChildDirs {
		files = append(files, filepath.Base(child.Filename))
	}

	// Add the child files
	for _, child := range vd.ChildFiles {
		files = append(files, filepath.Base(child.Filename))
	}

	// Sort it by filename (lexical order)
	sort.Strings(files)

	// Return all contents if that's what is requested
	if n <= 0 {
		vd.offset = 0
		return files, nil
	}

	// If user has requested past the end of our list
	// return what we can and send an EOF; the cursor resets to the start
	if vd.offset+n >= len(files) {
		offset := vd.offset
		vd.offset = 0
		return files[offset:], io.EOF
	}

	offset := vd.offset
	vd.offset += n
	return files[offset : offset+n], nil
}
|
||||
|
||||
func (vd *virtualDir) read(bts []byte) (int, error) {
|
||||
if vd.closed {
|
||||
return 0, &os.PathError{
|
||||
Op: "read",
|
||||
Path: vd.EmbeddedDir.Filename,
|
||||
Err: errors.New("bad file descriptor"),
|
||||
}
|
||||
}
|
||||
return 0, &os.PathError{
|
||||
Op: "read",
|
||||
Path: vd.EmbeddedDir.Filename,
|
||||
Err: errors.New("is a directory"),
|
||||
}
|
||||
}
|
||||
|
||||
func (vd *virtualDir) seek(offset int64, whence int) (int64, error) {
|
||||
if vd.closed {
|
||||
return 0, &os.PathError{
|
||||
Op: "seek",
|
||||
Path: vd.EmbeddedDir.Filename,
|
||||
Err: errors.New("bad file descriptor"),
|
||||
}
|
||||
}
|
||||
return 0, &os.PathError{
|
||||
Op: "seek",
|
||||
Path: vd.Filename,
|
||||
Err: errors.New("is a directory"),
|
||||
}
|
||||
}
|
@ -0,0 +1,122 @@
|
||||
package rice
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Walk is like filepath.Walk()
// Visit http://golang.org/pkg/path/filepath/#Walk for more information
func (b *Box) Walk(path string, walkFn filepath.WalkFunc) error {

	pathFile, err := b.Open(path)
	if err != nil {
		return err
	}
	defer pathFile.Close()

	pathInfo, err := pathFile.Stat()
	if err != nil {
		return err
	}

	// Appended and embedded boxes are walked through the in-memory tree.
	if b.IsAppended() || b.IsEmbedded() {
		return b.walk(path, pathInfo, walkFn)
	}

	// We don't have any embedded or appended box so use live filesystem mode
	return filepath.Walk(filepath.Join(b.absolutePath, path), func(path string, info os.FileInfo, err error) error {

		// Strip out the box name from the returned paths so callbacks see
		// box-relative paths in both modes
		path = strings.TrimPrefix(path, b.absolutePath+string(os.PathSeparator))
		return walkFn(path, info, err)

	})

}
|
||||
|
||||
// walk recursively descends path.
|
||||
// See walk() in $GOROOT/src/pkg/path/filepath/path.go
|
||||
func (b *Box) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
|
||||
|
||||
err := walkFn(path, info, nil)
|
||||
if err != nil {
|
||||
if info.IsDir() && err == filepath.SkipDir {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
names, err := b.readDirNames(path)
|
||||
if err != nil {
|
||||
return walkFn(path, info, err)
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
|
||||
filename := filepath.ToSlash(filepath.Join(path, name))
|
||||
fileObject, err := b.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fileObject.Close()
|
||||
|
||||
fileInfo, err := fileObject.Stat()
|
||||
if err != nil {
|
||||
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = b.walk(filename, fileInfo, walkFn)
|
||||
if err != nil {
|
||||
if !fileInfo.IsDir() || err != filepath.SkipDir {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// readDirNames reads the directory named by path and returns a sorted list of directory entries.
|
||||
// See readDirNames() in $GOROOT/pkg/path/filepath/path.go
|
||||
func (b *Box) readDirNames(path string) ([]string, error) {
|
||||
|
||||
f, err := b.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
stat, err := f.Stat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !stat.IsDir() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
infos, err := f.Readdir(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var names []string
|
||||
|
||||
for _, info := range infos {
|
||||
names = append(names, info.Name())
|
||||
}
|
||||
|
||||
sort.Strings(names)
|
||||
return names, nil
|
||||
|
||||
}
|
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 shengxiang
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
@ -0,0 +1,101 @@
|
||||
package gops
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
mps "github.com/mitchellh/go-ps"
|
||||
)
|
||||
|
||||
type Process struct {
|
||||
mps.Process
|
||||
}
|
||||
|
||||
func NewProcess(pid int) (p Process, err error) {
|
||||
mp, err := mps.FindProcess(pid)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Process{
|
||||
Process: mp,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// func (p *Process) Mem() (m sigar.ProcMem, err error) {
|
||||
// err = m.Get(p.Pid())
|
||||
// return
|
||||
// }
|
||||
|
||||
type ProcInfo struct {
|
||||
Pid int `json:"pid"`
|
||||
Pids []int `json:"pids"`
|
||||
Rss int `json:"rss"`
|
||||
PCpu float64 `json:"pcpu"`
|
||||
}
|
||||
|
||||
func (pi *ProcInfo) Add(add ProcInfo) {
|
||||
pi.Rss += add.Rss
|
||||
pi.PCpu += add.PCpu
|
||||
}
|
||||
|
||||
// CPU Percent * 100
|
||||
// only linux and darwin works
|
||||
func (p *Process) ProcInfo() (pi ProcInfo, err error) {
|
||||
pi.Pid = p.Pid()
|
||||
cmd := exec.Command("ps", "-o", "pcpu,rss", "-p", strconv.Itoa(p.Pid()))
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
err = errors.New("ps err: " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
fields := strings.SplitN(string(output), "\n", 2)
|
||||
if len(fields) != 2 {
|
||||
err = errors.New("parse ps command out format error")
|
||||
return
|
||||
}
|
||||
_, err = fmt.Sscanf(fields[1], "%f %d", &pi.PCpu, &pi.Rss)
|
||||
pi.Rss *= 1024
|
||||
return
|
||||
}
|
||||
|
||||
// Get all child process
|
||||
func (p *Process) Children(recursive bool) (cps []Process) {
|
||||
pses, err := mps.Processes()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pidMap := make(map[int][]mps.Process, 0)
|
||||
for _, p := range pses {
|
||||
pidMap[p.PPid()] = append(pidMap[p.PPid()], p)
|
||||
}
|
||||
var travel func(int)
|
||||
travel = func(pid int) {
|
||||
for _, p := range pidMap[pid] {
|
||||
cps = append(cps, Process{p})
|
||||
if recursive {
|
||||
travel(p.Pid())
|
||||
}
|
||||
}
|
||||
}
|
||||
travel(p.Pid())
|
||||
return
|
||||
}
|
||||
|
||||
//Sum everything
|
||||
func (p *Process) ChildrenProcInfo(recursive bool) (pi ProcInfo) {
|
||||
cps := p.Children(recursive)
|
||||
for _, cp := range cps {
|
||||
info, er := cp.ProcInfo()
|
||||
if er != nil {
|
||||
continue
|
||||
}
|
||||
pi.Add(info)
|
||||
pi.Pids = append(pi.Pids, cp.Pid())
|
||||
}
|
||||
pi.Pid = p.Pid()
|
||||
return
|
||||
}
|
@ -0,0 +1,63 @@
|
||||
package pushover
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// API is the Pushover API endpoint.
|
||||
API = "https://api.pushover.net/1/messages.json"
|
||||
)
|
||||
|
||||
type apiResponse struct {
|
||||
Info string `json:"info"`
|
||||
Status int `json:"status"`
|
||||
Request string `json:"request"`
|
||||
Errors []string `json:"errors"`
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
type Params struct {
|
||||
Token string
|
||||
User string
|
||||
Title string
|
||||
Message string
|
||||
}
|
||||
|
||||
// Notify sends a push request to the Pushover API.
|
||||
func Notify(p Params) error {
|
||||
vals := make(url.Values)
|
||||
vals.Set("token", p.Token)
|
||||
vals.Set("user", p.User)
|
||||
vals.Set("message", p.Message)
|
||||
vals.Set("title", p.Title)
|
||||
|
||||
log.Println(vals.Encode())
|
||||
webClient := &http.Client{Timeout: 30 * time.Second}
|
||||
resp, err := webClient.PostForm(API, vals)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
log.Println("posted")
|
||||
|
||||
var r apiResponse
|
||||
if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
|
||||
return fmt.Errorf("decoding response: %s", err)
|
||||
}
|
||||
|
||||
if r.Status != 1 {
|
||||
return errors.New(strings.Join(r.Errors, ": ")) //noti.APIError{Site: "Pushover", Msg: strings.Join(r.Errors, ": ")}
|
||||
} else if strings.Contains(r.Info, "no active devices") {
|
||||
return errors.New(r.Info) //noti.APIError{Site: "Pushover", Msg: r.Info}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -1,19 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/codeskyblue/kexec"
|
||||
)
|
||||
|
||||
func main() {
|
||||
p := kexec.CommandString("python flask_main.py")
|
||||
p.Start()
|
||||
time.Sleep(3 * time.Second)
|
||||
err := p.Terminate(syscall.SIGKILL)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
import flask
|
||||
|
||||
app = flask.Flask(__name__)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(port=46732, debug=True)
|
@ -1,65 +0,0 @@
|
||||
package kexec
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestCommand(t *testing.T) {
|
||||
Convey("1 should equal 1", t, func() {
|
||||
So(1, ShouldEqual, 1)
|
||||
})
|
||||
|
||||
Convey("kexec should work as normal os/exec", t, func() {
|
||||
cmd := Command("echo", "-n", "123")
|
||||
data, err := cmd.Output()
|
||||
So(err, ShouldBeNil)
|
||||
So(string(data), ShouldEqual, "123")
|
||||
})
|
||||
|
||||
Convey("the terminate should kill proc", t, func() {
|
||||
cmd := CommandString("sleep 51")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
cmd.Start()
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
cmd.Terminate(syscall.SIGINT)
|
||||
err := cmd.Wait()
|
||||
So(err, ShouldNotBeNil)
|
||||
//So(err.Error(), ShouldEqual, "signal: interrupt")
|
||||
})
|
||||
|
||||
Convey("Should ok with call Wait twice", t, func() {
|
||||
cmd := CommandString("not-exists-command-xxl213 true")
|
||||
var err error
|
||||
err = cmd.Start()
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
err1 := cmd.Wait()
|
||||
So(err1, ShouldNotBeNil)
|
||||
err2 := cmd.Wait()
|
||||
So(err1, ShouldEqual, err2)
|
||||
})
|
||||
|
||||
Convey("Set user works", t, func() {
|
||||
u, err := user.Current()
|
||||
So(err, ShouldBeNil)
|
||||
// Set user must be root
|
||||
if u.Uid != "0" {
|
||||
return
|
||||
}
|
||||
|
||||
cmd := Command("whoami")
|
||||
err = cmd.SetUser("qard2")
|
||||
So(err, ShouldBeNil)
|
||||
|
||||
output, err := cmd.Output()
|
||||
So(err, ShouldBeNil)
|
||||
So(string(output), ShouldEqual, "qard2\n")
|
||||
})
|
||||
}
|
@ -1 +0,0 @@
|
||||
web: python flask_main.py
|
@ -1,11 +0,0 @@
|
||||
import flask
|
||||
|
||||
|
||||
app = flask.Flask(__name__)
|
||||
|
||||
@app.route('/')
|
||||
def homepage():
|
||||
return 'Home'
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True, host='0.0.0.0')
|
@ -1,22 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"kproc"
|
||||
"log"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
p := kproc.ProcString("python flask_main.py")
|
||||
p.Start()
|
||||
time.Sleep(10 * time.Second)
|
||||
err := p.Terminate(syscall.SIGKILL)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
out, _ := exec.Command("lsof", "-i:5000").CombinedOutput()
|
||||
fmt.Println(string(out))
|
||||
}
|
Binary file not shown.
@ -0,0 +1,14 @@
|
||||
package md2man
|
||||
|
||||
import (
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
// Render converts a markdown document into a roff formatted document.
|
||||
func Render(doc []byte) []byte {
|
||||
renderer := NewRoffRenderer()
|
||||
|
||||
return blackfriday.Run(doc,
|
||||
[]blackfriday.Option{blackfriday.WithRenderer(renderer),
|
||||
blackfriday.WithExtensions(renderer.GetExtensions())}...)
|
||||
}
|
@ -0,0 +1,336 @@
|
||||
package md2man
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/russross/blackfriday/v2"
|
||||
)
|
||||
|
||||
// roffRenderer implements the blackfriday.Renderer interface for creating
|
||||
// roff format (manpages) from markdown text
|
||||
type roffRenderer struct {
|
||||
extensions blackfriday.Extensions
|
||||
listCounters []int
|
||||
firstHeader bool
|
||||
firstDD bool
|
||||
listDepth int
|
||||
}
|
||||
|
||||
const (
|
||||
titleHeader = ".TH "
|
||||
topLevelHeader = "\n\n.SH "
|
||||
secondLevelHdr = "\n.SH "
|
||||
otherHeader = "\n.SS "
|
||||
crTag = "\n"
|
||||
emphTag = "\\fI"
|
||||
emphCloseTag = "\\fP"
|
||||
strongTag = "\\fB"
|
||||
strongCloseTag = "\\fP"
|
||||
breakTag = "\n.br\n"
|
||||
paraTag = "\n.PP\n"
|
||||
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
|
||||
linkTag = "\n\\[la]"
|
||||
linkCloseTag = "\\[ra]"
|
||||
codespanTag = "\\fB\\fC"
|
||||
codespanCloseTag = "\\fR"
|
||||
codeTag = "\n.PP\n.RS\n\n.nf\n"
|
||||
codeCloseTag = "\n.fi\n.RE\n"
|
||||
quoteTag = "\n.PP\n.RS\n"
|
||||
quoteCloseTag = "\n.RE\n"
|
||||
listTag = "\n.RS\n"
|
||||
listCloseTag = "\n.RE\n"
|
||||
dtTag = "\n.TP\n"
|
||||
dd2Tag = "\n"
|
||||
tableStart = "\n.TS\nallbox;\n"
|
||||
tableEnd = ".TE\n"
|
||||
tableCellStart = "T{\n"
|
||||
tableCellEnd = "\nT}\n"
|
||||
)
|
||||
|
||||
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
|
||||
// from markdown
|
||||
func NewRoffRenderer() *roffRenderer { // nolint: golint
|
||||
var extensions blackfriday.Extensions
|
||||
|
||||
extensions |= blackfriday.NoIntraEmphasis
|
||||
extensions |= blackfriday.Tables
|
||||
extensions |= blackfriday.FencedCode
|
||||
extensions |= blackfriday.SpaceHeadings
|
||||
extensions |= blackfriday.Footnotes
|
||||
extensions |= blackfriday.Titleblock
|
||||
extensions |= blackfriday.DefinitionLists
|
||||
return &roffRenderer{
|
||||
extensions: extensions,
|
||||
}
|
||||
}
|
||||
|
||||
// GetExtensions returns the list of extensions used by this renderer implementation
|
||||
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
|
||||
return r.extensions
|
||||
}
|
||||
|
||||
// RenderHeader handles outputting the header at document start
|
||||
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
|
||||
// disable hyphenation
|
||||
out(w, ".nh\n")
|
||||
}
|
||||
|
||||
// RenderFooter handles outputting the footer at the document end; the roff
|
||||
// renderer has no footer information
|
||||
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
|
||||
}
|
||||
|
||||
// RenderNode is called for each node in a markdown document; based on the node
|
||||
// type the equivalent roff output is sent to the writer
|
||||
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
|
||||
var walkAction = blackfriday.GoToNext
|
||||
|
||||
switch node.Type {
|
||||
case blackfriday.Text:
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
case blackfriday.Softbreak:
|
||||
out(w, crTag)
|
||||
case blackfriday.Hardbreak:
|
||||
out(w, breakTag)
|
||||
case blackfriday.Emph:
|
||||
if entering {
|
||||
out(w, emphTag)
|
||||
} else {
|
||||
out(w, emphCloseTag)
|
||||
}
|
||||
case blackfriday.Strong:
|
||||
if entering {
|
||||
out(w, strongTag)
|
||||
} else {
|
||||
out(w, strongCloseTag)
|
||||
}
|
||||
case blackfriday.Link:
|
||||
if !entering {
|
||||
out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag)
|
||||
}
|
||||
case blackfriday.Image:
|
||||
// ignore images
|
||||
walkAction = blackfriday.SkipChildren
|
||||
case blackfriday.Code:
|
||||
out(w, codespanTag)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, codespanCloseTag)
|
||||
case blackfriday.Document:
|
||||
break
|
||||
case blackfriday.Paragraph:
|
||||
// roff .PP markers break lists
|
||||
if r.listDepth > 0 {
|
||||
return blackfriday.GoToNext
|
||||
}
|
||||
if entering {
|
||||
out(w, paraTag)
|
||||
} else {
|
||||
out(w, crTag)
|
||||
}
|
||||
case blackfriday.BlockQuote:
|
||||
if entering {
|
||||
out(w, quoteTag)
|
||||
} else {
|
||||
out(w, quoteCloseTag)
|
||||
}
|
||||
case blackfriday.Heading:
|
||||
r.handleHeading(w, node, entering)
|
||||
case blackfriday.HorizontalRule:
|
||||
out(w, hruleTag)
|
||||
case blackfriday.List:
|
||||
r.handleList(w, node, entering)
|
||||
case blackfriday.Item:
|
||||
r.handleItem(w, node, entering)
|
||||
case blackfriday.CodeBlock:
|
||||
out(w, codeTag)
|
||||
escapeSpecialChars(w, node.Literal)
|
||||
out(w, codeCloseTag)
|
||||
case blackfriday.Table:
|
||||
r.handleTable(w, node, entering)
|
||||
case blackfriday.TableHead:
|
||||
case blackfriday.TableBody:
|
||||
case blackfriday.TableRow:
|
||||
// no action as cell entries do all the nroff formatting
|
||||
return blackfriday.GoToNext
|
||||
case blackfriday.TableCell:
|
||||
r.handleTableCell(w, node, entering)
|
||||
case blackfriday.HTMLSpan:
|
||||
// ignore other HTML tags
|
||||
default:
|
||||
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
|
||||
}
|
||||
return walkAction
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
switch node.Level {
|
||||
case 1:
|
||||
if !r.firstHeader {
|
||||
out(w, titleHeader)
|
||||
r.firstHeader = true
|
||||
break
|
||||
}
|
||||
out(w, topLevelHeader)
|
||||
case 2:
|
||||
out(w, secondLevelHdr)
|
||||
default:
|
||||
out(w, otherHeader)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
openTag := listTag
|
||||
closeTag := listCloseTag
|
||||
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// tags for definition lists handled within Item node
|
||||
openTag = ""
|
||||
closeTag = ""
|
||||
}
|
||||
if entering {
|
||||
r.listDepth++
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
r.listCounters = append(r.listCounters, 1)
|
||||
}
|
||||
out(w, openTag)
|
||||
} else {
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
r.listCounters = r.listCounters[:len(r.listCounters)-1]
|
||||
}
|
||||
out(w, closeTag)
|
||||
r.listDepth--
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
|
||||
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
|
||||
r.listCounters[len(r.listCounters)-1]++
|
||||
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
|
||||
// DT (definition term): line just before DD (see below).
|
||||
out(w, dtTag)
|
||||
r.firstDD = true
|
||||
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
|
||||
// DD (definition description): line that starts with ": ".
|
||||
//
|
||||
// We have to distinguish between the first DD and the
|
||||
// subsequent ones, as there should be no vertical
|
||||
// whitespace between the DT and the first DD.
|
||||
if r.firstDD {
|
||||
r.firstDD = false
|
||||
} else {
|
||||
out(w, dd2Tag)
|
||||
}
|
||||
} else {
|
||||
out(w, ".IP \\(bu 2\n")
|
||||
}
|
||||
} else {
|
||||
out(w, "\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
out(w, tableStart)
|
||||
// call walker to count cells (and rows?) so format section can be produced
|
||||
columns := countColumns(node)
|
||||
out(w, strings.Repeat("l ", columns)+"\n")
|
||||
out(w, strings.Repeat("l ", columns)+".\n")
|
||||
} else {
|
||||
out(w, tableEnd)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
|
||||
if entering {
|
||||
var start string
|
||||
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
|
||||
start = "\t"
|
||||
}
|
||||
if node.IsHeader {
|
||||
start += codespanTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
start += tableCellStart
|
||||
}
|
||||
out(w, start)
|
||||
} else {
|
||||
var end string
|
||||
if node.IsHeader {
|
||||
end = codespanCloseTag
|
||||
} else if nodeLiteralSize(node) > 30 {
|
||||
end = tableCellEnd
|
||||
}
|
||||
if node.Next == nil && end != tableCellEnd {
|
||||
// Last cell: need to carriage return if we are at the end of the
|
||||
// header row and content isn't wrapped in a "tablecell"
|
||||
end += crTag
|
||||
}
|
||||
out(w, end)
|
||||
}
|
||||
}
|
||||
|
||||
func nodeLiteralSize(node *blackfriday.Node) int {
|
||||
total := 0
|
||||
for n := node.FirstChild; n != nil; n = n.FirstChild {
|
||||
total += len(n.Literal)
|
||||
}
|
||||
return total
|
||||
}
|
||||
|
||||
// because roff format requires knowing the column count before outputting any table
|
||||
// data we need to walk a table tree and count the columns
|
||||
func countColumns(node *blackfriday.Node) int {
|
||||
var columns int
|
||||
|
||||
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
|
||||
switch node.Type {
|
||||
case blackfriday.TableRow:
|
||||
if !entering {
|
||||
return blackfriday.Terminate
|
||||
}
|
||||
case blackfriday.TableCell:
|
||||
if entering {
|
||||
columns++
|
||||
}
|
||||
default:
|
||||
}
|
||||
return blackfriday.GoToNext
|
||||
})
|
||||
return columns
|
||||
}
|
||||
|
||||
func out(w io.Writer, output string) {
|
||||
io.WriteString(w, output) // nolint: errcheck
|
||||
}
|
||||
|
||||
func escapeSpecialChars(w io.Writer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// escape initial apostrophe or period
|
||||
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
|
||||
out(w, "\\&")
|
||||
}
|
||||
|
||||
// directly copy normal characters
|
||||
org := i
|
||||
|
||||
for i < len(text) && text[i] != '\\' {
|
||||
i++
|
||||
}
|
||||
if i > org {
|
||||
w.Write(text[org:i]) // nolint: errcheck
|
||||
}
|
||||
|
||||
// escape a character
|
||||
if i >= len(text) {
|
||||
break
|
||||
}
|
||||
|
||||
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
|
||||
}
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright © 2012-2015 Carlos Castillo
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the “Software”), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@ -0,0 +1,5 @@
|
||||
go.zipexe
|
||||
=========
|
||||
|
||||
This module was taken as-is from https://github.com/cookieo9/resources-go.
|
||||
Documentation: https://godoc.org/github.com/daaku/go.zipexe
|
@ -0,0 +1,142 @@
|
||||
// Package zipexe attempts to open an executable binary file as a zip file.
|
||||
package zipexe
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"debug/elf"
|
||||
"debug/macho"
|
||||
"debug/pe"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Opens a zip file by path.
|
||||
func Open(path string) (*zip.Reader, error) {
|
||||
_, rd, err := OpenCloser(path)
|
||||
return rd, err
|
||||
}
|
||||
|
||||
// OpenCloser is like Open but returns an additional Closer to avoid leaking open files.
|
||||
func OpenCloser(path string) (io.Closer, *zip.Reader, error) {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
finfo, err := file.Stat()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
zr, err := NewReader(file, finfo.Size())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return file, zr, nil
|
||||
}
|
||||
|
||||
// Open a zip file, specially handling various binaries that may have been
|
||||
// augmented with zip data.
|
||||
func NewReader(rda io.ReaderAt, size int64) (*zip.Reader, error) {
|
||||
handlers := []func(io.ReaderAt, int64) (*zip.Reader, error){
|
||||
zip.NewReader,
|
||||
zipExeReaderMacho,
|
||||
zipExeReaderElf,
|
||||
zipExeReaderPe,
|
||||
}
|
||||
|
||||
for _, handler := range handlers {
|
||||
zfile, err := handler(rda, size)
|
||||
if err == nil {
|
||||
return zfile, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Couldn't Open As Executable")
|
||||
}
|
||||
|
||||
// zipExeReaderMacho treats the file as a Mach-O binary
|
||||
// (Mac OS X / Darwin executable) and attempts to find a zip archive.
|
||||
func zipExeReaderMacho(rda io.ReaderAt, size int64) (*zip.Reader, error) {
|
||||
file, err := macho.NewFile(rda)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var max int64
|
||||
for _, load := range file.Loads {
|
||||
seg, ok := load.(*macho.Segment)
|
||||
if ok {
|
||||
// Check if the segment contains a zip file
|
||||
if zfile, err := zip.NewReader(seg, int64(seg.Filesz)); err == nil {
|
||||
return zfile, nil
|
||||
}
|
||||
|
||||
// Otherwise move end of file pointer
|
||||
end := int64(seg.Offset + seg.Filesz)
|
||||
if end > max {
|
||||
max = end
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// No zip file within binary, try appended to end
|
||||
section := io.NewSectionReader(rda, max, size-max)
|
||||
return zip.NewReader(section, section.Size())
|
||||
}
|
||||
|
||||
// zipExeReaderPe treats the file as a Portable Exectuable binary
|
||||
// (Windows executable) and attempts to find a zip archive.
|
||||
func zipExeReaderPe(rda io.ReaderAt, size int64) (*zip.Reader, error) {
|
||||
file, err := pe.NewFile(rda)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var max int64
|
||||
for _, sec := range file.Sections {
|
||||
// Check if this section has a zip file
|
||||
if zfile, err := zip.NewReader(sec, int64(sec.Size)); err == nil {
|
||||
return zfile, nil
|
||||
}
|
||||
|
||||
// Otherwise move end of file pointer
|
||||
end := int64(sec.Offset + sec.Size)
|
||||
if end > max {
|
||||
max = end
|
||||
}
|
||||
}
|
||||
|
||||
// No zip file within binary, try appended to end
|
||||
section := io.NewSectionReader(rda, max, size-max)
|
||||
return zip.NewReader(section, section.Size())
|
||||
}
|
||||
|
||||
// zipExeReaderElf treats the file as a ELF binary
|
||||
// (linux/BSD/etc... executable) and attempts to find a zip archive.
|
||||
func zipExeReaderElf(rda io.ReaderAt, size int64) (*zip.Reader, error) {
|
||||
file, err := elf.NewFile(rda)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var max int64
|
||||
for _, sect := range file.Sections {
|
||||
if sect.Type == elf.SHT_NOBITS {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this section has a zip file
|
||||
if zfile, err := zip.NewReader(sect, int64(sect.Size)); err == nil {
|
||||
return zfile, nil
|
||||
}
|
||||
|
||||
// Otherwise move end of file pointer
|
||||
end := int64(sect.Offset + sect.Size)
|
||||
if end > max {
|
||||
max = end
|
||||
}
|
||||
}
|
||||
|
||||
// No zip file within binary, try appended to end
|
||||
section := io.NewSectionReader(rda, max, size-max)
|
||||
return zip.NewReader(section, section.Size())
|
||||
}
|
@ -1,5 +1,13 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
- "1.4"
|
||||
- "1.5"
|
||||
- "1.7"
|
||||
- "1.9"
|
||||
- "1.10"
|
||||
- "1.11"
|
||||
- "tip"
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
script: go test -v github.com/equinox-io/equinox github.com/equinox-io/equinox/proto
|
||||
|
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2017 Equinox
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@ -1,426 +0,0 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/equinox-io/equinox/internal/go-update/internal/binarydist"
|
||||
)
|
||||
|
||||
var (
|
||||
oldFile = []byte{0xDE, 0xAD, 0xBE, 0xEF}
|
||||
newFile = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
|
||||
newFileChecksum = sha256.Sum256(newFile)
|
||||
)
|
||||
|
||||
func cleanup(path string) {
|
||||
os.Remove(path)
|
||||
os.Remove(fmt.Sprintf(".%s.new", path))
|
||||
}
|
||||
|
||||
// we write with a separate name for each test so that we can run them in parallel
|
||||
func writeOldFile(path string, t *testing.T) {
|
||||
if err := ioutil.WriteFile(path, oldFile, 0777); err != nil {
|
||||
t.Fatalf("Failed to write file for testing preparation: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func validateUpdate(path string, err error, t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update: %v", err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read file post-update: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf, newFile) {
|
||||
t.Fatalf("File was not updated! Bytes read: %v, Bytes expected: %v", buf, newFile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplySimple(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestApplySimple"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
})
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestApplyOldSavePath(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestApplyOldSavePath"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
oldfName := "OldSavePath"
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
OldSavePath: oldfName,
|
||||
})
|
||||
validateUpdate(fName, err, t)
|
||||
|
||||
if _, err := os.Stat(oldfName); os.IsNotExist(err) {
|
||||
t.Fatalf("Failed to find the old file: %v", err)
|
||||
}
|
||||
|
||||
cleanup(oldfName)
|
||||
}
|
||||
|
||||
func TestVerifyChecksum(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyChecksum"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
Checksum: newFileChecksum[:],
|
||||
})
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestVerifyChecksumNegative(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyChecksumNegative"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
badChecksum := []byte{0x0A, 0x0B, 0x0C, 0xFF}
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
Checksum: badChecksum,
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Failed to detect bad checksum!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestApplyPatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestApplyPatch"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
patch := new(bytes.Buffer)
|
||||
err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create patch: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(patch, Options{
|
||||
TargetPath: fName,
|
||||
Patcher: NewBSDiffPatcher(),
|
||||
})
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestCorruptPatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestCorruptPatch"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
badPatch := []byte{0x44, 0x38, 0x86, 0x3c, 0x4f, 0x8d, 0x26, 0x54, 0xb, 0x11, 0xce, 0xfe, 0xc1, 0xc0, 0xf8, 0x31, 0x38, 0xa0, 0x12, 0x1a, 0xa2, 0x57, 0x2a, 0xe1, 0x3a, 0x48, 0x62, 0x40, 0x2b, 0x81, 0x12, 0xb1, 0x21, 0xa5, 0x16, 0xed, 0x73, 0xd6, 0x54, 0x84, 0x29, 0xa6, 0xd6, 0xb2, 0x1b, 0xfb, 0xe6, 0xbe, 0x7b, 0x70}
|
||||
err := Apply(bytes.NewReader(badPatch), Options{
|
||||
TargetPath: fName,
|
||||
Patcher: NewBSDiffPatcher(),
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Failed to detect corrupt patch!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyChecksumPatchNegative(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyChecksumPatchNegative"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
patch := new(bytes.Buffer)
|
||||
anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
|
||||
err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create patch: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(patch, Options{
|
||||
TargetPath: fName,
|
||||
Checksum: newFileChecksum[:],
|
||||
Patcher: NewBSDiffPatcher(),
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Failed to detect patch to wrong file!")
|
||||
}
|
||||
}
|
||||
|
||||
const ecdsaPublicKey = `
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEL8ThbSyEucsCxnd4dCZR2hIy5nea54ko
|
||||
O+jUUfIjkvwhCWzASm0lpCVdVpXKZXIe+NZ+44RQRv3+OqJkCCGzUgJkPNI3lxdG
|
||||
9zu8rbrnxISV06VQ8No7Ei9wiTpqmTBB
|
||||
-----END PUBLIC KEY-----
|
||||
`
|
||||
|
||||
const ecdsaPrivateKey = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDBttCB/1NOY4T+WrG4FSV49Ayn3gK1DNzfGaJ01JUXeiNFCWQM2pqpU
|
||||
om8ATPP/dkegBwYFK4EEACKhZANiAAQvxOFtLIS5ywLGd3h0JlHaEjLmd5rniSg7
|
||||
6NRR8iOS/CEJbMBKbSWkJV1Wlcplch741n7jhFBG/f46omQIIbNSAmQ80jeXF0b3
|
||||
O7ytuufEhJXTpVDw2jsSL3CJOmqZMEE=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
|
||||
const rsaPublicKey = `
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxSWmu7trWKAwDFjiCN2D
|
||||
Tk2jj2sgcr/CMlI4cSSiIOHrXCFxP1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKab
|
||||
b9ead+kD0kxk7i2bFYvKX43oq66IW0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4
|
||||
y20C59dPr9Dpcz8DZkdLsBV6YKF6Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjT
|
||||
x4xRnjgTRRRlZvRtALHMUkIChgxDOhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv5
|
||||
5fhJ08Rz7mmZmtH5JxTK5XTquo59sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7Nrf
|
||||
fQIDAQAB
|
||||
-----END PUBLIC KEY-----`
|
||||
|
||||
const rsaPrivateKey = `
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAxSWmu7trWKAwDFjiCN2DTk2jj2sgcr/CMlI4cSSiIOHrXCFx
|
||||
P1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKabb9ead+kD0kxk7i2bFYvKX43oq66I
|
||||
W0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4y20C59dPr9Dpcz8DZkdLsBV6YKF6
|
||||
Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjTx4xRnjgTRRRlZvRtALHMUkIChgxD
|
||||
OhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv55fhJ08Rz7mmZmtH5JxTK5XTquo59
|
||||
sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7NrffQIDAQABAoIBAAkN+6RvrTR61voa
|
||||
Mvd5RQiZpEN4Bht/Fyo8gH8h0Zh1B9xJZOwlmMZLS5fdtHlfLEhR8qSrGDBL61vq
|
||||
I8KkhEsUufF78EL+YzxVN+Q7cWYGHIOWFokqza7hzpSxUQO6lPOMQ1eIZaNueJTB
|
||||
Zu07/47ISPPg/bXzgGVcpYlTCPTjUwKjtfyMqvX9AD7fIyYRm6zfE7EHj1J2sBFt
|
||||
Yz1OGELg6HfJwXfpnPfBvftD0hWGzJ78Bp71fPJe6n5gnqmSqRvrcXNWFnH/yqkN
|
||||
d6vPIxD6Z3LjvyZpkA7JillLva2L/zcIFhg4HZvQnWd8/PpDnUDonu36hcj4SC5j
|
||||
W4aVPLkCgYEA4XzNKWxqYcajzFGZeSxlRHupSAl2MT7Cc5085MmE7dd31wK2T8O4
|
||||
n7N4bkm/rjTbX85NsfWdKtWb6mpp8W3VlLP0rp4a/12OicVOkg4pv9LZDmY0sRlE
|
||||
YuDJk1FeCZ50UrwTZI3rZ9IhZHhkgVA6uWAs7tYndONkxNHG0pjqs4sCgYEA39MZ
|
||||
JwMqo3qsPntpgP940cCLflEsjS9hYNO3+Sv8Dq3P0HLVhBYajJnotf8VuU0fsQZG
|
||||
grmtVn1yThFbMq7X1oY4F0XBA+paSiU18c4YyUnwax2u4sw9U/Q9tmQUZad5+ueT
|
||||
qriMBwGv+ewO+nQxqvAsMUmemrVzrfwA5Oct+hcCgYAfiyXoNZJsOy2O15twqBVC
|
||||
j0oPGcO+/9iT89sg5lACNbI+EdMPNYIOVTzzsL1v0VUfAe08h++Enn1BPcG0VHkc
|
||||
ZFBGXTfJoXzfKQrkw7ZzbzuOGB4m6DH44xlP0oIlNlVvfX/5ASF9VJf3RiBJNsAA
|
||||
TsP6ZVr/rw/ZuL7nlxy+IQKBgDhL/HOXlE3yOQiuOec8WsNHTs7C1BXe6PtVxVxi
|
||||
988pYK/pclL6zEq5G5NLSceF4obAMVQIJ9UtUGbabrncyGUo9UrFPLsjYvprSZo8
|
||||
YHegpVwL50UcYgCP2kXZ/ldjPIcjYDz8lhvdDMor2cidGTEJn9P11HLNWP9V91Ob
|
||||
4jCZAoGAPNRSC5cC8iP/9j+s2/kdkfWJiNaolPYAUrmrkL6H39PYYZM5tnhaIYJV
|
||||
Oh9AgABamU0eb3p3vXTISClVgV7ifq1HyZ7BSUhMfaY2Jk/s3sUHCWFxPZe9sgEG
|
||||
KinIY/373KIkIV/5g4h2v1w330IWcfptxKcY/Er3DJr38f695GE=
|
||||
-----END RSA PRIVATE KEY-----`
|
||||
|
||||
func signec(privatePEM string, source []byte, t *testing.T) []byte {
|
||||
parseFn := func(p []byte) (crypto.Signer, error) { return x509.ParseECPrivateKey(p) }
|
||||
return sign(parseFn, privatePEM, source, t)
|
||||
}
|
||||
|
||||
func signrsa(privatePEM string, source []byte, t *testing.T) []byte {
|
||||
parseFn := func(p []byte) (crypto.Signer, error) { return x509.ParsePKCS1PrivateKey(p) }
|
||||
return sign(parseFn, privatePEM, source, t)
|
||||
}
|
||||
|
||||
func sign(parsePrivKey func([]byte) (crypto.Signer, error), privatePEM string, source []byte, t *testing.T) []byte {
|
||||
block, _ := pem.Decode([]byte(privatePEM))
|
||||
if block == nil {
|
||||
t.Fatalf("Failed to parse private key PEM")
|
||||
}
|
||||
|
||||
priv, err := parsePrivKey(block.Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse private key DER: %v", err)
|
||||
}
|
||||
|
||||
checksum := sha256.Sum256(source)
|
||||
sig, err := priv.Sign(rand.Reader, checksum[:], crypto.SHA256)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to sign: %v", sig)
|
||||
}
|
||||
|
||||
return sig
|
||||
}
|
||||
|
||||
func TestVerifyECSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signec(ecdsaPrivateKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestVerifyRSASignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{
|
||||
TargetPath: fName,
|
||||
Verifier: NewRSAVerifier(),
|
||||
}
|
||||
err := opts.SetPublicKeyPEM([]byte(rsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signrsa(rsaPrivateKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestVerifyFailBadSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyFailBadSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{
|
||||
TargetPath: fName,
|
||||
Signature: []byte{0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA},
|
||||
}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not fail with bad signature")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyFailNoSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignatureWithPEM"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not fail with empty signature")
|
||||
}
|
||||
}
|
||||
|
||||
const wrongKey = `
|
||||
-----BEGIN EC PRIVATE KEY-----
|
||||
MIGkAgEBBDBzqYp6N2s8YWYifBjS03/fFfmGeIPcxQEi+bbFeekIYt8NIKIkhD+r
|
||||
hpaIwSmot+qgBwYFK4EEACKhZANiAAR0EC8Usbkc4k30frfEB2ECmsIghu9DJSqE
|
||||
RbH7jfq2ULNv8tN/clRjxf2YXgp+iP3SQF1R1EYERKpWr8I57pgfIZtoZXjwpbQC
|
||||
VBbP/Ff+05HOqwPC7rJMy1VAJLKg7Cw=
|
||||
-----END EC PRIVATE KEY-----
|
||||
`
|
||||
|
||||
func TestVerifyFailWrongSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyFailWrongSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signec(wrongKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Verified an update that was signed by an untrusted key!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignatureButNoPublicKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestSignatureButNoPublicKey"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
Signature: signec(ecdsaPrivateKey, newFile, t),
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update with a signautre verification when no public key was specified!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPublicKeyButNoSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestPublicKeyButNoSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
if err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey)); err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
err := Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update with no signautre when a public key was specified!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteError(t *testing.T) {
|
||||
t.Parallel()
|
||||
fName := "TestWriteError"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
openFile = func(name string, flags int, perm os.FileMode) (*os.File, error) {
|
||||
f, err := os.OpenFile(name, flags, perm)
|
||||
|
||||
// simulate Write() error by closing the file prematurely
|
||||
f.Close()
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{TargetPath: fName})
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update to an empty file")
|
||||
}
|
||||
}
|
@ -1,93 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
func mustOpen(path string) *os.File {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func mustReadAll(r io.Reader) []byte {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func fileCmp(a, b *os.File) int64 {
|
||||
sa, err := a.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
sb, err := b.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if sa != sb {
|
||||
return sa
|
||||
}
|
||||
|
||||
_, err = a.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = b.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pa, err := ioutil.ReadAll(a)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pb, err := ioutil.ReadAll(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
for i := range pa {
|
||||
if pa[i] != pb[i] {
|
||||
return int64(i)
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func mustWriteRandFile(path string, size int) *os.File {
|
||||
p := make([]byte, size)
|
||||
_, err := rand.Read(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = f.Write(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = f.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
67
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff_test.go
generated
vendored
67
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff_test.go
generated
vendored
@ -1,67 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var diffT = []struct {
|
||||
old *os.File
|
||||
new *os.File
|
||||
}{
|
||||
{
|
||||
old: mustWriteRandFile("test.old", 1e3),
|
||||
new: mustWriteRandFile("test.new", 1e3),
|
||||
},
|
||||
{
|
||||
old: mustOpen("testdata/sample.old"),
|
||||
new: mustOpen("testdata/sample.new"),
|
||||
},
|
||||
}
|
||||
|
||||
func TestDiff(t *testing.T) {
|
||||
for _, s := range diffT {
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
exp, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cmd := exec.Command("bsdiff", s.old.Name(), s.new.Name(), exp.Name())
|
||||
cmd.Stdout = os.Stdout
|
||||
err = cmd.Run()
|
||||
os.Remove(exp.Name())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = Diff(s.old, s.new, got)
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
_, err = got.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
gotBuf := mustReadAll(got)
|
||||
expBuf := mustReadAll(exp)
|
||||
|
||||
if !bytes.Equal(gotBuf, expBuf) {
|
||||
t.Fail()
|
||||
t.Logf("diff %s %s", s.old.Name(), s.new.Name())
|
||||
t.Logf("%s: len(got) = %d", got.Name(), len(gotBuf))
|
||||
t.Logf("%s: len(exp) = %d", exp.Name(), len(expBuf))
|
||||
i := matchlen(gotBuf, expBuf)
|
||||
t.Logf("produced different output at pos %d; %d != %d", i, gotBuf[i], expBuf[i])
|
||||
}
|
||||
}
|
||||
}
|
@ -1,62 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPatch(t *testing.T) {
|
||||
mustWriteRandFile("test.old", 1e3)
|
||||
mustWriteRandFile("test.new", 1e3)
|
||||
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
err = exec.Command("bsdiff", "test.old", "test.new", "test.patch").Run()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = Patch(mustOpen("test.old"), got, mustOpen("test.patch"))
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
ref, err := got.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Logf("got %d bytes", ref)
|
||||
if n := fileCmp(got, mustOpen("test.new")); n > -1 {
|
||||
t.Fatalf("produced different output at pos %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatchHk(t *testing.T) {
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
err = Patch(mustOpen("testdata/sample.old"), got, mustOpen("testdata/sample.patch"))
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
ref, err := got.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Logf("got %d bytes", ref)
|
||||
if n := fileCmp(got, mustOpen("testdata/sample.new")); n > -1 {
|
||||
t.Fatalf("produced different output at pos %d", n)
|
||||
}
|
||||
}
|
33
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/sort_test.go
generated
vendored
33
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/sort_test.go
generated
vendored
@ -1,33 +0,0 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var sortT = [][]byte{
|
||||
mustRandBytes(1000),
|
||||
mustReadAll(mustOpen("test.old")),
|
||||
[]byte("abcdefabcdef"),
|
||||
}
|
||||
|
||||
func TestQsufsort(t *testing.T) {
|
||||
for _, s := range sortT {
|
||||
I := qsufsort(s)
|
||||
for i := 1; i < len(I); i++ {
|
||||
if bytes.Compare(s[I[i-1]:], s[I[i]:]) > 0 {
|
||||
t.Fatalf("unsorted at %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustRandBytes(n int) []byte {
|
||||
b := make([]byte, n)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
203
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_test.go
generated
vendored
203
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_test.go
generated
vendored
@ -1,203 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin linux freebsd netbsd windows
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
|
||||
|
||||
executableEnvValueMatch = "match"
|
||||
executableEnvValueDelete = "delete"
|
||||
)
|
||||
|
||||
func TestPrintExecutable(t *testing.T) {
|
||||
ef, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
t.Log("Executable:", ef)
|
||||
}
|
||||
func TestPrintExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
t.Log("Executable Folder:", ef)
|
||||
}
|
||||
func TestExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
if ef[len(ef)-1] == filepath.Separator {
|
||||
t.Fatal("ExecutableFolder ends with a trailing slash.")
|
||||
}
|
||||
}
|
||||
func TestExecutableMatch(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
// fullpath to be of the form "dir/prog".
|
||||
dir := filepath.Dir(filepath.Dir(ep))
|
||||
fullpath, err := filepath.Rel(dir, ep)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Rel: %v", err)
|
||||
}
|
||||
// Make child start with a relative program path.
|
||||
// Alter argv[0] for child to verify getting real path without argv[0].
|
||||
cmd := &exec.Cmd{
|
||||
Dir: dir,
|
||||
Path: fullpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) failed: %v", err)
|
||||
}
|
||||
outs := string(out)
|
||||
if !filepath.IsAbs(outs) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
||||
}
|
||||
if !sameFile(outs, ep) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutableDelete(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip()
|
||||
}
|
||||
fpath, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
stderrBuff := &bytes.Buffer{}
|
||||
stdoutBuff := &bytes.Buffer{}
|
||||
cmd := &exec.Cmd{
|
||||
Path: fpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
|
||||
Stdin: r,
|
||||
Stderr: stderrBuff,
|
||||
Stdout: stdoutBuff,
|
||||
}
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) start failed: %v", err)
|
||||
}
|
||||
|
||||
tempPath := fpath + "_copy"
|
||||
_ = os.Remove(tempPath)
|
||||
|
||||
err = copyFile(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("copy file failed: %v", err)
|
||||
}
|
||||
err = os.Remove(fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("remove running test file failed: %v", err)
|
||||
}
|
||||
err = os.Rename(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("rename copy to previous name failed: %v", err)
|
||||
}
|
||||
|
||||
w.Write([]byte{0})
|
||||
w.Close()
|
||||
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
t.Fatalf("exec wait failed: %v", err)
|
||||
}
|
||||
|
||||
childPath := stderrBuff.String()
|
||||
if !filepath.IsAbs(childPath) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", childPath)
|
||||
}
|
||||
if !sameFile(childPath, fpath) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
|
||||
}
|
||||
}
|
||||
|
||||
func sameFile(fn1, fn2 string) bool {
|
||||
fi1, err := os.Stat(fn1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fi2, err := os.Stat(fn2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(fi1, fi2)
|
||||
}
|
||||
func copyFile(dest, src string) error {
|
||||
df, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer df.Close()
|
||||
|
||||
sf, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sf.Close()
|
||||
|
||||
_, err = io.Copy(df, sf)
|
||||
return err
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
env := os.Getenv(executableEnvVar)
|
||||
switch env {
|
||||
case "":
|
||||
os.Exit(m.Run())
|
||||
case executableEnvValueMatch:
|
||||
// First chdir to another path.
|
||||
dir := "/"
|
||||
if runtime.GOOS == "windows" {
|
||||
dir = filepath.VolumeName(".")
|
||||
}
|
||||
os.Chdir(dir)
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
case executableEnvValueDelete:
|
||||
bb := make([]byte, 1)
|
||||
var err error
|
||||
n, err := os.Stdin.Read(bb)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
if n != 1 {
|
||||
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
|
||||
os.Exit(2)
|
||||
}
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
@ -1,203 +0,0 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin linux freebsd netbsd windows
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
|
||||
|
||||
executableEnvValueMatch = "match"
|
||||
executableEnvValueDelete = "delete"
|
||||
)
|
||||
|
||||
func TestPrintExecutable(t *testing.T) {
|
||||
ef, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
t.Log("Executable:", ef)
|
||||
}
|
||||
func TestPrintExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
t.Log("Executable Folder:", ef)
|
||||
}
|
||||
func TestExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
if ef[len(ef)-1] == filepath.Separator {
|
||||
t.Fatal("ExecutableFolder ends with a trailing slash.")
|
||||
}
|
||||
}
|
||||
func TestExecutableMatch(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
// fullpath to be of the form "dir/prog".
|
||||
dir := filepath.Dir(filepath.Dir(ep))
|
||||
fullpath, err := filepath.Rel(dir, ep)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Rel: %v", err)
|
||||
}
|
||||
// Make child start with a relative program path.
|
||||
// Alter argv[0] for child to verify getting real path without argv[0].
|
||||
cmd := &exec.Cmd{
|
||||
Dir: dir,
|
||||
Path: fullpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) failed: %v", err)
|
||||
}
|
||||
outs := string(out)
|
||||
if !filepath.IsAbs(outs) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
||||
}
|
||||
if !sameFile(outs, ep) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutableDelete(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip()
|
||||
}
|
||||
fpath, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
stderrBuff := &bytes.Buffer{}
|
||||
stdoutBuff := &bytes.Buffer{}
|
||||
cmd := &exec.Cmd{
|
||||
Path: fpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
|
||||
Stdin: r,
|
||||
Stderr: stderrBuff,
|
||||
Stdout: stdoutBuff,
|
||||
}
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) start failed: %v", err)
|
||||
}
|
||||
|
||||
tempPath := fpath + "_copy"
|
||||
_ = os.Remove(tempPath)
|
||||
|
||||
err = copyFile(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("copy file failed: %v", err)
|
||||
}
|
||||
err = os.Remove(fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("remove running test file failed: %v", err)
|
||||
}
|
||||
err = os.Rename(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("rename copy to previous name failed: %v", err)
|
||||
}
|
||||
|
||||
w.Write([]byte{0})
|
||||
w.Close()
|
||||
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
t.Fatalf("exec wait failed: %v", err)
|
||||
}
|
||||
|
||||
childPath := stderrBuff.String()
|
||||
if !filepath.IsAbs(childPath) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", childPath)
|
||||
}
|
||||
if !sameFile(childPath, fpath) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
|
||||
}
|
||||
}
|
||||
|
||||
func sameFile(fn1, fn2 string) bool {
|
||||
fi1, err := os.Stat(fn1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fi2, err := os.Stat(fn2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(fi1, fi2)
|
||||
}
|
||||
func copyFile(dest, src string) error {
|
||||
df, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer df.Close()
|
||||
|
||||
sf, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sf.Close()
|
||||
|
||||
_, err = io.Copy(df, sf)
|
||||
return err
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
env := os.Getenv(executableEnvVar)
|
||||
switch env {
|
||||
case "":
|
||||
os.Exit(m.Run())
|
||||
case executableEnvValueMatch:
|
||||
// First chdir to another path.
|
||||
dir := "/"
|
||||
if runtime.GOOS == "windows" {
|
||||
dir = filepath.VolumeName(".")
|
||||
}
|
||||
os.Chdir(dir)
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
case executableEnvValueDelete:
|
||||
bb := make([]byte, 1)
|
||||
var err error
|
||||
n, err := os.Stdin.Read(bb)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
if n != 1 {
|
||||
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
|
||||
os.Exit(2)
|
||||
}
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
// +build go1.7
|
||||
|
||||
package equinox
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// CheckContext is like Check but includes a context.
|
||||
func CheckContext(ctx context.Context, appID string, opts Options) (Response, error) {
|
||||
var req, err = checkRequest(appID, &opts)
|
||||
|
||||
if err != nil {
|
||||
return Response{}, err
|
||||
}
|
||||
|
||||
return doCheckRequest(opts, req.WithContext(ctx))
|
||||
}
|
||||
|
||||
// ApplyContext is like Apply but includes a context.
|
||||
func (r Response) ApplyContext(ctx context.Context) error {
|
||||
var req, opts, err = r.applyRequest()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return r.applyUpdate(req.WithContext(ctx), opts)
|
||||
}
|
@ -1,183 +0,0 @@
|
||||
package equinox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/equinox-io/equinox/proto"
|
||||
)
|
||||
|
||||
const fakeAppID = "fake_app_id"
|
||||
|
||||
var (
|
||||
fakeBinary = []byte{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
|
||||
newFakeBinary = []byte{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2}
|
||||
ts *httptest.Server
|
||||
key *ecdsa.PrivateKey
|
||||
sha string
|
||||
newSHA string
|
||||
signature string
|
||||
)
|
||||
|
||||
func init() {
|
||||
shaBytes := sha256.Sum256(fakeBinary)
|
||||
sha = hex.EncodeToString(shaBytes[:])
|
||||
newSHABytes := sha256.Sum256(newFakeBinary)
|
||||
newSHA = hex.EncodeToString(newSHABytes[:])
|
||||
|
||||
var err error
|
||||
key, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate ecdsa key: %v", err))
|
||||
}
|
||||
sig, err := key.Sign(rand.Reader, newSHABytes[:], nil)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to sign new binary: %v", err))
|
||||
}
|
||||
signature = hex.EncodeToString(sig)
|
||||
}
|
||||
|
||||
func TestNotAvailable(t *testing.T) {
|
||||
opts := setup(t, "TestNotAvailable", proto.Response{
|
||||
Available: false,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
_, err := Check(fakeAppID, opts)
|
||||
if err != NotAvailableErr {
|
||||
t.Fatalf("Expected not available error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndToEnd(t *testing.T) {
|
||||
opts := setup(t, "TestEndtoEnd", proto.Response{
|
||||
Available: true,
|
||||
Release: proto.Release{
|
||||
Version: "0.1.2.3",
|
||||
Title: "Release Title",
|
||||
Description: "Release Description",
|
||||
CreateDate: time.Now(),
|
||||
},
|
||||
Checksum: newSHA,
|
||||
Signature: signature,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
resp, err := Check(fakeAppID, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed check: %v", err)
|
||||
}
|
||||
err = resp.Apply()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed apply: %v", err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadFile(opts.TargetPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read file: %v", err)
|
||||
}
|
||||
if !bytes.Equal(buf, newFakeBinary) {
|
||||
t.Fatalf("Binary did not update to new expected value. Got %v, expected %v", buf, newFakeBinary)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPatch(t *testing.T) {
|
||||
opts := setup(t, "TestInavlidPatch", proto.Response{
|
||||
Available: true,
|
||||
Release: proto.Release{
|
||||
Version: "0.1.2.3",
|
||||
Title: "Release Title",
|
||||
Description: "Release Description",
|
||||
CreateDate: time.Now(),
|
||||
},
|
||||
DownloadURL: "bad-request",
|
||||
Checksum: newSHA,
|
||||
Signature: signature,
|
||||
Patch: proto.PatchBSDiff,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
resp, err := Check(fakeAppID, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed check: %v", err)
|
||||
}
|
||||
err = resp.Apply()
|
||||
if err == nil {
|
||||
t.Fatalf("Apply succeeded")
|
||||
}
|
||||
if err.Error() != "error downloading patch: bad-request" {
|
||||
t.Fatalf("Expected a different error message: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func setup(t *testing.T, name string, resp proto.Response) Options {
|
||||
checkUserAgent := func(req *http.Request) {
|
||||
if req.Header.Get("User-Agent") != userAgent {
|
||||
t.Errorf("Expected user agent to be %s, not %s", userAgent, req.Header.Get("User-Agent"))
|
||||
}
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
var req proto.Request
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode proto request: %v", err)
|
||||
}
|
||||
if resp.Available {
|
||||
if req.AppID != fakeAppID {
|
||||
t.Fatalf("Unexpected app ID. Got %v, expected %v", err)
|
||||
}
|
||||
if req.CurrentSHA256 != sha {
|
||||
t.Fatalf("Unexpected request SHA: %v", sha)
|
||||
}
|
||||
}
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
|
||||
// Keying off the download URL may not be the best idea...
|
||||
if resp.DownloadURL == "bad-request" {
|
||||
mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
http.Error(w, "bad-request", http.StatusBadRequest)
|
||||
})
|
||||
} else {
|
||||
mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
w.Write(newFakeBinary)
|
||||
})
|
||||
}
|
||||
|
||||
ts = httptest.NewServer(mux)
|
||||
resp.DownloadURL = ts.URL + "/bin"
|
||||
|
||||
var opts Options
|
||||
opts.CheckURL = ts.URL + "/check"
|
||||
opts.PublicKey = key.Public()
|
||||
|
||||
if name != "" {
|
||||
opts.TargetPath = name
|
||||
ioutil.WriteFile(name, fakeBinary, 0644)
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func cleanup(opts Options) {
|
||||
if opts.TargetPath != "" {
|
||||
os.Remove(opts.TargetPath)
|
||||
}
|
||||
ts.Close()
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,136 +0,0 @@
|
||||
package rbuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
cv "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// new tests just for atomic version
|
||||
|
||||
// same set of tests for non-atomic rbuf:
|
||||
func TestAtomicRingBufReadWrite(t *testing.T) {
|
||||
b := NewAtomicFixedSizeRingBuf(5)
|
||||
|
||||
data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
|
||||
|
||||
cv.Convey("Given a AtomicFixedSizeRingBuf of size 5", t, func() {
|
||||
cv.Convey("Write(), Bytes(), and Read() should put and get bytes", func() {
|
||||
n, err := b.Write(data[0:5])
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.readable, cv.ShouldEqual, 5)
|
||||
if n != 5 {
|
||||
fmt.Printf("should have been able to write 5 bytes.\n")
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cv.So(b.Bytes(false), cv.ShouldResemble, data[0:5])
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink)
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(b.Bytes(false), cv.ShouldResemble, data[3:5])
|
||||
cv.So(sink, cv.ShouldResemble, data[0:3])
|
||||
})
|
||||
|
||||
cv.Convey("Write() more than 5 should give back ErrShortWrite", func() {
|
||||
b.Reset()
|
||||
cv.So(b.readable, cv.ShouldEqual, 0)
|
||||
n, err := b.Write(data[0:10])
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, io.ErrShortWrite)
|
||||
cv.So(b.readable, cv.ShouldEqual, 5)
|
||||
if n != 5 {
|
||||
fmt.Printf("should have been able to write 5 bytes.\n")
|
||||
}
|
||||
cv.So(b.Bytes(false), cv.ShouldResemble, data[0:5])
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink)
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(b.Bytes(false), cv.ShouldResemble, data[3:5])
|
||||
cv.So(sink, cv.ShouldResemble, data[0:3])
|
||||
})
|
||||
|
||||
cv.Convey("we should be able to wrap data and then get it back in Bytes(false)", func() {
|
||||
b.Reset()
|
||||
|
||||
n, err := b.Write(data[0:3])
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink) // put b.beg at 3
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.readable, cv.ShouldEqual, 0)
|
||||
|
||||
n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
by := b.Bytes(false)
|
||||
cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
|
||||
|
||||
})
|
||||
|
||||
cv.Convey("AtomicFixedSizeRingBuf::WriteTo() should work with wrapped data", func() {
|
||||
b.Reset()
|
||||
|
||||
n, err := b.Write(data[0:3])
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink) // put b.beg at 3
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.readable, cv.ShouldEqual, 0)
|
||||
|
||||
n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
|
||||
|
||||
var bb bytes.Buffer
|
||||
m, err := b.WriteTo(&bb)
|
||||
|
||||
cv.So(m, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
by := bb.Bytes()
|
||||
cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
|
||||
|
||||
})
|
||||
|
||||
cv.Convey("AtomicFixedSizeRingBuf::ReadFrom() should work with wrapped data", func() {
|
||||
b.Reset()
|
||||
var bb bytes.Buffer
|
||||
n, err := b.ReadFrom(&bb)
|
||||
cv.So(n, cv.ShouldEqual, 0)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
// write 4, then read 4 bytes
|
||||
m, err := b.Write(data[0:4])
|
||||
cv.So(m, cv.ShouldEqual, 4)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 4)
|
||||
k, err := b.Read(sink) // put b.beg at 4
|
||||
cv.So(k, cv.ShouldEqual, 4)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.readable, cv.ShouldEqual, 0)
|
||||
cv.So(b.Beg, cv.ShouldEqual, 4)
|
||||
|
||||
bbread := bytes.NewBuffer(data[4:9])
|
||||
n, err = b.ReadFrom(bbread) // wrap 4 bytes around to the front, 5 bytes total.
|
||||
|
||||
by := b.Bytes(false)
|
||||
cv.So(by, cv.ShouldResemble, data[4:9]) // but still get them back continguous from the ping-pong buffering
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
}
|
@ -0,0 +1,232 @@
|
||||
package rbuf
|
||||
|
||||
// copyright (c) 2016, Jason E. Aten
|
||||
// license: MIT
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// Float64RingBuf:
|
||||
//
|
||||
// a fixed-size circular ring buffer of float64
|
||||
//
|
||||
type Float64RingBuf struct {
|
||||
A []float64
|
||||
N int // MaxView, the total size of A, whether or not in use.
|
||||
Beg int // start of in-use data in A
|
||||
Readable int // number of float64 available in A (in use)
|
||||
}
|
||||
|
||||
// constructor. NewFloat64RingBuf will allocate internally
|
||||
// a slice of maxViewItems float64.
|
||||
func NewFloat64RingBuf(maxViewItems int) *Float64RingBuf {
|
||||
n := maxViewItems
|
||||
r := &Float64RingBuf{
|
||||
N: n,
|
||||
Beg: 0,
|
||||
Readable: 0,
|
||||
}
|
||||
r.A = make([]float64, n, n)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// TwoContig returns all readable float64, but in two separate slices,
|
||||
// to avoid copying. The two slices are from the same buffer, but
|
||||
// are not contiguous. Either or both may be empty slices.
|
||||
func (b *Float64RingBuf) TwoContig(makeCopy bool) (first []float64, second []float64) {
|
||||
|
||||
extent := b.Beg + b.Readable
|
||||
if extent <= b.N {
|
||||
// we fit contiguously in this buffer without wrapping to the other.
|
||||
// Let second stay an empty slice.
|
||||
return b.A[b.Beg:(b.Beg + b.Readable)], second
|
||||
}
|
||||
|
||||
return b.A[b.Beg:b.N], b.A[0:(extent % b.N)]
|
||||
}
|
||||
|
||||
// Earliest returns the earliest written value v. ok will be
|
||||
// true unless the ring is empty, in which case ok will be false,
|
||||
// and v will be zero.
|
||||
func (b *Float64RingBuf) Earliest() (v float64, ok bool) {
|
||||
if b.Readable == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
return b.A[b.Beg], true
|
||||
}
|
||||
|
||||
// Values returns all readable float64 in a single buffer. Calling this function
|
||||
// might allocate a new buffer to store the elements contiguously.
|
||||
func (b *Float64RingBuf) Values() []float64 {
|
||||
first, second := b.TwoContig(false)
|
||||
|
||||
if len(first) == 0 {
|
||||
return second
|
||||
}
|
||||
|
||||
if len(second) == 0 {
|
||||
return first
|
||||
}
|
||||
|
||||
out := make([]float64, len(first) + len(second))
|
||||
|
||||
copy(out, first)
|
||||
copy(out[len(first):], second)
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
// ReadFloat64():
|
||||
//
|
||||
// from bytes.Buffer.Read(): Read reads the next len(p) float64
|
||||
// pointers from the buffer or until the buffer is drained. The return
|
||||
// value n is the number of bytes read. If the buffer has no data
|
||||
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
|
||||
func (b *Float64RingBuf) ReadFloat64(p []float64) (n int, err error) {
|
||||
return b.readAndMaybeAdvance(p, true)
|
||||
}
|
||||
|
||||
// ReadWithoutAdvance(): if you want to Read the data and leave
|
||||
// it in the buffer, so as to peek ahead for example.
|
||||
func (b *Float64RingBuf) ReadWithoutAdvance(p []float64) (n int, err error) {
|
||||
return b.readAndMaybeAdvance(p, false)
|
||||
}
|
||||
|
||||
func (b *Float64RingBuf) readAndMaybeAdvance(p []float64, doAdvance bool) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if b.Readable == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
extent := b.Beg + b.Readable
|
||||
if extent <= b.N {
|
||||
n += copy(p, b.A[b.Beg:extent])
|
||||
} else {
|
||||
n += copy(p, b.A[b.Beg:b.N])
|
||||
if n < len(p) {
|
||||
n += copy(p[n:], b.A[0:(extent%b.N)])
|
||||
}
|
||||
}
|
||||
if doAdvance {
|
||||
b.Advance(n)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//
|
||||
// WriteAndMaybeOverwriteOldestData always consumes the full
|
||||
// buffer p, even if that means blowing away the oldest
|
||||
// unread bytes in the ring to make room. In reality, only the last
|
||||
// min(len(p),b.N) bytes of p will end up being written to the ring.
|
||||
//
|
||||
// This allows the ring to act as a record of the most recent
|
||||
// b.N bytes of data -- a kind of temporal LRU cache, so the
|
||||
// speak. The linux kernel's dmesg ring buffer is similar.
|
||||
//
|
||||
func (b *Float64RingBuf) WriteAndMaybeOverwriteOldestData(p []float64) (n int, err error) {
|
||||
writeCapacity := b.N - b.Readable
|
||||
if len(p) > writeCapacity {
|
||||
b.Advance(len(p) - writeCapacity)
|
||||
}
|
||||
startPos := 0
|
||||
if len(p) > b.N {
|
||||
startPos = len(p) - b.N
|
||||
}
|
||||
n, err = b.Write(p[startPos:])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
//
|
||||
// Write writes len(p) float64 values from p to
|
||||
// the underlying data stream.
|
||||
// It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
// Write must return a non-nil error if it returns n < len(p).
|
||||
//
|
||||
func (b *Float64RingBuf) Write(p []float64) (n int, err error) {
|
||||
for {
|
||||
if len(p) == 0 {
|
||||
// nothing (left) to copy in; notice we shorten our
|
||||
// local copy p (below) as we read from it.
|
||||
return
|
||||
}
|
||||
|
||||
writeCapacity := b.N - b.Readable
|
||||
if writeCapacity <= 0 {
|
||||
// we are all full up already.
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
if len(p) > writeCapacity {
|
||||
err = io.ErrShortWrite
|
||||
// leave err set and
|
||||
// keep going, write what we can.
|
||||
}
|
||||
|
||||
writeStart := (b.Beg + b.Readable) % b.N
|
||||
|
||||
upperLim := intMin(writeStart+writeCapacity, b.N)
|
||||
|
||||
k := copy(b.A[writeStart:upperLim], p)
|
||||
|
||||
n += k
|
||||
b.Readable += k
|
||||
p = p[k:]
|
||||
|
||||
// we can fill from b.A[0:something] from
|
||||
// p's remainder, so loop
|
||||
}
|
||||
}
|
||||
|
||||
// Reset quickly forgets any data stored in the ring buffer. The
|
||||
// data is still there, but the ring buffer will ignore it and
|
||||
// overwrite those buffers as new data comes in.
|
||||
func (b *Float64RingBuf) Reset() {
|
||||
b.Beg = 0
|
||||
b.Readable = 0
|
||||
}
|
||||
|
||||
// Advance(): non-standard, but better than Next(),
|
||||
// because we don't have to unwrap our buffer and pay the cpu time
|
||||
// for the copy that unwrapping may need.
|
||||
// Useful in conjuction/after ReadWithoutAdvance() above.
|
||||
func (b *Float64RingBuf) Advance(n int) {
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
if n > b.Readable {
|
||||
n = b.Readable
|
||||
}
|
||||
b.Readable -= n
|
||||
b.Beg = (b.Beg + n) % b.N
|
||||
}
|
||||
|
||||
// Adopt(): non-standard.
|
||||
//
|
||||
// For efficiency's sake, (possibly) take ownership of
|
||||
// already allocated slice offered in me.
|
||||
//
|
||||
// If me is large we will adopt it, and we will potentially then
|
||||
// write to the me buffer.
|
||||
// If we already have a bigger buffer, copy me into the existing
|
||||
// buffer instead.
|
||||
func (b *Float64RingBuf) Adopt(me []float64) {
|
||||
n := len(me)
|
||||
if n > b.N {
|
||||
b.A = me
|
||||
b.N = n
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
} else {
|
||||
// we already have a larger buffer, reuse it.
|
||||
copy(b.A, me)
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
}
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
package rbuf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
cv "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestPointerReadWrite(t *testing.T) {
|
||||
b := NewPointerRingBuf(5)
|
||||
|
||||
data := []interface{}{}
|
||||
for i := 0; i < 10; i++ {
|
||||
data = append(data, interface{}(i))
|
||||
}
|
||||
|
||||
cv.Convey("PointerRingBuf::PushAndMaybeOverwriteOldestData() should auto advance", t, func() {
|
||||
b.Reset()
|
||||
n, err := b.PushAndMaybeOverwriteOldestData(data[:3])
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 3)
|
||||
|
||||
n, err = b.PushAndMaybeOverwriteOldestData(data[3:5])
|
||||
cv.So(n, cv.ShouldEqual, 2)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 5)
|
||||
check := make([]interface{}, 5)
|
||||
n, err = b.ReadPtrs(check)
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(check, cv.ShouldResemble, data[:5])
|
||||
|
||||
n, err = b.PushAndMaybeOverwriteOldestData(data[5:10])
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
|
||||
n, err = b.ReadWithoutAdvance(check)
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(check, cv.ShouldResemble, data[5:10])
|
||||
|
||||
// check TwoConfig
|
||||
q, r := b.TwoContig()
|
||||
|
||||
//p("len q = %v", len(q))
|
||||
//p("len r = %v", len(r))
|
||||
|
||||
found := make([]bool, 10)
|
||||
for _, iface := range q {
|
||||
q0 := iface.(int)
|
||||
found[q0] = true
|
||||
}
|
||||
|
||||
for _, iface := range r {
|
||||
r0 := iface.(int)
|
||||
found[r0] = true
|
||||
}
|
||||
|
||||
totTrue := 0
|
||||
for i := range found {
|
||||
if found[i] {
|
||||
totTrue++
|
||||
}
|
||||
}
|
||||
cv.So(totTrue, cv.ShouldEqual, 5)
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func p(format string, a ...interface{}) {
|
||||
fmt.Printf("\n"+format+"\n", a...)
|
||||
}
|
@ -1,191 +0,0 @@
|
||||
package rbuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
cv "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
func TestRingBufReadWrite(t *testing.T) {
|
||||
b := NewFixedSizeRingBuf(5)
|
||||
|
||||
data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
|
||||
|
||||
cv.Convey("Given a FixedSizeRingBuf of size 5", t, func() {
|
||||
cv.Convey("Write(), Bytes(), and Read() should put and get bytes", func() {
|
||||
n, err := b.Write(data[0:5])
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 5)
|
||||
if n != 5 {
|
||||
fmt.Printf("should have been able to write 5 bytes.\n")
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[0:5])
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink)
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[3:5])
|
||||
cv.So(sink, cv.ShouldResemble, data[0:3])
|
||||
})
|
||||
|
||||
cv.Convey("Write() more than 5 should give back ErrShortWrite", func() {
|
||||
b.Reset()
|
||||
cv.So(b.Readable, cv.ShouldEqual, 0)
|
||||
n, err := b.Write(data[0:10])
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, io.ErrShortWrite)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 5)
|
||||
if n != 5 {
|
||||
fmt.Printf("should have been able to write 5 bytes.\n")
|
||||
}
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[0:5])
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink)
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[3:5])
|
||||
cv.So(sink, cv.ShouldResemble, data[0:3])
|
||||
})
|
||||
|
||||
cv.Convey("we should be able to wrap data and then get it back in Bytes()", func() {
|
||||
b.Reset()
|
||||
|
||||
n, err := b.Write(data[0:3])
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink) // put b.beg at 3
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 0)
|
||||
|
||||
n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
by := b.Bytes()
|
||||
cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
|
||||
|
||||
})
|
||||
|
||||
cv.Convey("FixedSizeRingBuf::WriteTo() should work with wrapped data", func() {
|
||||
b.Reset()
|
||||
|
||||
n, err := b.Write(data[0:3])
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 3)
|
||||
n, err = b.Read(sink) // put b.beg at 3
|
||||
cv.So(n, cv.ShouldEqual, 3)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 0)
|
||||
|
||||
n, err = b.Write(data[3:8]) // wrap 3 bytes around to the front
|
||||
|
||||
var bb bytes.Buffer
|
||||
m, err := b.WriteTo(&bb)
|
||||
|
||||
cv.So(m, cv.ShouldEqual, 5)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
by := bb.Bytes()
|
||||
cv.So(by, cv.ShouldResemble, data[3:8]) // but still get them back from the ping-pong buffering
|
||||
|
||||
})
|
||||
|
||||
cv.Convey("FixedSizeRingBuf::ReadFrom() should work with wrapped data", func() {
|
||||
b.Reset()
|
||||
var bb bytes.Buffer
|
||||
n, err := b.ReadFrom(&bb)
|
||||
cv.So(n, cv.ShouldEqual, 0)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
// write 4, then read 4 bytes
|
||||
m, err := b.Write(data[0:4])
|
||||
cv.So(m, cv.ShouldEqual, 4)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
|
||||
sink := make([]byte, 4)
|
||||
k, err := b.Read(sink) // put b.beg at 4
|
||||
cv.So(k, cv.ShouldEqual, 4)
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(b.Readable, cv.ShouldEqual, 0)
|
||||
cv.So(b.Beg, cv.ShouldEqual, 4)
|
||||
|
||||
bbread := bytes.NewBuffer(data[4:9])
|
||||
n, err = b.ReadFrom(bbread) // wrap 4 bytes around to the front, 5 bytes total.
|
||||
|
||||
by := b.Bytes()
|
||||
cv.So(by, cv.ShouldResemble, data[4:9]) // but still get them back continguous from the ping-pong buffering
|
||||
})
|
||||
cv.Convey("FixedSizeRingBuf::WriteAndMaybeOverwriteOldestData() should auto advance", func() {
|
||||
b.Reset()
|
||||
n, err := b.WriteAndMaybeOverwriteOldestData(data[:5])
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(n, cv.ShouldEqual, 5)
|
||||
|
||||
n, err = b.WriteAndMaybeOverwriteOldestData(data[5:7])
|
||||
cv.So(n, cv.ShouldEqual, 2)
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[2:7])
|
||||
|
||||
n, err = b.WriteAndMaybeOverwriteOldestData(data[0:9])
|
||||
cv.So(err, cv.ShouldEqual, nil)
|
||||
cv.So(n, cv.ShouldEqual, 9)
|
||||
cv.So(b.Bytes(), cv.ShouldResemble, data[4:9])
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestNextPrev(t *testing.T) {
|
||||
b := NewFixedSizeRingBuf(6)
|
||||
data := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
|
||||
|
||||
cv.Convey("Given a FixedSizeRingBuf of size 6, filled with 4 elements at various begin points, then Nextpos() and Prev() should return the correct positions or <0 if done", t, func() {
|
||||
k := b.N
|
||||
for i := 0; i < b.N; i++ {
|
||||
b.Reset()
|
||||
b.Beg = i
|
||||
_, err := b.Write(data[0:k])
|
||||
panicOn(err)
|
||||
// cannot go prev to first
|
||||
cv.So(b.Prevpos(i), cv.ShouldEqual, -1)
|
||||
// cannot go after last
|
||||
cv.So(b.Nextpos((i+k-1)%b.N), cv.ShouldEqual, -1)
|
||||
// in the middle we should be okay
|
||||
for j := 1; j < k-1; j++ {
|
||||
r := (i + j) % b.N
|
||||
prev := b.Prevpos(r)
|
||||
next := b.Nextpos(r)
|
||||
cv.So(prev >= 0, cv.ShouldBeTrue)
|
||||
cv.So(next >= 0, cv.ShouldBeTrue)
|
||||
if next > r {
|
||||
cv.So(next, cv.ShouldEqual, r+1)
|
||||
} else {
|
||||
cv.So(next, cv.ShouldEqual, 0)
|
||||
}
|
||||
if prev < r {
|
||||
cv.So(prev, cv.ShouldEqual, r-1)
|
||||
} else {
|
||||
cv.So(prev, cv.ShouldEqual, b.N-1)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func panicOn(err error) {
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
@ -1,13 +1,201 @@
|
||||
Copyright 2011-2016 Canonical Ltd.
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
@ -0,0 +1,13 @@
|
||||
Copyright 2011-2016 Canonical Ltd.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
File diff suppressed because it is too large
Load Diff
@ -1,501 +0,0 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
. "gopkg.in/check.v1"
|
||||
"gopkg.in/yaml.v2"
|
||||
"net"
|
||||
"os"
|
||||
)
|
||||
|
||||
var marshalIntTest = 123
|
||||
|
||||
var marshalTests = []struct {
|
||||
value interface{}
|
||||
data string
|
||||
}{
|
||||
{
|
||||
nil,
|
||||
"null\n",
|
||||
}, {
|
||||
&struct{}{},
|
||||
"{}\n",
|
||||
}, {
|
||||
map[string]string{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "hi"},
|
||||
"v: hi\n",
|
||||
}, {
|
||||
map[string]string{"v": "true"},
|
||||
"v: \"true\"\n",
|
||||
}, {
|
||||
map[string]string{"v": "false"},
|
||||
"v: \"false\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": true},
|
||||
"v: true\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": false},
|
||||
"v: false\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 10},
|
||||
"v: 10\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -10},
|
||||
"v: -10\n",
|
||||
}, {
|
||||
map[string]uint{"v": 42},
|
||||
"v: 42\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]int64{"v": int64(4294967296)},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]uint64{"v": 4294967296},
|
||||
"v: 4294967296\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": "10"},
|
||||
"v: \"10\"\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": 0.1},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": float64(0.1)},
|
||||
"v: 0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": -0.1},
|
||||
"v: -0.1\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(+1)},
|
||||
"v: .inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.Inf(-1)},
|
||||
"v: -.inf\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": math.NaN()},
|
||||
"v: .nan\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": nil},
|
||||
"v: null\n",
|
||||
}, {
|
||||
map[string]interface{}{"v": ""},
|
||||
"v: \"\"\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B"}},
|
||||
"v:\n- A\n- B\n",
|
||||
}, {
|
||||
map[string][]string{"v": []string{"A", "B\nC"}},
|
||||
"v:\n- A\n- |-\n B\n C\n",
|
||||
}, {
|
||||
map[string][]interface{}{"v": []interface{}{"A", 1, map[string][]int{"B": []int{2, 3}}}},
|
||||
"v:\n- A\n- 1\n- B:\n - 2\n - 3\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": map[interface{}]interface{}{"b": "c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
map[string]interface{}{"a": "-"},
|
||||
"a: '-'\n",
|
||||
},
|
||||
|
||||
// Simple values.
|
||||
{
|
||||
&marshalIntTest,
|
||||
"123\n",
|
||||
},
|
||||
|
||||
// Structures
|
||||
{
|
||||
&struct{ Hello string }{"world"},
|
||||
"hello: world\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B string
|
||||
}
|
||||
}{struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{&struct{ B string }{"c"}},
|
||||
"a:\n b: c\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct {
|
||||
B string
|
||||
}
|
||||
}{},
|
||||
"a: null\n",
|
||||
}, {
|
||||
&struct{ A int }{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A []int }{[]int{1, 2}},
|
||||
"a:\n- 1\n- 2\n",
|
||||
}, {
|
||||
&struct {
|
||||
B int "a"
|
||||
}{1},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct{ A bool }{true},
|
||||
"a: true\n",
|
||||
},
|
||||
|
||||
// Conditional flag
|
||||
{
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
}, {
|
||||
&struct {
|
||||
A int "a,omitempty"
|
||||
B int "b,omitempty"
|
||||
}{0, 0},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{nil},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A *struct{ X, y int } "a,omitempty,flow"
|
||||
}{&struct{ X, y int }{}},
|
||||
"a: {x: 0}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{1, 2}},
|
||||
"a: {x: 1}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct{ X, y int } "a,omitempty,flow"
|
||||
}{struct{ X, y int }{0, 1}},
|
||||
"{}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A float64 "a,omitempty"
|
||||
B float64 "b,omitempty"
|
||||
}{1, 0},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Flow flag
|
||||
{
|
||||
&struct {
|
||||
A []int "a,flow"
|
||||
}{[]int{1, 2}},
|
||||
"a: [1, 2]\n",
|
||||
}, {
|
||||
&struct {
|
||||
A map[string]string "a,flow"
|
||||
}{map[string]string{"b": "c", "d": "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
}, {
|
||||
&struct {
|
||||
A struct {
|
||||
B, D string
|
||||
} "a,flow"
|
||||
}{struct{ B, D string }{"c", "e"}},
|
||||
"a: {b: c, d: e}\n",
|
||||
},
|
||||
|
||||
// Unexported field
|
||||
{
|
||||
&struct {
|
||||
u int
|
||||
A int
|
||||
}{0, 1},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Ignored field
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
B int "-"
|
||||
}{1, 2},
|
||||
"a: 1\n",
|
||||
},
|
||||
|
||||
// Struct inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C inlineB `yaml:",inline"`
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Map inlining
|
||||
{
|
||||
&struct {
|
||||
A int
|
||||
C map[string]int `yaml:",inline"`
|
||||
}{1, map[string]int{"b": 2, "c": 3}},
|
||||
"a: 1\nb: 2\nc: 3\n",
|
||||
},
|
||||
|
||||
// Duration
|
||||
{
|
||||
map[string]time.Duration{"a": 3 * time.Second},
|
||||
"a: 3s\n",
|
||||
},
|
||||
|
||||
// Issue #24: bug in map merging logic.
|
||||
{
|
||||
map[string]string{"a": "<foo>"},
|
||||
"a: <foo>\n",
|
||||
},
|
||||
|
||||
// Issue #34: marshal unsupported base 60 floats quoted for compatibility
|
||||
// with old YAML 1.1 parsers.
|
||||
{
|
||||
map[string]string{"a": "1:1"},
|
||||
"a: \"1:1\"\n",
|
||||
},
|
||||
|
||||
// Binary data.
|
||||
{
|
||||
map[string]string{"a": "\x00"},
|
||||
"a: \"\\0\"\n",
|
||||
}, {
|
||||
map[string]string{"a": "\x80\x81\x82"},
|
||||
"a: !!binary gIGC\n",
|
||||
}, {
|
||||
map[string]string{"a": strings.Repeat("\x90", 54)},
|
||||
"a: !!binary |\n " + strings.Repeat("kJCQ", 17) + "kJ\n CQ\n",
|
||||
},
|
||||
|
||||
// Ordered maps.
|
||||
{
|
||||
&yaml.MapSlice{{"b", 2}, {"a", 1}, {"d", 4}, {"c", 3}, {"sub", yaml.MapSlice{{"e", 5}}}},
|
||||
"b: 2\na: 1\nd: 4\nc: 3\nsub:\n e: 5\n",
|
||||
},
|
||||
|
||||
// Encode unicode as utf-8 rather than in escaped form.
|
||||
{
|
||||
map[string]string{"a": "你好"},
|
||||
"a: 你好\n",
|
||||
},
|
||||
|
||||
// Support encoding.TextMarshaler.
|
||||
{
|
||||
map[string]net.IP{"a": net.IPv4(1, 2, 3, 4)},
|
||||
"a: 1.2.3.4\n",
|
||||
},
|
||||
{
|
||||
map[string]time.Time{"a": time.Unix(1424801979, 0)},
|
||||
"a: 2015-02-24T18:19:39Z\n",
|
||||
},
|
||||
|
||||
// Ensure strings containing ": " are quoted (reported as PR #43, but not reproducible).
|
||||
{
|
||||
map[string]string{"a": "b: c"},
|
||||
"a: 'b: c'\n",
|
||||
},
|
||||
|
||||
// Containing hash mark ('#') in string should be quoted
|
||||
{
|
||||
map[string]string{"a": "Hello #comment"},
|
||||
"a: 'Hello #comment'\n",
|
||||
},
|
||||
{
|
||||
map[string]string{"a": "你好 #comment"},
|
||||
"a: '你好 #comment'\n",
|
||||
},
|
||||
}
|
||||
|
||||
func (s *S) TestMarshal(c *C) {
|
||||
defer os.Setenv("TZ", os.Getenv("TZ"))
|
||||
os.Setenv("TZ", "UTC")
|
||||
for _, item := range marshalTests {
|
||||
data, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, item.data)
|
||||
}
|
||||
}
|
||||
|
||||
var marshalErrorTests = []struct {
|
||||
value interface{}
|
||||
error string
|
||||
panic string
|
||||
}{{
|
||||
value: &struct {
|
||||
B int
|
||||
inlineB ",inline"
|
||||
}{1, inlineB{2, inlineC{3}}},
|
||||
panic: `Duplicated key 'b' in struct struct \{ B int; .*`,
|
||||
}, {
|
||||
value: &struct {
|
||||
A int
|
||||
B map[string]int ",inline"
|
||||
}{1, map[string]int{"a": 2}},
|
||||
panic: `Can't have key "a" in inlined map; conflicts with struct field`,
|
||||
}}
|
||||
|
||||
func (s *S) TestMarshalErrors(c *C) {
|
||||
for _, item := range marshalErrorTests {
|
||||
if item.panic != "" {
|
||||
c.Assert(func() { yaml.Marshal(item.value) }, PanicMatches, item.panic)
|
||||
} else {
|
||||
_, err := yaml.Marshal(item.value)
|
||||
c.Assert(err, ErrorMatches, item.error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalTypeCache(c *C) {
|
||||
var data []byte
|
||||
var err error
|
||||
func() {
|
||||
type T struct{ A int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
func() {
|
||||
type T struct{ B int }
|
||||
data, err = yaml.Marshal(&T{})
|
||||
c.Assert(err, IsNil)
|
||||
}()
|
||||
c.Assert(string(data), Equals, "b: 0\n")
|
||||
}
|
||||
|
||||
var marshalerTests = []struct {
|
||||
data string
|
||||
value interface{}
|
||||
}{
|
||||
{"_:\n hi: there\n", map[interface{}]interface{}{"hi": "there"}},
|
||||
{"_:\n- 1\n- A\n", []interface{}{1, "A"}},
|
||||
{"_: 10\n", 10},
|
||||
{"_: null\n", nil},
|
||||
{"_: BAR!\n", "BAR!"},
|
||||
}
|
||||
|
||||
type marshalerType struct {
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalText() ([]byte, error) {
|
||||
panic("MarshalText called on type with MarshalYAML")
|
||||
}
|
||||
|
||||
func (o marshalerType) MarshalYAML() (interface{}, error) {
|
||||
return o.value, nil
|
||||
}
|
||||
|
||||
type marshalerValue struct {
|
||||
Field marshalerType "_"
|
||||
}
|
||||
|
||||
func (s *S) TestMarshaler(c *C) {
|
||||
for _, item := range marshalerTests {
|
||||
obj := &marshalerValue{}
|
||||
obj.Field.value = item.value
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, string(item.data))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerWholeDocument(c *C) {
|
||||
obj := &marshalerType{}
|
||||
obj.value = map[string]string{"hello": "world!"}
|
||||
data, err := yaml.Marshal(obj)
|
||||
c.Assert(err, IsNil)
|
||||
c.Assert(string(data), Equals, "hello: world!\n")
|
||||
}
|
||||
|
||||
type failingMarshaler struct{}
|
||||
|
||||
func (ft *failingMarshaler) MarshalYAML() (interface{}, error) {
|
||||
return nil, failingErr
|
||||
}
|
||||
|
||||
func (s *S) TestMarshalerError(c *C) {
|
||||
_, err := yaml.Marshal(&failingMarshaler{})
|
||||
c.Assert(err, Equals, failingErr)
|
||||
}
|
||||
|
||||
func (s *S) TestSortedOutput(c *C) {
|
||||
order := []interface{}{
|
||||
false,
|
||||
true,
|
||||
1,
|
||||
uint(1),
|
||||
1.0,
|
||||
1.1,
|
||||
1.2,
|
||||
2,
|
||||
uint(2),
|
||||
2.0,
|
||||
2.1,
|
||||
"",
|
||||
".1",
|
||||
".2",
|
||||
".a",
|
||||
"1",
|
||||
"2",
|
||||
"a!10",
|
||||
"a/2",
|
||||
"a/10",
|
||||
"a~10",
|
||||
"ab/1",
|
||||
"b/1",
|
||||
"b/01",
|
||||
"b/2",
|
||||
"b/02",
|
||||
"b/3",
|
||||
"b/03",
|
||||
"b1",
|
||||
"b01",
|
||||
"b3",
|
||||
"c2.10",
|
||||
"c10.2",
|
||||
"d1",
|
||||
"d12",
|
||||
"d12a",
|
||||
}
|
||||
m := make(map[interface{}]int)
|
||||
for _, k := range order {
|
||||
m[k] = 1
|
||||
}
|
||||
data, err := yaml.Marshal(m)
|
||||
c.Assert(err, IsNil)
|
||||
out := "\n" + string(data)
|
||||
last := 0
|
||||
for i, k := range order {
|
||||
repr := fmt.Sprint(k)
|
||||
if s, ok := k.(string); ok {
|
||||
if _, err = strconv.ParseFloat(repr, 32); s == "" || err == nil {
|
||||
repr = `"` + repr + `"`
|
||||
}
|
||||
}
|
||||
index := strings.Index(out, "\n"+repr+":")
|
||||
if index == -1 {
|
||||
c.Fatalf("%#v is not in the output: %#v", k, out)
|
||||
}
|
||||
if index < last {
|
||||
c.Fatalf("%#v was generated before %#v: %q", k, order[i-1], out)
|
||||
}
|
||||
last = index
|
||||
}
|
||||
}
|
@ -1,41 +0,0 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// An example showing how to unmarshal embedded
|
||||
// structs from YAML.
|
||||
|
||||
type StructA struct {
|
||||
A string `yaml:"a"`
|
||||
}
|
||||
|
||||
type StructB struct {
|
||||
// Embedded structs are not treated as embedded in YAML by default. To do that,
|
||||
// add the ",inline" annotation below
|
||||
StructA `yaml:",inline"`
|
||||
B string `yaml:"b"`
|
||||
}
|
||||
|
||||
var data = `
|
||||
a: a string from struct A
|
||||
b: a string from struct B
|
||||
`
|
||||
|
||||
func ExampleUnmarshal_embedded() {
|
||||
var b StructB
|
||||
|
||||
err := yaml.Unmarshal([]byte(data), &b)
|
||||
if err != nil {
|
||||
log.Fatal("cannot unmarshal data: %v", err)
|
||||
}
|
||||
fmt.Println(b.A)
|
||||
fmt.Println(b.B)
|
||||
// Output:
|
||||
// a string from struct A
|
||||
// a string from struct B
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
package yaml_test
|
||||
|
||||
import (
|
||||
. "gopkg.in/check.v1"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test(t *testing.T) { TestingT(t) }
|
||||
|
||||
type S struct{}
|
||||
|
||||
var _ = Suite(&S{})
|
@ -1,116 +0,0 @@
|
||||
package httpauth
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBasicAuthAuthenticateWithFunc(t *testing.T) {
|
||||
requiredUser := "jqpublic"
|
||||
requiredPass := "secret.sauce"
|
||||
|
||||
r := &http.Request{Method: "GET"}
|
||||
|
||||
// Dumb test function
|
||||
fn := func(u, p string, req *http.Request) bool {
|
||||
return u == requiredUser && p == requiredPass && req == r
|
||||
}
|
||||
|
||||
// Provide a minimal test implementation.
|
||||
authOpts := AuthOptions{
|
||||
Realm: "Restricted",
|
||||
AuthFunc: fn,
|
||||
}
|
||||
|
||||
b := &basicAuth{opts: authOpts}
|
||||
|
||||
if b.authenticate(nil) {
|
||||
t.Fatal("Should not succeed when http.Request is nil")
|
||||
}
|
||||
|
||||
// Provide auth data, but no Authorization header
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("No Authorization header supplied.")
|
||||
}
|
||||
|
||||
// Initialise the map for HTTP headers
|
||||
r.Header = http.Header(make(map[string][]string))
|
||||
|
||||
// Set a malformed/bad header
|
||||
r.Header.Set("Authorization", " Basic")
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("Malformed Authorization header supplied.")
|
||||
}
|
||||
|
||||
// Test correct credentials
|
||||
auth := base64.StdEncoding.EncodeToString([]byte("jqpublic:secret.sauce"))
|
||||
r.Header.Set("Authorization", "Basic "+auth)
|
||||
if b.authenticate(r) != true {
|
||||
t.Fatal("Failed on correct credentials")
|
||||
}
|
||||
|
||||
// Test incorrect credentials
|
||||
auth = base64.StdEncoding.EncodeToString([]byte("jqpublic:hackydoo"))
|
||||
r.Header.Set("Authorization", "Basic "+auth)
|
||||
if b.authenticate(r) == true {
|
||||
t.Fatal("Success when expecting failure")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicAuthAuthenticate(t *testing.T) {
|
||||
// Provide a minimal test implementation.
|
||||
authOpts := AuthOptions{
|
||||
Realm: "Restricted",
|
||||
User: "test-user",
|
||||
Password: "plain-text-password",
|
||||
}
|
||||
|
||||
b := &basicAuth{
|
||||
opts: authOpts,
|
||||
}
|
||||
|
||||
r := &http.Request{Method: "GET"}
|
||||
|
||||
// Provide auth data, but no Authorization header
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("No Authorization header supplied.")
|
||||
}
|
||||
|
||||
// Initialise the map for HTTP headers
|
||||
r.Header = http.Header(make(map[string][]string))
|
||||
|
||||
// Set a malformed/bad header
|
||||
r.Header.Set("Authorization", " Basic")
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("Malformed Authorization header supplied.")
|
||||
}
|
||||
|
||||
// Test correct credentials
|
||||
auth := base64.StdEncoding.EncodeToString([]byte(b.opts.User + ":" + b.opts.Password))
|
||||
r.Header.Set("Authorization", "Basic "+auth)
|
||||
if b.authenticate(r) != true {
|
||||
t.Fatal("Failed on correct credentials")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicAuthAuthenticateWithoutUserAndPass(t *testing.T) {
|
||||
b := basicAuth{opts: AuthOptions{}}
|
||||
|
||||
r := &http.Request{Method: "GET"}
|
||||
|
||||
// Provide auth data, but no Authorization header
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("No Authorization header supplied.")
|
||||
}
|
||||
|
||||
// Initialise the map for HTTP headers
|
||||
r.Header = http.Header(make(map[string][]string))
|
||||
|
||||
// Test correct credentials
|
||||
auth := base64.StdEncoding.EncodeToString([]byte(b.opts.User + ":" + b.opts.Password))
|
||||
r.Header.Set("Authorization", "Basic "+auth)
|
||||
if b.authenticate(r) != false {
|
||||
t.Fatal("Success when expecting failure")
|
||||
}
|
||||
}
|
@ -1,2 +0,0 @@
|
||||
/node-syscall/build
|
||||
/node_modules
|
@ -1,140 +0,0 @@
|
||||
GopherJS - A compiler from Go to JavaScript
|
||||
-------------------------------------------
|
||||
|
||||
[![Sourcegraph](https://sourcegraph.com/github.com/gopherjs/gopherjs/-/badge.svg)](https://sourcegraph.com/github.com/gopherjs/gopherjs?badge)
|
||||
[![Circle CI](https://circleci.com/gh/gopherjs/gopherjs.svg?style=svg)](https://circleci.com/gh/gopherjs/gopherjs)
|
||||
|
||||
GopherJS compiles Go code ([golang.org](https://golang.org/)) to pure JavaScript code. Its main purpose is to give you the opportunity to write front-end code in Go which will still run in all browsers.
|
||||
|
||||
### Playground
|
||||
Give GopherJS a try on the [GopherJS Playground](http://gopherjs.github.io/playground/).
|
||||
|
||||
### What is supported?
|
||||
Nearly everything, including Goroutines ([compatibility table](https://github.com/gopherjs/gopherjs/blob/master/doc/packages.md)). Performance is quite good in most cases, see [HTML5 game engine benchmark](https://ajhager.github.io/engi/demos/botmark.html). Cgo is not supported. Using a vendored copy of GopherJS is currently not supported, see [#415](https://github.com/gopherjs/gopherjs/issues/415).
|
||||
|
||||
### Installation and Usage
|
||||
Get or update GopherJS and dependencies with:
|
||||
|
||||
```
|
||||
go get -u github.com/gopherjs/gopherjs
|
||||
```
|
||||
|
||||
Now you can use `gopherjs build [package]`, `gopherjs build [files]` or `gopherjs install [package]` which behave similar to the `go` tool. For `main` packages, these commands create a `.js` file and `.js.map` source map in the current directory or in `$GOPATH/bin`. The generated JavaScript file can be used as usual in a website. Use `gopherjs help [command]` to get a list of possible command line flags, e.g. for minification and automatically watching for changes.
|
||||
|
||||
*Note: GopherJS will try to write compiled object files of the core packages to your $GOROOT/pkg directory. If that fails, it will fall back to $GOPATH/pkg.*
|
||||
|
||||
#### gopherjs run, gopherjs test
|
||||
|
||||
If you want to use `gopherjs run` or `gopherjs test` to run the generated code locally, install Node.js 4.x (or newer), and the `source-map-support` module:
|
||||
|
||||
```
|
||||
npm install --global source-map-support
|
||||
```
|
||||
|
||||
For system calls (file system access, etc.), see [this page](https://github.com/gopherjs/gopherjs/blob/master/doc/syscalls.md).
|
||||
|
||||
#### gopherjs serve
|
||||
|
||||
`gopherjs serve` is a useful command you can use during development. It will start an HTTP server serving on ":8080" by default, and dynamically compile Go packages with GopherJS and serve them.
|
||||
|
||||
For example, navigating to `http://localhost:8080/example.com/user/project/` should compile and run the Go package `example.com/user/project`. The generated JavaScript output will be served at `http://localhost:8080/example.com/user/project/project.js`. If the directory contains `index.html` it will be served, otherwise a minimal `index.html` that includes `<script src="{{base}}.js"></script>` will be provided, causing the JavaScript to be executed. All other static files will be served too.
|
||||
|
||||
Refreshing in the browser will rebuild the served files if needed. Compilation errors will be displayed in terminal, and in browser console. Additionally, it will serve $GOROOT and $GOPATH for sourcemaps.
|
||||
|
||||
If you include an argument, it will be the root from which everything is served. For example, if you run gopherjs serve github.com/user/project then the generated JavaScript for the package github.com/user/project/mypkg will be served at http://localhost:8080/mypkg/mypkg.js.
|
||||
|
||||
### Performance Tips
|
||||
|
||||
- Use the `-m` command line flag to generate minified code.
|
||||
- Apply gzip compression (https://en.wikipedia.org/wiki/HTTP_compression).
|
||||
- Use `int` instead of `(u)int8/16/32/64`.
|
||||
- Use `float64` instead of `float32`.
|
||||
|
||||
### Community
|
||||
- [#gopherjs Channel on Gophers Slack](https://gophers.slack.com/messages/gopherjs/) (invites to Gophers Slack are available [here](http://blog.gopheracademy.com/gophers-slack-community/#how-can-i-be-invited-to-join:2facdc921b2310f18cb851c36fa92369))
|
||||
- [Bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings)
|
||||
- [GopherJS Blog](https://medium.com/gopherjs)
|
||||
- [GopherJS on Twitter](https://twitter.com/GopherJS)
|
||||
|
||||
### Getting started
|
||||
|
||||
#### Interacting with the DOM
|
||||
The package `github.com/gopherjs/gopherjs/js` (see [documentation](https://godoc.org/github.com/gopherjs/gopherjs/js)) provides functions for interacting with native JavaScript APIs. For example the line
|
||||
|
||||
```js
|
||||
document.write("Hello world!");
|
||||
```
|
||||
|
||||
would look like this in Go:
|
||||
|
||||
```go
|
||||
js.Global.Get("document").Call("write", "Hello world!")
|
||||
```
|
||||
|
||||
You may also want use the [DOM bindings](http://dominik.honnef.co/go/js/dom), the [jQuery bindings](https://github.com/gopherjs/jquery) (see [TodoMVC Example](https://github.com/gopherjs/todomvc)) or the [AngularJS bindings](https://github.com/wvell/go-angularjs). Those are some of the [bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings) by community members.
|
||||
|
||||
#### Providing library functions for use in other JavaScript code
|
||||
Set a global variable to a map that contains the functions:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "github.com/gopherjs/gopherjs/js"
|
||||
|
||||
func main() {
|
||||
js.Global.Set("pet", map[string]interface{}{
|
||||
"New": New,
|
||||
})
|
||||
}
|
||||
|
||||
type Pet struct {
|
||||
name string
|
||||
}
|
||||
|
||||
func New(name string) *js.Object {
|
||||
return js.MakeWrapper(&Pet{name})
|
||||
}
|
||||
|
||||
func (p *Pet) Name() string {
|
||||
return p.name
|
||||
}
|
||||
|
||||
func (p *Pet) SetName(name string) {
|
||||
p.name = name
|
||||
}
|
||||
```
|
||||
|
||||
For more details see [Jason Stone's blog post](http://legacytotheedge.blogspot.de/2014/03/gopherjs-go-to-javascript-transpiler.html) about GopherJS.
|
||||
|
||||
### Architecture
|
||||
|
||||
#### General
|
||||
GopherJS emulates a 32-bit environment. This means that `int`, `uint` and `uintptr` have a precision of 32 bits. However, the explicit 64-bit integer types `int64` and `uint64` are supported. The `GOARCH` value of GopherJS is "js". You may use it as a build constraint: `// +build js`.
|
||||
|
||||
#### Application Lifecycle
|
||||
|
||||
The `main` function is executed as usual after all `init` functions have run. JavaScript callbacks can also invoke Go functions, even after the `main` function has exited. Therefore the end of the `main` function should not be regarded as the end of the application and does not end the execution of other goroutines.
|
||||
|
||||
In the browser, calling `os.Exit` (e.g. indirectly by `log.Fatal`) also does not terminate the execution of the program. For convenience, it calls `runtime.Goexit` to immediately terminate the calling goroutine.
|
||||
|
||||
#### Goroutines
|
||||
Goroutines are fully supported by GopherJS. The only restriction is that you need to start a new goroutine if you want to use blocking code called from external JavaScript:
|
||||
|
||||
```go
|
||||
js.Global.Get("myButton").Call("addEventListener", "click", func() {
|
||||
go func() {
|
||||
[...]
|
||||
someBlockingFunction()
|
||||
[...]
|
||||
}()
|
||||
})
|
||||
```
|
||||
|
||||
How it works:
|
||||
|
||||
JavaScript has no concept of concurrency (except web workers, but those are too strictly separated to be used for goroutines). Because of that, instructions in JavaScript are never blocking. A blocking call would effectively freeze the responsiveness of your web page, so calls with callback arguments are used instead.
|
||||
|
||||
GopherJS does some heavy lifting to work around this restriction: Whenever an instruction is blocking (e.g. communicating with a channel that isn't ready), the whole stack will unwind (= all functions return) and the goroutine will be put to sleep. Then another goroutine which is ready to resume gets picked and its stack with all local variables will be restored.
|
||||
|
||||
### GopherJS Development
|
||||
If you're looking to make changes to the GopherJS compiler, see [Developer Guidelines](https://github.com/gopherjs/gopherjs/wiki/Developer-Guidelines) for additional developer information.
|
@ -1,779 +0,0 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/gopherjs/gopherjs/compiler"
|
||||
"github.com/gopherjs/gopherjs/compiler/natives"
|
||||
"github.com/neelance/sourcemap"
|
||||
)
|
||||
|
||||
type ImportCError struct {
|
||||
pkgPath string
|
||||
}
|
||||
|
||||
func (e *ImportCError) Error() string {
|
||||
return e.pkgPath + `: importing "C" is not supported by GopherJS`
|
||||
}
|
||||
|
||||
func NewBuildContext(installSuffix string, buildTags []string) *build.Context {
|
||||
return &build.Context{
|
||||
GOROOT: build.Default.GOROOT,
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOOS: build.Default.GOOS,
|
||||
GOARCH: "js",
|
||||
InstallSuffix: installSuffix,
|
||||
Compiler: "gc",
|
||||
BuildTags: append(buildTags, "netgo"),
|
||||
ReleaseTags: build.Default.ReleaseTags,
|
||||
CgoEnabled: true, // detect `import "C"` to throw proper error
|
||||
}
|
||||
}
|
||||
|
||||
// Import returns details about the Go package named by the import path. If the
|
||||
// path is a local import path naming a package that can be imported using
|
||||
// a standard import path, the returned package will set p.ImportPath to
|
||||
// that path.
|
||||
//
|
||||
// In the directory containing the package, .go and .inc.js files are
|
||||
// considered part of the package except for:
|
||||
//
|
||||
// - .go files in package documentation
|
||||
// - files starting with _ or . (likely editor temporary files)
|
||||
// - files with build constraints not satisfied by the context
|
||||
//
|
||||
// If an error occurs, Import returns a non-nil error and a nil
|
||||
// *PackageData.
|
||||
func Import(path string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
// Getwd may fail if we're in GOARCH=js mode. That's okay, handle
|
||||
// it by falling back to empty working directory. It just means
|
||||
// Import will not be able to resolve relative import paths.
|
||||
wd = ""
|
||||
}
|
||||
return importWithSrcDir(path, wd, mode, installSuffix, buildTags)
|
||||
}
|
||||
|
||||
func importWithSrcDir(path string, srcDir string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
|
||||
buildContext := NewBuildContext(installSuffix, buildTags)
|
||||
if path == "syscall" { // syscall needs to use a typical GOARCH like amd64 to pick up definitions for _Socklen, BpfInsn, IFNAMSIZ, Timeval, BpfStat, SYS_FCNTL, Flock_t, etc.
|
||||
buildContext.GOARCH = runtime.GOARCH
|
||||
buildContext.InstallSuffix = "js"
|
||||
if installSuffix != "" {
|
||||
buildContext.InstallSuffix += "_" + installSuffix
|
||||
}
|
||||
}
|
||||
pkg, err := buildContext.Import(path, srcDir, mode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: Resolve issue #415 and remove this temporary workaround.
|
||||
if strings.HasSuffix(pkg.ImportPath, "/vendor/github.com/gopherjs/gopherjs/js") {
|
||||
return nil, fmt.Errorf("vendoring github.com/gopherjs/gopherjs/js package is not supported, see https://github.com/gopherjs/gopherjs/issues/415")
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "os":
|
||||
pkg.GoFiles = stripExecutable(pkg.GoFiles) // Need to strip executable implementation files, because some of them contain package scope variables that perform (indirectly) syscalls on init.
|
||||
case "runtime":
|
||||
pkg.GoFiles = []string{"error.go"}
|
||||
case "runtime/internal/sys":
|
||||
pkg.GoFiles = []string{fmt.Sprintf("zgoos_%s.go", buildContext.GOOS), "zversion.go"}
|
||||
case "runtime/pprof":
|
||||
pkg.GoFiles = nil
|
||||
case "crypto/rand":
|
||||
pkg.GoFiles = []string{"rand.go", "util.go"}
|
||||
case "crypto/x509":
|
||||
pkg.CgoFiles = nil
|
||||
}
|
||||
|
||||
if len(pkg.CgoFiles) > 0 {
|
||||
return nil, &ImportCError{path}
|
||||
}
|
||||
|
||||
if pkg.IsCommand() {
|
||||
pkg.PkgObj = filepath.Join(pkg.BinDir, filepath.Base(pkg.ImportPath)+".js")
|
||||
}
|
||||
|
||||
if _, err := os.Stat(pkg.PkgObj); os.IsNotExist(err) && strings.HasPrefix(pkg.PkgObj, build.Default.GOROOT) {
|
||||
// fall back to GOPATH
|
||||
firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] // TODO: Need to check inside all GOPATH workspaces.
|
||||
gopathPkgObj := filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(build.Default.GOROOT):])
|
||||
if _, err := os.Stat(gopathPkgObj); err == nil {
|
||||
pkg.PkgObj = gopathPkgObj
|
||||
}
|
||||
}
|
||||
|
||||
jsFiles, err := jsFilesFromDir(pkg.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PackageData{Package: pkg, JSFiles: jsFiles}, nil
|
||||
}
|
||||
|
||||
// stripExecutable strips all executable implementation .go files.
|
||||
// They have "executable_" prefix.
|
||||
func stripExecutable(goFiles []string) []string {
|
||||
var s []string
|
||||
for _, f := range goFiles {
|
||||
if strings.HasPrefix(f, "executable_") {
|
||||
continue
|
||||
}
|
||||
s = append(s, f)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// ImportDir is like Import but processes the Go package found in the named
|
||||
// directory.
|
||||
func ImportDir(dir string, mode build.ImportMode, installSuffix string, buildTags []string) (*PackageData, error) {
|
||||
pkg, err := NewBuildContext(installSuffix, buildTags).ImportDir(dir, mode)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
jsFiles, err := jsFilesFromDir(pkg.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &PackageData{Package: pkg, JSFiles: jsFiles}, nil
|
||||
}
|
||||
|
||||
// parseAndAugment parses and returns all .go files of given pkg.
|
||||
// Standard Go library packages are augmented with files in compiler/natives folder.
|
||||
// If isTest is true and pkg.ImportPath has no _test suffix, package is built for running internal tests.
|
||||
// If isTest is true and pkg.ImportPath has _test suffix, package is built for running external tests.
|
||||
//
|
||||
// The native packages are augmented by the contents of natives.FS in the following way.
|
||||
// The file names do not matter except the usual `_test` suffix. The files for
|
||||
// native overrides get added to the package (even if they have the same name
|
||||
// as an existing file from the standard library). For all identifiers that exist
|
||||
// in the original AND the overrides, the original identifier in the AST gets
|
||||
// replaced by `_`. New identifiers that don't exist in original package get added.
|
||||
func parseAndAugment(pkg *build.Package, isTest bool, fileSet *token.FileSet) ([]*ast.File, error) {
|
||||
var files []*ast.File
|
||||
replacedDeclNames := make(map[string]bool)
|
||||
funcName := func(d *ast.FuncDecl) string {
|
||||
if d.Recv == nil || len(d.Recv.List) == 0 {
|
||||
return d.Name.Name
|
||||
}
|
||||
recv := d.Recv.List[0].Type
|
||||
if star, ok := recv.(*ast.StarExpr); ok {
|
||||
recv = star.X
|
||||
}
|
||||
return recv.(*ast.Ident).Name + "." + d.Name.Name
|
||||
}
|
||||
isXTest := strings.HasSuffix(pkg.ImportPath, "_test")
|
||||
importPath := pkg.ImportPath
|
||||
if isXTest {
|
||||
importPath = importPath[:len(importPath)-5]
|
||||
}
|
||||
|
||||
nativesContext := &build.Context{
|
||||
GOROOT: "/",
|
||||
GOOS: build.Default.GOOS,
|
||||
GOARCH: "js",
|
||||
Compiler: "gc",
|
||||
JoinPath: path.Join,
|
||||
SplitPathList: func(list string) []string {
|
||||
if list == "" {
|
||||
return nil
|
||||
}
|
||||
return strings.Split(list, "/")
|
||||
},
|
||||
IsAbsPath: path.IsAbs,
|
||||
IsDir: func(name string) bool {
|
||||
dir, err := natives.FS.Open(name)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer dir.Close()
|
||||
info, err := dir.Stat()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return info.IsDir()
|
||||
},
|
||||
HasSubdir: func(root, name string) (rel string, ok bool) {
|
||||
panic("not implemented")
|
||||
},
|
||||
ReadDir: func(name string) (fi []os.FileInfo, err error) {
|
||||
dir, err := natives.FS.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer dir.Close()
|
||||
return dir.Readdir(0)
|
||||
},
|
||||
OpenFile: func(name string) (r io.ReadCloser, err error) {
|
||||
return natives.FS.Open(name)
|
||||
},
|
||||
}
|
||||
if nativesPkg, err := nativesContext.Import(importPath, "", 0); err == nil {
|
||||
names := nativesPkg.GoFiles
|
||||
if isTest {
|
||||
names = append(names, nativesPkg.TestGoFiles...)
|
||||
}
|
||||
if isXTest {
|
||||
names = nativesPkg.XTestGoFiles
|
||||
}
|
||||
for _, name := range names {
|
||||
fullPath := path.Join(nativesPkg.Dir, name)
|
||||
r, err := nativesContext.OpenFile(fullPath)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
file, err := parser.ParseFile(fileSet, fullPath, r, parser.ParseComments)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r.Close()
|
||||
for _, decl := range file.Decls {
|
||||
switch d := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
replacedDeclNames[funcName(d)] = true
|
||||
case *ast.GenDecl:
|
||||
switch d.Tok {
|
||||
case token.TYPE:
|
||||
for _, spec := range d.Specs {
|
||||
replacedDeclNames[spec.(*ast.TypeSpec).Name.Name] = true
|
||||
}
|
||||
case token.VAR, token.CONST:
|
||||
for _, spec := range d.Specs {
|
||||
for _, name := range spec.(*ast.ValueSpec).Names {
|
||||
replacedDeclNames[name.Name] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
}
|
||||
delete(replacedDeclNames, "init")
|
||||
|
||||
var errList compiler.ErrorList
|
||||
for _, name := range pkg.GoFiles {
|
||||
if !filepath.IsAbs(name) {
|
||||
name = filepath.Join(pkg.Dir, name)
|
||||
}
|
||||
r, err := os.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file, err := parser.ParseFile(fileSet, name, r, parser.ParseComments)
|
||||
r.Close()
|
||||
if err != nil {
|
||||
if list, isList := err.(scanner.ErrorList); isList {
|
||||
if len(list) > 10 {
|
||||
list = append(list[:10], &scanner.Error{Pos: list[9].Pos, Msg: "too many errors"})
|
||||
}
|
||||
for _, entry := range list {
|
||||
errList = append(errList, entry)
|
||||
}
|
||||
continue
|
||||
}
|
||||
errList = append(errList, err)
|
||||
continue
|
||||
}
|
||||
|
||||
switch pkg.ImportPath {
|
||||
case "crypto/rand", "encoding/gob", "encoding/json", "expvar", "go/token", "log", "math/big", "math/rand", "regexp", "testing", "time":
|
||||
for _, spec := range file.Imports {
|
||||
path, _ := strconv.Unquote(spec.Path.Value)
|
||||
if path == "sync" {
|
||||
if spec.Name == nil {
|
||||
spec.Name = ast.NewIdent("sync")
|
||||
}
|
||||
spec.Path.Value = `"github.com/gopherjs/gopherjs/nosync"`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, decl := range file.Decls {
|
||||
switch d := decl.(type) {
|
||||
case *ast.FuncDecl:
|
||||
if replacedDeclNames[funcName(d)] {
|
||||
d.Name = ast.NewIdent("_")
|
||||
}
|
||||
case *ast.GenDecl:
|
||||
switch d.Tok {
|
||||
case token.TYPE:
|
||||
for _, spec := range d.Specs {
|
||||
s := spec.(*ast.TypeSpec)
|
||||
if replacedDeclNames[s.Name.Name] {
|
||||
s.Name = ast.NewIdent("_")
|
||||
}
|
||||
}
|
||||
case token.VAR, token.CONST:
|
||||
for _, spec := range d.Specs {
|
||||
s := spec.(*ast.ValueSpec)
|
||||
for i, name := range s.Names {
|
||||
if replacedDeclNames[name.Name] {
|
||||
s.Names[i] = ast.NewIdent("_")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
files = append(files, file)
|
||||
}
|
||||
if errList != nil {
|
||||
return nil, errList
|
||||
}
|
||||
return files, nil
|
||||
}
|
||||
|
||||
type Options struct {
|
||||
GOROOT string
|
||||
GOPATH string
|
||||
Verbose bool
|
||||
Quiet bool
|
||||
Watch bool
|
||||
CreateMapFile bool
|
||||
MapToLocalDisk bool
|
||||
Minify bool
|
||||
Color bool
|
||||
BuildTags []string
|
||||
}
|
||||
|
||||
func (o *Options) PrintError(format string, a ...interface{}) {
|
||||
if o.Color {
|
||||
format = "\x1B[31m" + format + "\x1B[39m"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
func (o *Options) PrintSuccess(format string, a ...interface{}) {
|
||||
if o.Color {
|
||||
format = "\x1B[32m" + format + "\x1B[39m"
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, format, a...)
|
||||
}
|
||||
|
||||
type PackageData struct {
|
||||
*build.Package
|
||||
JSFiles []string
|
||||
IsTest bool // IsTest is true if the package is being built for running tests.
|
||||
SrcModTime time.Time
|
||||
UpToDate bool
|
||||
}
|
||||
|
||||
type Session struct {
|
||||
options *Options
|
||||
Archives map[string]*compiler.Archive
|
||||
Types map[string]*types.Package
|
||||
Watcher *fsnotify.Watcher
|
||||
}
|
||||
|
||||
func NewSession(options *Options) *Session {
|
||||
if options.GOROOT == "" {
|
||||
options.GOROOT = build.Default.GOROOT
|
||||
}
|
||||
if options.GOPATH == "" {
|
||||
options.GOPATH = build.Default.GOPATH
|
||||
}
|
||||
options.Verbose = options.Verbose || options.Watch
|
||||
|
||||
s := &Session{
|
||||
options: options,
|
||||
Archives: make(map[string]*compiler.Archive),
|
||||
}
|
||||
s.Types = make(map[string]*types.Package)
|
||||
if options.Watch {
|
||||
if out, err := exec.Command("ulimit", "-n").Output(); err == nil {
|
||||
if n, err := strconv.Atoi(strings.TrimSpace(string(out))); err == nil && n < 1024 {
|
||||
fmt.Printf("Warning: The maximum number of open file descriptors is very low (%d). Change it with 'ulimit -n 8192'.\n", n)
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
s.Watcher, err = fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *Session) InstallSuffix() string {
|
||||
if s.options.Minify {
|
||||
return "min"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (s *Session) BuildDir(packagePath string, importPath string, pkgObj string) error {
|
||||
if s.Watcher != nil {
|
||||
s.Watcher.Add(packagePath)
|
||||
}
|
||||
buildPkg, err := NewBuildContext(s.InstallSuffix(), s.options.BuildTags).ImportDir(packagePath, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkg := &PackageData{Package: buildPkg}
|
||||
jsFiles, err := jsFilesFromDir(pkg.Dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkg.JSFiles = jsFiles
|
||||
archive, err := s.BuildPackage(pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pkgObj == "" {
|
||||
pkgObj = filepath.Base(packagePath) + ".js"
|
||||
}
|
||||
if pkg.IsCommand() && !pkg.UpToDate {
|
||||
if err := s.WriteCommandPackage(archive, pkgObj); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Session) BuildFiles(filenames []string, pkgObj string, packagePath string) error {
|
||||
pkg := &PackageData{
|
||||
Package: &build.Package{
|
||||
Name: "main",
|
||||
ImportPath: "main",
|
||||
Dir: packagePath,
|
||||
},
|
||||
}
|
||||
|
||||
for _, file := range filenames {
|
||||
if strings.HasSuffix(file, ".inc.js") {
|
||||
pkg.JSFiles = append(pkg.JSFiles, file)
|
||||
continue
|
||||
}
|
||||
pkg.GoFiles = append(pkg.GoFiles, file)
|
||||
}
|
||||
|
||||
archive, err := s.BuildPackage(pkg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.Types["main"].Name() != "main" {
|
||||
return fmt.Errorf("cannot build/run non-main package")
|
||||
}
|
||||
return s.WriteCommandPackage(archive, pkgObj)
|
||||
}
|
||||
|
||||
func (s *Session) BuildImportPath(path string) (*compiler.Archive, error) {
|
||||
_, archive, err := s.buildImportPathWithSrcDir(path, "")
|
||||
return archive, err
|
||||
}
|
||||
|
||||
func (s *Session) buildImportPathWithSrcDir(path string, srcDir string) (*PackageData, *compiler.Archive, error) {
|
||||
pkg, err := importWithSrcDir(path, srcDir, 0, s.InstallSuffix(), s.options.BuildTags)
|
||||
if s.Watcher != nil && pkg != nil { // add watch even on error
|
||||
s.Watcher.Add(pkg.Dir)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
archive, err := s.BuildPackage(pkg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return pkg, archive, nil
|
||||
}
|
||||
|
||||
func (s *Session) BuildPackage(pkg *PackageData) (*compiler.Archive, error) {
|
||||
if archive, ok := s.Archives[pkg.ImportPath]; ok {
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
if pkg.PkgObj != "" {
|
||||
var fileInfo os.FileInfo
|
||||
gopherjsBinary, err := os.Executable()
|
||||
if err == nil {
|
||||
fileInfo, err = os.Stat(gopherjsBinary)
|
||||
if err == nil {
|
||||
pkg.SrcModTime = fileInfo.ModTime()
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
os.Stderr.WriteString("Could not get GopherJS binary's modification timestamp. Please report issue.\n")
|
||||
pkg.SrcModTime = time.Now()
|
||||
}
|
||||
|
||||
for _, importedPkgPath := range pkg.Imports {
|
||||
// Ignore all imports that aren't mentioned in import specs of pkg.
|
||||
// For example, this ignores imports such as runtime/internal/sys and runtime/internal/atomic.
|
||||
ignored := true
|
||||
for _, pos := range pkg.ImportPos[importedPkgPath] {
|
||||
importFile := filepath.Base(pos.Filename)
|
||||
for _, file := range pkg.GoFiles {
|
||||
if importFile == file {
|
||||
ignored = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ignored {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if importedPkgPath == "unsafe" || ignored {
|
||||
continue
|
||||
}
|
||||
importedPkg, _, err := s.buildImportPathWithSrcDir(importedPkgPath, pkg.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
impModTime := importedPkg.SrcModTime
|
||||
if impModTime.After(pkg.SrcModTime) {
|
||||
pkg.SrcModTime = impModTime
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range append(pkg.GoFiles, pkg.JSFiles...) {
|
||||
fileInfo, err := os.Stat(filepath.Join(pkg.Dir, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fileInfo.ModTime().After(pkg.SrcModTime) {
|
||||
pkg.SrcModTime = fileInfo.ModTime()
|
||||
}
|
||||
}
|
||||
|
||||
pkgObjFileInfo, err := os.Stat(pkg.PkgObj)
|
||||
if err == nil && !pkg.SrcModTime.After(pkgObjFileInfo.ModTime()) {
|
||||
// package object is up to date, load from disk if library
|
||||
pkg.UpToDate = true
|
||||
if pkg.IsCommand() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
objFile, err := os.Open(pkg.PkgObj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer objFile.Close()
|
||||
|
||||
archive, err := compiler.ReadArchive(pkg.PkgObj, pkg.ImportPath, objFile, s.Types)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.Archives[pkg.ImportPath] = archive
|
||||
return archive, err
|
||||
}
|
||||
}
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
files, err := parseAndAugment(pkg.Package, pkg.IsTest, fileSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localImportPathCache := make(map[string]*compiler.Archive)
|
||||
importContext := &compiler.ImportContext{
|
||||
Packages: s.Types,
|
||||
Import: func(path string) (*compiler.Archive, error) {
|
||||
if archive, ok := localImportPathCache[path]; ok {
|
||||
return archive, nil
|
||||
}
|
||||
_, archive, err := s.buildImportPathWithSrcDir(path, pkg.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
localImportPathCache[path] = archive
|
||||
return archive, nil
|
||||
},
|
||||
}
|
||||
archive, err := compiler.Compile(pkg.ImportPath, files, fileSet, importContext, s.options.Minify)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, jsFile := range pkg.JSFiles {
|
||||
code, err := ioutil.ReadFile(filepath.Join(pkg.Dir, jsFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
archive.IncJSCode = append(archive.IncJSCode, []byte("\t(function() {\n")...)
|
||||
archive.IncJSCode = append(archive.IncJSCode, code...)
|
||||
archive.IncJSCode = append(archive.IncJSCode, []byte("\n\t}).call($global);\n")...)
|
||||
}
|
||||
|
||||
if s.options.Verbose {
|
||||
fmt.Println(pkg.ImportPath)
|
||||
}
|
||||
|
||||
s.Archives[pkg.ImportPath] = archive
|
||||
|
||||
if pkg.PkgObj == "" || pkg.IsCommand() {
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
if err := s.writeLibraryPackage(archive, pkg.PkgObj); err != nil {
|
||||
if strings.HasPrefix(pkg.PkgObj, s.options.GOROOT) {
|
||||
// fall back to first GOPATH workspace
|
||||
firstGopathWorkspace := filepath.SplitList(s.options.GOPATH)[0]
|
||||
if err := s.writeLibraryPackage(archive, filepath.Join(firstGopathWorkspace, pkg.PkgObj[len(s.options.GOROOT):])); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return archive, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return archive, nil
|
||||
}
|
||||
|
||||
func (s *Session) writeLibraryPackage(archive *compiler.Archive, pkgObj string) error {
|
||||
if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
objFile, err := os.Create(pkgObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer objFile.Close()
|
||||
|
||||
return compiler.WriteArchive(archive, objFile)
|
||||
}
|
||||
|
||||
func (s *Session) WriteCommandPackage(archive *compiler.Archive, pkgObj string) error {
|
||||
if err := os.MkdirAll(filepath.Dir(pkgObj), 0777); err != nil {
|
||||
return err
|
||||
}
|
||||
codeFile, err := os.Create(pkgObj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer codeFile.Close()
|
||||
|
||||
sourceMapFilter := &compiler.SourceMapFilter{Writer: codeFile}
|
||||
if s.options.CreateMapFile {
|
||||
m := &sourcemap.Map{File: filepath.Base(pkgObj)}
|
||||
mapFile, err := os.Create(pkgObj + ".map")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
m.WriteTo(mapFile)
|
||||
mapFile.Close()
|
||||
fmt.Fprintf(codeFile, "//# sourceMappingURL=%s.map\n", filepath.Base(pkgObj))
|
||||
}()
|
||||
|
||||
sourceMapFilter.MappingCallback = NewMappingCallback(m, s.options.GOROOT, s.options.GOPATH, s.options.MapToLocalDisk)
|
||||
}
|
||||
|
||||
deps, err := compiler.ImportDependencies(archive, func(path string) (*compiler.Archive, error) {
|
||||
if archive, ok := s.Archives[path]; ok {
|
||||
return archive, nil
|
||||
}
|
||||
_, archive, err := s.buildImportPathWithSrcDir(path, "")
|
||||
return archive, err
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return compiler.WriteProgramCode(deps, sourceMapFilter)
|
||||
}
|
||||
|
||||
func NewMappingCallback(m *sourcemap.Map, goroot, gopath string, localMap bool) func(generatedLine, generatedColumn int, originalPos token.Position) {
|
||||
return func(generatedLine, generatedColumn int, originalPos token.Position) {
|
||||
if !originalPos.IsValid() {
|
||||
m.AddMapping(&sourcemap.Mapping{GeneratedLine: generatedLine, GeneratedColumn: generatedColumn})
|
||||
return
|
||||
}
|
||||
|
||||
file := originalPos.Filename
|
||||
|
||||
switch hasGopathPrefix, prefixLen := hasGopathPrefix(file, gopath); {
|
||||
case localMap:
|
||||
// no-op: keep file as-is
|
||||
case hasGopathPrefix:
|
||||
file = filepath.ToSlash(file[prefixLen+4:])
|
||||
case strings.HasPrefix(file, goroot):
|
||||
file = filepath.ToSlash(file[len(goroot)+4:])
|
||||
default:
|
||||
file = filepath.Base(file)
|
||||
}
|
||||
|
||||
m.AddMapping(&sourcemap.Mapping{GeneratedLine: generatedLine, GeneratedColumn: generatedColumn, OriginalFile: file, OriginalLine: originalPos.Line, OriginalColumn: originalPos.Column})
|
||||
}
|
||||
}
|
||||
|
||||
func jsFilesFromDir(dir string) ([]string, error) {
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var jsFiles []string
|
||||
for _, file := range files {
|
||||
if strings.HasSuffix(file.Name(), ".inc.js") && file.Name()[0] != '_' && file.Name()[0] != '.' {
|
||||
jsFiles = append(jsFiles, file.Name())
|
||||
}
|
||||
}
|
||||
return jsFiles, nil
|
||||
}
|
||||
|
||||
// hasGopathPrefix returns true and the length of the matched GOPATH workspace,
|
||||
// iff file has a prefix that matches one of the GOPATH workspaces.
|
||||
func hasGopathPrefix(file, gopath string) (hasGopathPrefix bool, prefixLen int) {
|
||||
gopathWorkspaces := filepath.SplitList(gopath)
|
||||
for _, gopathWorkspace := range gopathWorkspaces {
|
||||
gopathWorkspace = filepath.Clean(gopathWorkspace)
|
||||
if strings.HasPrefix(file, gopathWorkspace) {
|
||||
return true, len(gopathWorkspace)
|
||||
}
|
||||
}
|
||||
return false, 0
|
||||
}
|
||||
|
||||
func (s *Session) WaitForChange() {
|
||||
s.options.PrintSuccess("watching for changes...\n")
|
||||
for {
|
||||
select {
|
||||
case ev := <-s.Watcher.Events:
|
||||
if ev.Op&(fsnotify.Create|fsnotify.Write|fsnotify.Remove|fsnotify.Rename) == 0 || filepath.Base(ev.Name)[0] == '.' {
|
||||
continue
|
||||
}
|
||||
if !strings.HasSuffix(ev.Name, ".go") && !strings.HasSuffix(ev.Name, ".inc.js") {
|
||||
continue
|
||||
}
|
||||
s.options.PrintSuccess("change detected: %s\n", ev.Name)
|
||||
case err := <-s.Watcher.Errors:
|
||||
s.options.PrintError("watcher error: %s\n", err.Error())
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
go func() {
|
||||
for range s.Watcher.Events {
|
||||
// consume, else Close() may deadlock
|
||||
}
|
||||
}()
|
||||
s.Watcher.Close()
|
||||
}
|
@ -1,199 +0,0 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
gobuild "go/build"
|
||||
"go/token"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kisielk/gotool"
|
||||
"github.com/shurcooL/go/importgraphutil"
|
||||
)
|
||||
|
||||
// Natives augment the standard library with GopherJS-specific changes.
|
||||
// This test ensures that none of the standard library packages are modified
|
||||
// in a way that adds imports which the original upstream standard library package
|
||||
// does not already import. Doing that can increase generated output size or cause
|
||||
// other unexpected issues (since the cmd/go tool does not know about these extra imports),
|
||||
// so it's best to avoid it.
|
||||
//
|
||||
// It checks all standard library packages. Each package is considered as a normal
|
||||
// package, as a test package, and as an external test package.
|
||||
func TestNativesDontImportExtraPackages(t *testing.T) {
|
||||
// Calculate the forward import graph for all standard library packages.
|
||||
// It's needed for populateImportSet.
|
||||
stdOnly := gobuild.Default
|
||||
stdOnly.GOPATH = "" // We only care about standard library, so skip all GOPATH packages.
|
||||
forward, _, err := importgraphutil.BuildNoTests(&stdOnly)
|
||||
if err != nil {
|
||||
t.Fatalf("importgraphutil.BuildNoTests: %v", err)
|
||||
}
|
||||
|
||||
// populateImportSet takes a slice of imports, and populates set with those
|
||||
// imports, as well as their transitive dependencies. That way, the set can
|
||||
// be quickly queried to check if a package is in the import graph of imports.
|
||||
//
|
||||
// Note, this does not include transitive imports of test/xtest packages,
|
||||
// which could cause some false positives. It currently doesn't, but if it does,
|
||||
// then support for that should be added here.
|
||||
populateImportSet := func(imports []string, set *stringSet) {
|
||||
for _, p := range imports {
|
||||
(*set)[p] = struct{}{}
|
||||
switch p {
|
||||
case "sync":
|
||||
(*set)["github.com/gopherjs/gopherjs/nosync"] = struct{}{}
|
||||
}
|
||||
transitiveImports := forward.Search(p)
|
||||
for p := range transitiveImports {
|
||||
(*set)[p] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check all standard library packages.
|
||||
//
|
||||
// The general strategy is to first import each standard library package using the
|
||||
// normal build.Import, which returns a *build.Package. That contains Imports, TestImports,
|
||||
// and XTestImports values that are considered the "real imports".
|
||||
//
|
||||
// That list of direct imports is then expanded to the transitive closure by populateImportSet,
|
||||
// meaning all packages that are indirectly imported are also added to the set.
|
||||
//
|
||||
// Then, github.com/gopherjs/gopherjs/build.parseAndAugment(*build.Package) returns []*ast.File.
|
||||
// Those augmented parsed Go files of the package are checked, one file at at time, one import
|
||||
// at a time. Each import is verified to belong in the set of allowed real imports.
|
||||
for _, pkg := range gotool.ImportPaths([]string{"std"}) {
|
||||
// Normal package.
|
||||
{
|
||||
// Import the real normal package, and populate its real import set.
|
||||
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
|
||||
if err != nil {
|
||||
t.Fatalf("gobuild.Import: %v", err)
|
||||
}
|
||||
realImports := make(stringSet)
|
||||
populateImportSet(bpkg.Imports, &realImports)
|
||||
|
||||
// Use parseAndAugment to get a list of augmented AST files.
|
||||
fset := token.NewFileSet()
|
||||
files, err := parseAndAugment(bpkg, false, fset)
|
||||
if err != nil {
|
||||
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
|
||||
}
|
||||
|
||||
// Verify imports of normal augmented AST files.
|
||||
for _, f := range files {
|
||||
fileName := fset.File(f.Pos()).Name()
|
||||
normalFile := !strings.HasSuffix(fileName, "_test.go")
|
||||
if !normalFile {
|
||||
continue
|
||||
}
|
||||
for _, imp := range f.Imports {
|
||||
importPath, err := strconv.Unquote(imp.Path.Value)
|
||||
if err != nil {
|
||||
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
|
||||
}
|
||||
if importPath == "github.com/gopherjs/gopherjs/js" {
|
||||
continue
|
||||
}
|
||||
if _, ok := realImports[importPath]; !ok {
|
||||
t.Errorf("augmented normal package %q imports %q in file %v, but real %q doesn't:\nrealImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realImports)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test package.
|
||||
{
|
||||
// Import the real test package, and populate its real import set.
|
||||
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
|
||||
if err != nil {
|
||||
t.Fatalf("gobuild.Import: %v", err)
|
||||
}
|
||||
realTestImports := make(stringSet)
|
||||
populateImportSet(bpkg.TestImports, &realTestImports)
|
||||
|
||||
// Use parseAndAugment to get a list of augmented AST files.
|
||||
fset := token.NewFileSet()
|
||||
files, err := parseAndAugment(bpkg, true, fset)
|
||||
if err != nil {
|
||||
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
|
||||
}
|
||||
|
||||
// Verify imports of test augmented AST files.
|
||||
for _, f := range files {
|
||||
fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
|
||||
testFile := strings.HasSuffix(fileName, "_test.go") && !strings.HasSuffix(pkgName, "_test")
|
||||
if !testFile {
|
||||
continue
|
||||
}
|
||||
for _, imp := range f.Imports {
|
||||
importPath, err := strconv.Unquote(imp.Path.Value)
|
||||
if err != nil {
|
||||
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
|
||||
}
|
||||
if importPath == "github.com/gopherjs/gopherjs/js" {
|
||||
continue
|
||||
}
|
||||
if _, ok := realTestImports[importPath]; !ok {
|
||||
t.Errorf("augmented test package %q imports %q in file %v, but real %q doesn't:\nrealTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realTestImports)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// External test package.
|
||||
{
|
||||
// Import the real external test package, and populate its real import set.
|
||||
bpkg, err := gobuild.Import(pkg, "", gobuild.ImportComment)
|
||||
if err != nil {
|
||||
t.Fatalf("gobuild.Import: %v", err)
|
||||
}
|
||||
realXTestImports := make(stringSet)
|
||||
populateImportSet(bpkg.XTestImports, &realXTestImports)
|
||||
|
||||
// Add _test suffix to import path to cause parseAndAugment to use external test mode.
|
||||
bpkg.ImportPath += "_test"
|
||||
|
||||
// Use parseAndAugment to get a list of augmented AST files, then check only the external test files.
|
||||
fset := token.NewFileSet()
|
||||
files, err := parseAndAugment(bpkg, true, fset)
|
||||
if err != nil {
|
||||
t.Fatalf("github.com/gopherjs/gopherjs/build.parseAndAugment: %v", err)
|
||||
}
|
||||
|
||||
// Verify imports of external test augmented AST files.
|
||||
for _, f := range files {
|
||||
fileName, pkgName := fset.File(f.Pos()).Name(), f.Name.String()
|
||||
xTestFile := strings.HasSuffix(fileName, "_test.go") && strings.HasSuffix(pkgName, "_test")
|
||||
if !xTestFile {
|
||||
continue
|
||||
}
|
||||
for _, imp := range f.Imports {
|
||||
importPath, err := strconv.Unquote(imp.Path.Value)
|
||||
if err != nil {
|
||||
t.Fatalf("strconv.Unquote(%v): %v", imp.Path.Value, err)
|
||||
}
|
||||
if importPath == "github.com/gopherjs/gopherjs/js" {
|
||||
continue
|
||||
}
|
||||
if _, ok := realXTestImports[importPath]; !ok {
|
||||
t.Errorf("augmented external test package %q imports %q in file %v, but real %q doesn't:\nrealXTestImports = %v", bpkg.ImportPath, importPath, fileName, bpkg.ImportPath, realXTestImports)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// stringSet is used to print a set of strings in a more readable way.
|
||||
type stringSet map[string]struct{}
|
||||
|
||||
func (m stringSet) String() string {
|
||||
s := make([]string, 0, len(m))
|
||||
for v := range m {
|
||||
s = append(s, v)
|
||||
}
|
||||
return fmt.Sprintf("%q", s)
|
||||
}
|
@ -1,129 +0,0 @@
|
||||
machine:
|
||||
node:
|
||||
version: 6.2.2
|
||||
environment:
|
||||
SOURCE_MAP_SUPPORT: false
|
||||
|
||||
dependencies:
|
||||
pre:
|
||||
- cd /usr/local && sudo rm -rf go && curl https://storage.googleapis.com/golang/go1.8.linux-amd64.tar.gz | sudo tar -xz && sudo chmod a+w go/src/path/filepath
|
||||
post:
|
||||
- mv ./gopherjs $HOME/bin
|
||||
- npm install --global node-gyp
|
||||
- cd node-syscall && node-gyp rebuild && mkdir -p ~/.node_libraries/ && cp build/Release/syscall.node ~/.node_libraries/syscall.node
|
||||
|
||||
test:
|
||||
override:
|
||||
- diff -u <(echo -n) <(gofmt -d .)
|
||||
- go tool vet *.go # Go package in root directory.
|
||||
- for d in */; do echo $d; done | grep -v tests/ | grep -v third_party/ | xargs go tool vet # All subdirectories except "tests", "third_party".
|
||||
- >
|
||||
gopherjs test --short --minify
|
||||
github.com/gopherjs/gopherjs/tests
|
||||
github.com/gopherjs/gopherjs/tests/main
|
||||
github.com/gopherjs/gopherjs/js
|
||||
archive/tar
|
||||
archive/zip
|
||||
bufio
|
||||
bytes
|
||||
compress/bzip2
|
||||
compress/flate
|
||||
compress/gzip
|
||||
compress/lzw
|
||||
compress/zlib
|
||||
container/heap
|
||||
container/list
|
||||
container/ring
|
||||
crypto/aes
|
||||
crypto/cipher
|
||||
crypto/des
|
||||
crypto/dsa
|
||||
crypto/ecdsa
|
||||
crypto/elliptic
|
||||
crypto/hmac
|
||||
crypto/md5
|
||||
crypto/rand
|
||||
crypto/rc4
|
||||
crypto/rsa
|
||||
crypto/sha1
|
||||
crypto/sha256
|
||||
crypto/sha512
|
||||
crypto/subtle
|
||||
crypto/x509
|
||||
database/sql
|
||||
database/sql/driver
|
||||
debug/dwarf
|
||||
debug/elf
|
||||
debug/macho
|
||||
debug/pe
|
||||
encoding/ascii85
|
||||
encoding/asn1
|
||||
encoding/base32
|
||||
encoding/base64
|
||||
encoding/binary
|
||||
encoding/csv
|
||||
encoding/gob
|
||||
encoding/hex
|
||||
encoding/json
|
||||
encoding/pem
|
||||
encoding/xml
|
||||
errors
|
||||
expvar
|
||||
flag
|
||||
fmt
|
||||
go/ast
|
||||
go/constant
|
||||
go/doc
|
||||
go/format
|
||||
go/parser
|
||||
go/printer
|
||||
go/scanner
|
||||
go/token
|
||||
hash/adler32
|
||||
hash/crc32
|
||||
hash/crc64
|
||||
hash/fnv
|
||||
html
|
||||
html/template
|
||||
image
|
||||
image/color
|
||||
image/draw
|
||||
image/gif
|
||||
image/jpeg
|
||||
image/png
|
||||
index/suffixarray
|
||||
io
|
||||
io/ioutil
|
||||
math
|
||||
math/big
|
||||
math/cmplx
|
||||
math/rand
|
||||
mime
|
||||
mime/multipart
|
||||
mime/quotedprintable
|
||||
net/http/cookiejar
|
||||
net/http/fcgi
|
||||
net/mail
|
||||
net/rpc/jsonrpc
|
||||
net/textproto
|
||||
net/url
|
||||
path
|
||||
path/filepath
|
||||
reflect
|
||||
regexp
|
||||
regexp/syntax
|
||||
sort
|
||||
strconv
|
||||
strings
|
||||
sync
|
||||
sync/atomic
|
||||
testing/quick
|
||||
text/scanner
|
||||
text/tabwriter
|
||||
text/template
|
||||
text/template/parse
|
||||
time
|
||||
unicode
|
||||
unicode/utf16
|
||||
unicode/utf8
|
||||
- go test -v -race ./...
|
@ -1,43 +0,0 @@
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func BoolValue(expr ast.Expr, info *types.Info) (bool, bool) {
|
||||
v := info.Types[expr].Value
|
||||
if v != nil && v.Kind() == constant.Bool {
|
||||
return constant.BoolVal(v), true
|
||||
}
|
||||
switch e := expr.(type) {
|
||||
case *ast.BinaryExpr:
|
||||
switch e.Op {
|
||||
case token.LAND:
|
||||
if b, ok := BoolValue(e.X, info); ok {
|
||||
if !b {
|
||||
return false, true
|
||||
}
|
||||
return BoolValue(e.Y, info)
|
||||
}
|
||||
case token.LOR:
|
||||
if b, ok := BoolValue(e.X, info); ok {
|
||||
if b {
|
||||
return true, true
|
||||
}
|
||||
return BoolValue(e.Y, info)
|
||||
}
|
||||
}
|
||||
case *ast.UnaryExpr:
|
||||
if e.Op == token.NOT {
|
||||
if b, ok := BoolValue(e.X, info); ok {
|
||||
return !b, true
|
||||
}
|
||||
}
|
||||
case *ast.ParenExpr:
|
||||
return BoolValue(e.X, info)
|
||||
}
|
||||
return false, false
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
)
|
||||
|
||||
func HasBreak(n ast.Node) bool {
|
||||
v := hasBreakVisitor{}
|
||||
ast.Walk(&v, n)
|
||||
return v.hasBreak
|
||||
}
|
||||
|
||||
type hasBreakVisitor struct {
|
||||
hasBreak bool
|
||||
}
|
||||
|
||||
func (v *hasBreakVisitor) Visit(node ast.Node) (w ast.Visitor) {
|
||||
if v.hasBreak {
|
||||
return nil
|
||||
}
|
||||
switch n := node.(type) {
|
||||
case *ast.BranchStmt:
|
||||
if n.Tok == token.BREAK && n.Label == nil {
|
||||
v.hasBreak = true
|
||||
return nil
|
||||
}
|
||||
case *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.TypeSwitchStmt, *ast.SelectStmt, ast.Expr:
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
@ -1,70 +0,0 @@
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func EscapingObjects(n ast.Node, info *types.Info) []*types.Var {
|
||||
v := escapeAnalysis{
|
||||
info: info,
|
||||
escaping: make(map[*types.Var]bool),
|
||||
topScope: info.Scopes[n],
|
||||
bottomScopes: make(map[*types.Scope]bool),
|
||||
}
|
||||
ast.Walk(&v, n)
|
||||
var list []*types.Var
|
||||
for obj := range v.escaping {
|
||||
list = append(list, obj)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
type escapeAnalysis struct {
|
||||
info *types.Info
|
||||
escaping map[*types.Var]bool
|
||||
topScope *types.Scope
|
||||
bottomScopes map[*types.Scope]bool
|
||||
}
|
||||
|
||||
func (v *escapeAnalysis) Visit(node ast.Node) (w ast.Visitor) {
|
||||
// huge overapproximation
|
||||
switch n := node.(type) {
|
||||
case *ast.UnaryExpr:
|
||||
if n.Op == token.AND {
|
||||
if _, ok := n.X.(*ast.Ident); ok {
|
||||
return &escapingObjectCollector{v}
|
||||
}
|
||||
}
|
||||
case *ast.FuncLit:
|
||||
v.bottomScopes[v.info.Scopes[n.Type]] = true
|
||||
return &escapingObjectCollector{v}
|
||||
case *ast.ForStmt:
|
||||
v.bottomScopes[v.info.Scopes[n.Body]] = true
|
||||
case *ast.RangeStmt:
|
||||
v.bottomScopes[v.info.Scopes[n.Body]] = true
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
type escapingObjectCollector struct {
|
||||
analysis *escapeAnalysis
|
||||
}
|
||||
|
||||
func (v *escapingObjectCollector) Visit(node ast.Node) (w ast.Visitor) {
|
||||
if id, ok := node.(*ast.Ident); ok {
|
||||
if obj, ok := v.analysis.info.Uses[id].(*types.Var); ok {
|
||||
for s := obj.Parent(); s != nil; s = s.Parent() {
|
||||
if s == v.analysis.topScope {
|
||||
v.analysis.escaping[obj] = true
|
||||
break
|
||||
}
|
||||
if v.analysis.bottomScopes[s] {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
@ -1,254 +0,0 @@
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
|
||||
"github.com/gopherjs/gopherjs/compiler/astutil"
|
||||
"github.com/gopherjs/gopherjs/compiler/typesutil"
|
||||
)
|
||||
|
||||
type continueStmt struct {
|
||||
forStmt *ast.ForStmt
|
||||
analyzeStack []ast.Node
|
||||
}
|
||||
|
||||
type Info struct {
|
||||
*types.Info
|
||||
Pkg *types.Package
|
||||
IsBlocking func(*types.Func) bool
|
||||
HasPointer map[*types.Var]bool
|
||||
FuncDeclInfos map[*types.Func]*FuncInfo
|
||||
FuncLitInfos map[*ast.FuncLit]*FuncInfo
|
||||
InitFuncInfo *FuncInfo
|
||||
allInfos []*FuncInfo
|
||||
comments ast.CommentMap
|
||||
}
|
||||
|
||||
type FuncInfo struct {
|
||||
HasDefer bool
|
||||
Flattened map[ast.Node]bool
|
||||
Blocking map[ast.Node]bool
|
||||
GotoLabel map[*types.Label]bool
|
||||
LocalCalls map[*types.Func][][]ast.Node
|
||||
ContinueStmts []continueStmt
|
||||
p *Info
|
||||
analyzeStack []ast.Node
|
||||
}
|
||||
|
||||
func (info *Info) newFuncInfo() *FuncInfo {
|
||||
funcInfo := &FuncInfo{
|
||||
p: info,
|
||||
Flattened: make(map[ast.Node]bool),
|
||||
Blocking: make(map[ast.Node]bool),
|
||||
GotoLabel: make(map[*types.Label]bool),
|
||||
LocalCalls: make(map[*types.Func][][]ast.Node),
|
||||
}
|
||||
info.allInfos = append(info.allInfos, funcInfo)
|
||||
return funcInfo
|
||||
}
|
||||
|
||||
func AnalyzePkg(files []*ast.File, fileSet *token.FileSet, typesInfo *types.Info, typesPkg *types.Package, isBlocking func(*types.Func) bool) *Info {
|
||||
info := &Info{
|
||||
Info: typesInfo,
|
||||
Pkg: typesPkg,
|
||||
HasPointer: make(map[*types.Var]bool),
|
||||
comments: make(ast.CommentMap),
|
||||
IsBlocking: isBlocking,
|
||||
FuncDeclInfos: make(map[*types.Func]*FuncInfo),
|
||||
FuncLitInfos: make(map[*ast.FuncLit]*FuncInfo),
|
||||
}
|
||||
info.InitFuncInfo = info.newFuncInfo()
|
||||
|
||||
for _, file := range files {
|
||||
for k, v := range ast.NewCommentMap(fileSet, file, file.Comments) {
|
||||
info.comments[k] = v
|
||||
}
|
||||
ast.Walk(info.InitFuncInfo, file)
|
||||
}
|
||||
|
||||
for {
|
||||
done := true
|
||||
for _, funcInfo := range info.allInfos {
|
||||
for obj, calls := range funcInfo.LocalCalls {
|
||||
if len(info.FuncDeclInfos[obj].Blocking) != 0 {
|
||||
for _, call := range calls {
|
||||
funcInfo.markBlocking(call)
|
||||
}
|
||||
delete(funcInfo.LocalCalls, obj)
|
||||
done = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if done {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for _, funcInfo := range info.allInfos {
|
||||
for _, continueStmt := range funcInfo.ContinueStmts {
|
||||
if funcInfo.Blocking[continueStmt.forStmt.Post] {
|
||||
funcInfo.markBlocking(continueStmt.analyzeStack)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return info
|
||||
}
|
||||
|
||||
func (c *FuncInfo) Visit(node ast.Node) ast.Visitor {
|
||||
if node == nil {
|
||||
if len(c.analyzeStack) != 0 {
|
||||
c.analyzeStack = c.analyzeStack[:len(c.analyzeStack)-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
c.analyzeStack = append(c.analyzeStack, node)
|
||||
|
||||
switch n := node.(type) {
|
||||
case *ast.FuncDecl:
|
||||
newInfo := c.p.newFuncInfo()
|
||||
c.p.FuncDeclInfos[c.p.Defs[n.Name].(*types.Func)] = newInfo
|
||||
return newInfo
|
||||
case *ast.FuncLit:
|
||||
newInfo := c.p.newFuncInfo()
|
||||
c.p.FuncLitInfos[n] = newInfo
|
||||
return newInfo
|
||||
case *ast.BranchStmt:
|
||||
switch n.Tok {
|
||||
case token.GOTO:
|
||||
for _, n2 := range c.analyzeStack {
|
||||
c.Flattened[n2] = true
|
||||
}
|
||||
c.GotoLabel[c.p.Uses[n.Label].(*types.Label)] = true
|
||||
case token.CONTINUE:
|
||||
if n.Label != nil {
|
||||
label := c.p.Uses[n.Label].(*types.Label)
|
||||
for i := len(c.analyzeStack) - 1; i >= 0; i-- {
|
||||
if labelStmt, ok := c.analyzeStack[i].(*ast.LabeledStmt); ok && c.p.Defs[labelStmt.Label] == label {
|
||||
if _, ok := labelStmt.Stmt.(*ast.RangeStmt); ok {
|
||||
return nil
|
||||
}
|
||||
stack := make([]ast.Node, len(c.analyzeStack))
|
||||
copy(stack, c.analyzeStack)
|
||||
c.ContinueStmts = append(c.ContinueStmts, continueStmt{labelStmt.Stmt.(*ast.ForStmt), stack})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
for i := len(c.analyzeStack) - 1; i >= 0; i-- {
|
||||
if _, ok := c.analyzeStack[i].(*ast.RangeStmt); ok {
|
||||
return nil
|
||||
}
|
||||
if forStmt, ok := c.analyzeStack[i].(*ast.ForStmt); ok {
|
||||
stack := make([]ast.Node, len(c.analyzeStack))
|
||||
copy(stack, c.analyzeStack)
|
||||
c.ContinueStmts = append(c.ContinueStmts, continueStmt{forStmt, stack})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
case *ast.CallExpr:
|
||||
callTo := func(obj types.Object) {
|
||||
switch o := obj.(type) {
|
||||
case *types.Func:
|
||||
if recv := o.Type().(*types.Signature).Recv(); recv != nil {
|
||||
if _, ok := recv.Type().Underlying().(*types.Interface); ok {
|
||||
c.markBlocking(c.analyzeStack)
|
||||
return
|
||||
}
|
||||
}
|
||||
if o.Pkg() != c.p.Pkg {
|
||||
if c.p.IsBlocking(o) {
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
return
|
||||
}
|
||||
stack := make([]ast.Node, len(c.analyzeStack))
|
||||
copy(stack, c.analyzeStack)
|
||||
c.LocalCalls[o] = append(c.LocalCalls[o], stack)
|
||||
case *types.Var:
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
}
|
||||
switch f := astutil.RemoveParens(n.Fun).(type) {
|
||||
case *ast.Ident:
|
||||
callTo(c.p.Uses[f])
|
||||
case *ast.SelectorExpr:
|
||||
if sel := c.p.Selections[f]; sel != nil && typesutil.IsJsObject(sel.Recv()) {
|
||||
break
|
||||
}
|
||||
callTo(c.p.Uses[f.Sel])
|
||||
case *ast.FuncLit:
|
||||
ast.Walk(c, n.Fun)
|
||||
for _, arg := range n.Args {
|
||||
ast.Walk(c, arg)
|
||||
}
|
||||
if len(c.p.FuncLitInfos[f].Blocking) != 0 {
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
if !astutil.IsTypeExpr(f, c.p.Info) {
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
}
|
||||
case *ast.SendStmt:
|
||||
c.markBlocking(c.analyzeStack)
|
||||
case *ast.UnaryExpr:
|
||||
switch n.Op {
|
||||
case token.AND:
|
||||
if id, ok := astutil.RemoveParens(n.X).(*ast.Ident); ok {
|
||||
c.p.HasPointer[c.p.Uses[id].(*types.Var)] = true
|
||||
}
|
||||
case token.ARROW:
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
case *ast.RangeStmt:
|
||||
if _, ok := c.p.TypeOf(n.X).Underlying().(*types.Chan); ok {
|
||||
c.markBlocking(c.analyzeStack)
|
||||
}
|
||||
case *ast.SelectStmt:
|
||||
for _, s := range n.Body.List {
|
||||
if s.(*ast.CommClause).Comm == nil { // default clause
|
||||
return c
|
||||
}
|
||||
}
|
||||
c.markBlocking(c.analyzeStack)
|
||||
case *ast.CommClause:
|
||||
switch comm := n.Comm.(type) {
|
||||
case *ast.SendStmt:
|
||||
ast.Walk(c, comm.Chan)
|
||||
ast.Walk(c, comm.Value)
|
||||
case *ast.ExprStmt:
|
||||
ast.Walk(c, comm.X.(*ast.UnaryExpr).X)
|
||||
case *ast.AssignStmt:
|
||||
ast.Walk(c, comm.Rhs[0].(*ast.UnaryExpr).X)
|
||||
}
|
||||
for _, s := range n.Body {
|
||||
ast.Walk(c, s)
|
||||
}
|
||||
return nil
|
||||
case *ast.GoStmt:
|
||||
ast.Walk(c, n.Call.Fun)
|
||||
for _, arg := range n.Call.Args {
|
||||
ast.Walk(c, arg)
|
||||
}
|
||||
return nil
|
||||
case *ast.DeferStmt:
|
||||
c.HasDefer = true
|
||||
if funcLit, ok := n.Call.Fun.(*ast.FuncLit); ok {
|
||||
ast.Walk(c, funcLit.Body)
|
||||
}
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *FuncInfo) markBlocking(stack []ast.Node) {
|
||||
for _, n := range stack {
|
||||
c.Blocking[n] = true
|
||||
c.Flattened[n] = true
|
||||
}
|
||||
}
|
@ -1,37 +0,0 @@
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func HasSideEffect(n ast.Node, info *types.Info) bool {
|
||||
v := hasSideEffectVisitor{info: info}
|
||||
ast.Walk(&v, n)
|
||||
return v.hasSideEffect
|
||||
}
|
||||
|
||||
type hasSideEffectVisitor struct {
|
||||
info *types.Info
|
||||
hasSideEffect bool
|
||||
}
|
||||
|
||||
func (v *hasSideEffectVisitor) Visit(node ast.Node) (w ast.Visitor) {
|
||||
if v.hasSideEffect {
|
||||
return nil
|
||||
}
|
||||
switch n := node.(type) {
|
||||
case *ast.CallExpr:
|
||||
if _, isSig := v.info.TypeOf(n.Fun).(*types.Signature); isSig { // skip conversions
|
||||
v.hasSideEffect = true
|
||||
return nil
|
||||
}
|
||||
case *ast.UnaryExpr:
|
||||
if n.Op == token.ARROW {
|
||||
v.hasSideEffect = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
@ -1,48 +0,0 @@
|
||||
package astutil
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func RemoveParens(e ast.Expr) ast.Expr {
|
||||
for {
|
||||
p, isParen := e.(*ast.ParenExpr)
|
||||
if !isParen {
|
||||
return e
|
||||
}
|
||||
e = p.X
|
||||
}
|
||||
}
|
||||
|
||||
func SetType(info *types.Info, t types.Type, e ast.Expr) ast.Expr {
|
||||
info.Types[e] = types.TypeAndValue{Type: t}
|
||||
return e
|
||||
}
|
||||
|
||||
func NewIdent(name string, t types.Type, info *types.Info, pkg *types.Package) *ast.Ident {
|
||||
ident := ast.NewIdent(name)
|
||||
info.Types[ident] = types.TypeAndValue{Type: t}
|
||||
obj := types.NewVar(0, pkg, name, t)
|
||||
info.Uses[ident] = obj
|
||||
return ident
|
||||
}
|
||||
|
||||
func IsTypeExpr(expr ast.Expr, info *types.Info) bool {
|
||||
switch e := expr.(type) {
|
||||
case *ast.ArrayType, *ast.ChanType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.StructType:
|
||||
return true
|
||||
case *ast.StarExpr:
|
||||
return IsTypeExpr(e.X, info)
|
||||
case *ast.Ident:
|
||||
_, ok := info.Uses[e].(*types.TypeName)
|
||||
return ok
|
||||
case *ast.SelectorExpr:
|
||||
_, ok := info.Uses[e.Sel].(*types.TypeName)
|
||||
return ok
|
||||
case *ast.ParenExpr:
|
||||
return IsTypeExpr(e.X, info)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
@ -1,293 +0,0 @@
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/gob"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/gopherjs/gopherjs/compiler/prelude"
|
||||
"github.com/gopherjs/gopherjs/third_party/importer"
|
||||
)
|
||||
|
||||
var sizes32 = &types.StdSizes{WordSize: 4, MaxAlign: 8}
|
||||
var reservedKeywords = make(map[string]bool)
|
||||
var _ = ___GOPHERJS_REQUIRES_GO_VERSION_1_8___ // Compile error on other Go versions, because they're not supported.
|
||||
|
||||
func init() {
|
||||
for _, keyword := range []string{"abstract", "arguments", "boolean", "break", "byte", "case", "catch", "char", "class", "const", "continue", "debugger", "default", "delete", "do", "double", "else", "enum", "eval", "export", "extends", "false", "final", "finally", "float", "for", "function", "goto", "if", "implements", "import", "in", "instanceof", "int", "interface", "let", "long", "native", "new", "null", "package", "private", "protected", "public", "return", "short", "static", "super", "switch", "synchronized", "this", "throw", "throws", "transient", "true", "try", "typeof", "undefined", "var", "void", "volatile", "while", "with", "yield"} {
|
||||
reservedKeywords[keyword] = true
|
||||
}
|
||||
}
|
||||
|
||||
type ErrorList []error
|
||||
|
||||
func (err ErrorList) Error() string {
|
||||
return err[0].Error()
|
||||
}
|
||||
|
||||
type Archive struct {
|
||||
ImportPath string
|
||||
Name string
|
||||
Imports []string
|
||||
ExportData []byte
|
||||
Declarations []*Decl
|
||||
IncJSCode []byte
|
||||
FileSet []byte
|
||||
Minified bool
|
||||
}
|
||||
|
||||
type Decl struct {
|
||||
FullName string
|
||||
Vars []string
|
||||
DeclCode []byte
|
||||
MethodListCode []byte
|
||||
TypeInitCode []byte
|
||||
InitCode []byte
|
||||
DceObjectFilter string
|
||||
DceMethodFilter string
|
||||
DceDeps []string
|
||||
Blocking bool
|
||||
}
|
||||
|
||||
type Dependency struct {
|
||||
Pkg string
|
||||
Type string
|
||||
Method string
|
||||
}
|
||||
|
||||
func ImportDependencies(archive *Archive, importPkg func(string) (*Archive, error)) ([]*Archive, error) {
|
||||
var deps []*Archive
|
||||
paths := make(map[string]bool)
|
||||
var collectDependencies func(path string) error
|
||||
collectDependencies = func(path string) error {
|
||||
if paths[path] {
|
||||
return nil
|
||||
}
|
||||
dep, err := importPkg(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, imp := range dep.Imports {
|
||||
if err := collectDependencies(imp); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
deps = append(deps, dep)
|
||||
paths[dep.ImportPath] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := collectDependencies("runtime"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, imp := range archive.Imports {
|
||||
if err := collectDependencies(imp); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
deps = append(deps, archive)
|
||||
return deps, nil
|
||||
}
|
||||
|
||||
type dceInfo struct {
|
||||
decl *Decl
|
||||
objectFilter string
|
||||
methodFilter string
|
||||
}
|
||||
|
||||
func WriteProgramCode(pkgs []*Archive, w *SourceMapFilter) error {
|
||||
mainPkg := pkgs[len(pkgs)-1]
|
||||
minify := mainPkg.Minified
|
||||
|
||||
byFilter := make(map[string][]*dceInfo)
|
||||
var pendingDecls []*Decl
|
||||
for _, pkg := range pkgs {
|
||||
for _, d := range pkg.Declarations {
|
||||
if d.DceObjectFilter == "" && d.DceMethodFilter == "" {
|
||||
pendingDecls = append(pendingDecls, d)
|
||||
continue
|
||||
}
|
||||
info := &dceInfo{decl: d}
|
||||
if d.DceObjectFilter != "" {
|
||||
info.objectFilter = pkg.ImportPath + "." + d.DceObjectFilter
|
||||
byFilter[info.objectFilter] = append(byFilter[info.objectFilter], info)
|
||||
}
|
||||
if d.DceMethodFilter != "" {
|
||||
info.methodFilter = pkg.ImportPath + "." + d.DceMethodFilter
|
||||
byFilter[info.methodFilter] = append(byFilter[info.methodFilter], info)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dceSelection := make(map[*Decl]struct{})
|
||||
for len(pendingDecls) != 0 {
|
||||
d := pendingDecls[len(pendingDecls)-1]
|
||||
pendingDecls = pendingDecls[:len(pendingDecls)-1]
|
||||
|
||||
dceSelection[d] = struct{}{}
|
||||
|
||||
for _, dep := range d.DceDeps {
|
||||
if infos, ok := byFilter[dep]; ok {
|
||||
delete(byFilter, dep)
|
||||
for _, info := range infos {
|
||||
if info.objectFilter == dep {
|
||||
info.objectFilter = ""
|
||||
}
|
||||
if info.methodFilter == dep {
|
||||
info.methodFilter = ""
|
||||
}
|
||||
if info.objectFilter == "" && info.methodFilter == "" {
|
||||
pendingDecls = append(pendingDecls, info.decl)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := w.Write([]byte("\"use strict\";\n(function() {\n\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(removeWhitespace([]byte(prelude.Prelude), minify)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// write packages
|
||||
for _, pkg := range pkgs {
|
||||
if err := WritePkgCode(pkg, dceSelection, minify, w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := w.Write([]byte("$synthesizeMethods();\nvar $mainPkg = $packages[\"" + string(mainPkg.ImportPath) + "\"];\n$packages[\"runtime\"].$init();\n$go($mainPkg.$init, []);\n$flushConsole();\n\n}).call(this);\n")); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WritePkgCode(pkg *Archive, dceSelection map[*Decl]struct{}, minify bool, w *SourceMapFilter) error {
|
||||
if w.MappingCallback != nil && pkg.FileSet != nil {
|
||||
w.fileSet = token.NewFileSet()
|
||||
if err := w.fileSet.Read(json.NewDecoder(bytes.NewReader(pkg.FileSet)).Decode); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
if _, err := w.Write(pkg.IncJSCode); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("$packages[\"%s\"] = (function() {\n", pkg.ImportPath)), minify)); err != nil {
|
||||
return err
|
||||
}
|
||||
vars := []string{"$pkg = {}", "$init"}
|
||||
var filteredDecls []*Decl
|
||||
for _, d := range pkg.Declarations {
|
||||
if _, ok := dceSelection[d]; ok {
|
||||
vars = append(vars, d.Vars...)
|
||||
filteredDecls = append(filteredDecls, d)
|
||||
}
|
||||
}
|
||||
if _, err := w.Write(removeWhitespace([]byte(fmt.Sprintf("\tvar %s;\n", strings.Join(vars, ", "))), minify)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, d := range filteredDecls {
|
||||
if _, err := w.Write(d.DeclCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, d := range filteredDecls {
|
||||
if _, err := w.Write(d.MethodListCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, d := range filteredDecls {
|
||||
if _, err := w.Write(d.TypeInitCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := w.Write(removeWhitespace([]byte("\t$init = function() {\n\t\t$pkg.$init = function() {};\n\t\t/* */ var $f, $c = false, $s = 0, $r; if (this !== undefined && this.$blk !== undefined) { $f = this; $c = true; $s = $f.$s; $r = $f.$r; } s: while (true) { switch ($s) { case 0:\n"), minify)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, d := range filteredDecls {
|
||||
if _, err := w.Write(d.InitCode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := w.Write(removeWhitespace([]byte("\t\t/* */ } return; } if ($f === undefined) { $f = { $blk: $init }; } $f.$s = $s; $f.$r = $r; return $f;\n\t};\n\t$pkg.$init = $init;\n\treturn $pkg;\n})();"), minify)); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := w.Write([]byte("\n")); err != nil { // keep this \n even when minified
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ReadArchive(filename, path string, r io.Reader, packages map[string]*types.Package) (*Archive, error) {
|
||||
var a Archive
|
||||
if err := gob.NewDecoder(r).Decode(&a); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
_, packages[path], err = importer.ImportData(packages, a.ExportData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &a, nil
|
||||
}
|
||||
|
||||
func WriteArchive(a *Archive, w io.Writer) error {
|
||||
return gob.NewEncoder(w).Encode(a)
|
||||
}
|
||||
|
||||
type SourceMapFilter struct {
|
||||
Writer io.Writer
|
||||
MappingCallback func(generatedLine, generatedColumn int, originalPos token.Position)
|
||||
line int
|
||||
column int
|
||||
fileSet *token.FileSet
|
||||
}
|
||||
|
||||
func (f *SourceMapFilter) Write(p []byte) (n int, err error) {
|
||||
var n2 int
|
||||
for {
|
||||
i := bytes.IndexByte(p, '\b')
|
||||
w := p
|
||||
if i != -1 {
|
||||
w = p[:i]
|
||||
}
|
||||
|
||||
n2, err = f.Writer.Write(w)
|
||||
n += n2
|
||||
for {
|
||||
i := bytes.IndexByte(w, '\n')
|
||||
if i == -1 {
|
||||
f.column += len(w)
|
||||
break
|
||||
}
|
||||
f.line++
|
||||
f.column = 0
|
||||
w = w[i+1:]
|
||||
}
|
||||
|
||||
if err != nil || i == -1 {
|
||||
return
|
||||
}
|
||||
if f.MappingCallback != nil {
|
||||
f.MappingCallback(f.line+1, f.column, f.fileSet.Position(token.Pos(binary.BigEndian.Uint32(p[i+1:i+5]))))
|
||||
}
|
||||
p = p[i+5:]
|
||||
n += 5
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -1,106 +0,0 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/token"
|
||||
"go/types"
|
||||
|
||||
"github.com/gopherjs/gopherjs/compiler/astutil"
|
||||
)
|
||||
|
||||
func Assign(stmt ast.Stmt, info *types.Info, pkg *types.Package) ast.Stmt {
|
||||
if s, ok := stmt.(*ast.AssignStmt); ok && s.Tok != token.ASSIGN && s.Tok != token.DEFINE {
|
||||
var op token.Token
|
||||
switch s.Tok {
|
||||
case token.ADD_ASSIGN:
|
||||
op = token.ADD
|
||||
case token.SUB_ASSIGN:
|
||||
op = token.SUB
|
||||
case token.MUL_ASSIGN:
|
||||
op = token.MUL
|
||||
case token.QUO_ASSIGN:
|
||||
op = token.QUO
|
||||
case token.REM_ASSIGN:
|
||||
op = token.REM
|
||||
case token.AND_ASSIGN:
|
||||
op = token.AND
|
||||
case token.OR_ASSIGN:
|
||||
op = token.OR
|
||||
case token.XOR_ASSIGN:
|
||||
op = token.XOR
|
||||
case token.SHL_ASSIGN:
|
||||
op = token.SHL
|
||||
case token.SHR_ASSIGN:
|
||||
op = token.SHR
|
||||
case token.AND_NOT_ASSIGN:
|
||||
op = token.AND_NOT
|
||||
default:
|
||||
panic(s.Tok)
|
||||
}
|
||||
|
||||
var list []ast.Stmt
|
||||
|
||||
var viaTmpVars func(expr ast.Expr, name string) ast.Expr
|
||||
viaTmpVars = func(expr ast.Expr, name string) ast.Expr {
|
||||
switch e := astutil.RemoveParens(expr).(type) {
|
||||
case *ast.IndexExpr:
|
||||
return astutil.SetType(info, info.TypeOf(e), &ast.IndexExpr{
|
||||
X: viaTmpVars(e.X, "_slice"),
|
||||
Index: viaTmpVars(e.Index, "_index"),
|
||||
})
|
||||
|
||||
case *ast.SelectorExpr:
|
||||
sel, ok := info.Selections[e]
|
||||
if !ok {
|
||||
// qualified identifier
|
||||
return e
|
||||
}
|
||||
newSel := &ast.SelectorExpr{
|
||||
X: viaTmpVars(e.X, "_struct"),
|
||||
Sel: e.Sel,
|
||||
}
|
||||
info.Selections[newSel] = sel
|
||||
return astutil.SetType(info, info.TypeOf(e), newSel)
|
||||
|
||||
case *ast.StarExpr:
|
||||
return astutil.SetType(info, info.TypeOf(e), &ast.StarExpr{
|
||||
X: viaTmpVars(e.X, "_ptr"),
|
||||
})
|
||||
|
||||
case *ast.Ident, *ast.BasicLit:
|
||||
return e
|
||||
|
||||
default:
|
||||
tmpVar := astutil.NewIdent(name, info.TypeOf(e), info, pkg)
|
||||
list = append(list, &ast.AssignStmt{
|
||||
Lhs: []ast.Expr{tmpVar},
|
||||
Tok: token.DEFINE,
|
||||
Rhs: []ast.Expr{e},
|
||||
})
|
||||
return tmpVar
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
lhs := viaTmpVars(s.Lhs[0], "_val")
|
||||
|
||||
list = append(list, &ast.AssignStmt{
|
||||
Lhs: []ast.Expr{lhs},
|
||||
Tok: token.ASSIGN,
|
||||
Rhs: []ast.Expr{
|
||||
astutil.SetType(info, info.TypeOf(s.Lhs[0]), &ast.BinaryExpr{
|
||||
X: lhs,
|
||||
Op: op,
|
||||
Y: astutil.SetType(info, info.TypeOf(s.Rhs[0]), &ast.ParenExpr{
|
||||
X: s.Rhs[0],
|
||||
}),
|
||||
}),
|
||||
},
|
||||
})
|
||||
|
||||
return &ast.BlockStmt{
|
||||
List: list,
|
||||
}
|
||||
}
|
||||
return stmt
|
||||
}
|
@ -1,39 +0,0 @@
|
||||
package filter
|
||||
|
||||
import (
|
||||
"go/ast"
|
||||
"go/constant"
|
||||
"go/token"
|
||||
"go/types"
|
||||
)
|
||||
|
||||
func IncDecStmt(stmt ast.Stmt, info *types.Info) ast.Stmt {
|
||||
if s, ok := stmt.(*ast.IncDecStmt); ok {
|
||||
t := info.TypeOf(s.X)
|
||||
if iExpr, isIExpr := s.X.(*ast.IndexExpr); isIExpr {
|
||||
switch u := info.TypeOf(iExpr.X).Underlying().(type) {
|
||||
case *types.Array:
|
||||
t = u.Elem()
|
||||
case *types.Slice:
|
||||
t = u.Elem()
|
||||
case *types.Map:
|
||||
t = u.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
tok := token.ADD_ASSIGN
|
||||
if s.Tok == token.DEC {
|
||||
tok = token.SUB_ASSIGN
|
||||
}
|
||||
|
||||
one := &ast.BasicLit{Kind: token.INT}
|
||||
info.Types[one] = types.TypeAndValue{Type: t, Value: constant.MakeInt64(1)}
|
||||
|
||||
return &ast.AssignStmt{
|
||||
Lhs: []ast.Expr{s.X},
|
||||
Tok: tok,
|
||||
Rhs: []ast.Expr{one},
|
||||
}
|
||||
}
|
||||
return stmt
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue