parent
3744b1b28c
commit
97132fd582
@ -1,39 +0,0 @@
|
||||
package main
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
type Int64 struct {
|
||||
v int64
|
||||
}
|
||||
|
||||
func (a *Int64) Get() int64 {
|
||||
return atomic.LoadInt64(&a.v)
|
||||
}
|
||||
|
||||
func (a *Int64) Set(v int64) {
|
||||
atomic.StoreInt64(&a.v, v)
|
||||
}
|
||||
|
||||
func (a *Int64) CompareAndSwap(o, n int64) bool {
|
||||
return atomic.CompareAndSwapInt64(&a.v, o, n)
|
||||
}
|
||||
|
||||
func (a *Int64) Swap(v int64) int64 {
|
||||
return atomic.SwapInt64(&a.v, v)
|
||||
}
|
||||
|
||||
func (a *Int64) Add(v int64) int64 {
|
||||
return atomic.AddInt64(&a.v, v)
|
||||
}
|
||||
|
||||
func (a *Int64) Sub(v int64) int64 {
|
||||
return a.Add(-v)
|
||||
}
|
||||
|
||||
func (a *Int64) Incr() int64 {
|
||||
return a.Add(1)
|
||||
}
|
||||
|
||||
func (a *Int64) Decr() int64 {
|
||||
return a.Add(-1)
|
||||
}
|
@ -0,0 +1,56 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// func TestBroadcast(t *testing.T) {
|
||||
// bs := NewBroadcastString()
|
||||
// bs.WriteMessage("hello")
|
||||
// time.Sleep(10 * time.Millisecond)
|
||||
// c1 := bs.AddListener(nil)
|
||||
// go func() {
|
||||
// bs.WriteMessage("world")
|
||||
// }()
|
||||
// message := <-c1
|
||||
// if message != "world" {
|
||||
// t.Fatalf("expect message world, but got %s", message)
|
||||
// }
|
||||
// c2 := bs.AddListener(nil)
|
||||
// go func() {
|
||||
// bs.WriteMessage("tab")
|
||||
// }()
|
||||
|
||||
// // test write multi
|
||||
// wg := sync.WaitGroup{}
|
||||
// wg.Add(2)
|
||||
// go func() {
|
||||
// message = <-c2
|
||||
// if message != "tab" {
|
||||
// t.Errorf("expect tab, but got %s", message)
|
||||
// }
|
||||
// wg.Done()
|
||||
// }()
|
||||
|
||||
// go func() {
|
||||
// message = <-c1
|
||||
// if message != "tab" {
|
||||
// t.Errorf("expect tab, but got %s", message)
|
||||
// }
|
||||
// wg.Done()
|
||||
// }()
|
||||
// wg.Wait()
|
||||
// }
|
||||
|
||||
func TestRingBuffer(t *testing.T) {
|
||||
Convey("Write some string to ring buffer", t, func() {
|
||||
// buf := rbuf.NewFixedSizeRingBuf(5)
|
||||
// buf.Write([]byte("abcde"))
|
||||
// So(string(buf.Bytes()), ShouldEqual, "abcde")
|
||||
// buf.Advance(2)
|
||||
// buf.Write([]byte("fg"))
|
||||
// So(string(buf.Bytes()), ShouldEqual, "cdefg")
|
||||
})
|
||||
}
|
@ -1,40 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
// BufferPool implements a pool of bytes.Buffers in the form of a bounded
|
||||
// channel.
|
||||
type BufferPool struct {
|
||||
c chan *bytes.Buffer
|
||||
}
|
||||
|
||||
// NewBufferPool creates a new BufferPool bounded to the given size.
|
||||
func NewBufferPool(size int) (bp *BufferPool) {
|
||||
return &BufferPool{
|
||||
c: make(chan *bytes.Buffer, size),
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets a Buffer from the BufferPool, or creates a new one if none are
|
||||
// available in the pool.
|
||||
func (bp *BufferPool) Get() (b *bytes.Buffer) {
|
||||
select {
|
||||
case b = <-bp.c:
|
||||
// reuse existing buffer
|
||||
default:
|
||||
// create new buffer
|
||||
b = bytes.NewBuffer([]byte{})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Put returns the given Buffer to the BufferPool.
|
||||
func (bp *BufferPool) Put(b *bytes.Buffer) {
|
||||
b.Reset()
|
||||
select {
|
||||
case bp.c <- b:
|
||||
default: // Discard the buffer if the pool is full.
|
||||
}
|
||||
}
|
After Width: | Height: | Size: 1.3 MiB |
Before Width: | Height: | Size: 396 KiB |
Before Width: | Height: | Size: 234 KiB |
@ -0,0 +1,70 @@
|
||||
hash: ad7063d34838040bf683183342f3814298179801e2e055f29bca796456c88300
|
||||
updated: 2017-08-11T19:08:31.193345249+08:00
|
||||
imports:
|
||||
- name: github.com/codeskyblue/kexec
|
||||
version: 863094f94c7fb7c235764bf8f0f79cccea78c8eb
|
||||
- name: github.com/equinox-io/equinox
|
||||
version: 6f97d0d3970881d3e53dd6f547a41109eb055e54
|
||||
subpackages:
|
||||
- internal/go-update
|
||||
- internal/go-update/internal/binarydist
|
||||
- internal/go-update/internal/osext
|
||||
- internal/osext
|
||||
- proto
|
||||
- name: github.com/franela/goreq
|
||||
version: b5b0f5eb2d16f20345cce0a544a75163579c0b00
|
||||
- name: github.com/glycerine/rbuf
|
||||
version: 96ad00d7fa74f7dd9857f2b6068451062b4ebc5d
|
||||
- name: github.com/go-yaml/yaml
|
||||
version: 25c4ec802a7d637f88d584ab26798e94ad14c13b
|
||||
- name: github.com/goji/httpauth
|
||||
version: 2da839ab0f4df05a6db5eb277995589dadbd4fb9
|
||||
- name: github.com/gorilla/context
|
||||
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
|
||||
- name: github.com/gorilla/mux
|
||||
version: ac112f7d75a0714af1bd86ab17749b31f7809640
|
||||
- name: github.com/gorilla/websocket
|
||||
version: a69d9f6de432e2c6b296a947d8a5ee88f68522cf
|
||||
- name: github.com/kennygrant/sanitize
|
||||
version: 6a0bfdde8629a3a3a7418a7eae45c54154692514
|
||||
- name: github.com/mitchellh/go-ps
|
||||
version: 4fdf99ab29366514c69ccccddab5dc58b8d84062
|
||||
- name: github.com/qiniu/log
|
||||
version: a304a74568d6982c5b89de1c68ac8fca3add196a
|
||||
- name: github.com/shurcooL/httpfs
|
||||
version: bc35257962c2dea93e81c976b72c7c6eac45fd8a
|
||||
subpackages:
|
||||
- vfsutil
|
||||
- name: github.com/shurcooL/vfsgen
|
||||
version: 385e5833a54aaba5860ca26036b8e8b72135ab96
|
||||
- name: github.com/urfave/cli
|
||||
version: cfb38830724cc34fedffe9a2a29fb54fa9169cd1
|
||||
- name: golang.org/x/net
|
||||
version: 1c05540f6879653db88113bc4a2b70aec4bd491f
|
||||
subpackages:
|
||||
- html
|
||||
- html/atom
|
||||
- name: golang.org/x/tools
|
||||
version: 5831d16d18029819d39f99bdc2060b8eff410b6b
|
||||
subpackages:
|
||||
- godoc/vfs
|
||||
testImports:
|
||||
- name: github.com/gopherjs/gopherjs
|
||||
version: 2b1d432c8a82c9bff0b0baffaeb3ec6e92974112
|
||||
subpackages:
|
||||
- js
|
||||
- name: github.com/jtolds/gls
|
||||
version: 77f18212c9c7edc9bd6a33d383a7b545ce62f064
|
||||
- name: github.com/smartystreets/assertions
|
||||
version: 1540c14c9f1bd1abeba90f29762a4c6e50582303
|
||||
subpackages:
|
||||
- internal/go-render/render
|
||||
- internal/oglematchers
|
||||
- name: github.com/smartystreets/goconvey
|
||||
version: 9e8dc3f972df6c8fcc0375ef492c24d0bb204857
|
||||
subpackages:
|
||||
- convey
|
||||
- convey/gotest
|
||||
- convey/reporting
|
||||
- name: github.com/smartystreets/logging
|
||||
version: ac3a674540761aa0b4382094ba4795f917e85c7f
|
@ -0,0 +1,22 @@
|
||||
module gosuv
|
||||
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/codeskyblue/kexec v0.0.0-20160908020525-863094f94c7f
|
||||
github.com/equinox-io/equinox v0.0.0-20151227023206-6f97d0d39708
|
||||
github.com/franela/goreq v0.0.0-20170418064916-b5b0f5eb2d16
|
||||
github.com/glycerine/rbuf v0.0.0-20170809002439-96ad00d7fa74
|
||||
github.com/go-yaml/yaml v0.0.0-20170721122051-25c4ec802a7d
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d
|
||||
github.com/gorilla/context v1.1.1
|
||||
github.com/gorilla/mux v1.4.1-0.20170704074345-ac112f7d75a0
|
||||
github.com/gorilla/websocket v1.2.1-0.20170718202341-a69d9f6de432
|
||||
github.com/kennygrant/sanitize v0.0.0-20170120101633-6a0bfdde8629
|
||||
github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936
|
||||
github.com/shurcooL/httpfs v0.0.0-20170514040059-bc35257962c2
|
||||
github.com/shurcooL/vfsgen v0.0.0-20170728162603-385e5833a54a
|
||||
github.com/urfave/cli v1.20.0
|
||||
golang.org/x/net v0.0.0-20170809000501-1c05540f6879
|
||||
golang.org/x/tools v0.0.0-20170808144645-5831d16d1802
|
||||
)
|
@ -1,152 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
log "github.com/qiniu/log"
|
||||
)
|
||||
|
||||
type Bool struct {
|
||||
c Int64
|
||||
}
|
||||
|
||||
func (b *Bool) Get() bool {
|
||||
return b.c.Get() != 0
|
||||
}
|
||||
|
||||
func (b *Bool) toInt64(v bool) int64 {
|
||||
if v {
|
||||
return 1
|
||||
} else {
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Bool) Set(v bool) {
|
||||
b.c.Set(b.toInt64(v))
|
||||
}
|
||||
|
||||
func (b *Bool) CompareAndSwap(o, n bool) bool {
|
||||
return b.c.CompareAndSwap(b.toInt64(o), b.toInt64(n))
|
||||
}
|
||||
|
||||
func (b *Bool) Swap(v bool) bool {
|
||||
return b.c.Swap(b.toInt64(v)) != 0
|
||||
}
|
||||
|
||||
var bufferPool *BufferPool
|
||||
|
||||
func init() {
|
||||
// 4000行日志缓存
|
||||
bufferPool = NewBufferPool(4000)
|
||||
}
|
||||
|
||||
type MergeWriter struct {
|
||||
lines chan *bytes.Buffer
|
||||
writer io.Writer
|
||||
closed Bool
|
||||
}
|
||||
|
||||
func NewMergeWriter(writer io.Writer) *MergeWriter {
|
||||
merger := &MergeWriter{
|
||||
lines: make(chan *bytes.Buffer, 1000),
|
||||
writer: writer,
|
||||
}
|
||||
merger.closed.Set(false)
|
||||
merger.drainLines()
|
||||
return merger
|
||||
}
|
||||
|
||||
func (m *MergeWriter) Close() {
|
||||
// log.Printf("Close MergeWriter")
|
||||
if m.closed.CompareAndSwap(false, true) {
|
||||
// log.Printf("Close lines chan")
|
||||
close(m.lines)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MergeWriter) WriteStrLine(line string) {
|
||||
if m.closed.Get() {
|
||||
return
|
||||
} else {
|
||||
buffer := bufferPool.Get()
|
||||
buffer.WriteString(line)
|
||||
m.lines <- buffer
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MergeWriter) WriteLine(line *bytes.Buffer) {
|
||||
if m.closed.Get() {
|
||||
// 需要回收Buffer
|
||||
// log.Printf("Write to closed MergeWrite...")
|
||||
bufferPool.Put(line)
|
||||
return
|
||||
} else {
|
||||
m.lines <- line
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MergeWriter) drainLines() {
|
||||
go func() {
|
||||
for line := range m.lines {
|
||||
m.writer.Write(line.Bytes())
|
||||
// 回收
|
||||
bufferPool.Put(line)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// 创建新的BufferWriter
|
||||
func (m *MergeWriter) NewWriter(index int) io.Writer {
|
||||
writer := &BufferWriter{
|
||||
merge: m,
|
||||
prefix: fmt.Sprintf(" [P%02d] ", index),
|
||||
}
|
||||
|
||||
// 分配
|
||||
writer.Buffer = bufferPool.Get()
|
||||
writer.Buffer.WriteString(writer.prefix)
|
||||
return writer
|
||||
}
|
||||
|
||||
type BufferWriter struct {
|
||||
Buffer *bytes.Buffer
|
||||
prefix string
|
||||
merge *MergeWriter
|
||||
}
|
||||
|
||||
func (b *BufferWriter) Write(p []byte) (n int, err error) {
|
||||
n = len(p)
|
||||
|
||||
for len(p) > 0 {
|
||||
index := bytes.IndexByte(p, '\n')
|
||||
if index != -1 {
|
||||
// 写完完整的一行
|
||||
_, err = b.Buffer.Write(p[0 : index+1])
|
||||
if err != nil {
|
||||
log.Error(err, "Writer Buffer failed")
|
||||
return n, err
|
||||
}
|
||||
|
||||
// 将buffer转移到merge中
|
||||
b.merge.WriteLine(b.Buffer)
|
||||
|
||||
// 分配:写入新数据
|
||||
b.Buffer = bufferPool.Get()
|
||||
b.Buffer.WriteString(time.Now().Format("15:04:05") + b.prefix)
|
||||
p = p[index+1:]
|
||||
} else {
|
||||
// 剩下不足一行,一口气全部写入
|
||||
_, err = b.Buffer.Write(p)
|
||||
if err != nil {
|
||||
log.Error(err, "Writer Buffer failed")
|
||||
return n, err
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return n, nil
|
||||
}
|
@ -1,291 +1,263 @@
|
||||
/* index.js */
|
||||
var W = {};
|
||||
var testPrograms = [{
|
||||
program: {
|
||||
name: "gggg",
|
||||
command: "",
|
||||
dir: "",
|
||||
autoStart: true,
|
||||
},
|
||||
status: "running",
|
||||
}];
|
||||
|
||||
var vm = new Vue({
|
||||
el: "#app",
|
||||
data: {
|
||||
isConnectionAlive: true,
|
||||
log: {
|
||||
content: '',
|
||||
follow: true,
|
||||
line_count: 0,
|
||||
},
|
||||
programs: [],
|
||||
slaves: [],
|
||||
edit: {
|
||||
program: null,
|
||||
el: "#app",
|
||||
data: {
|
||||
isConnectionAlive: true,
|
||||
log: {
|
||||
content: '',
|
||||
follow: true,
|
||||
line_count: 0,
|
||||
},
|
||||
programs: [],
|
||||
edit: {
|
||||
program: null,
|
||||
}
|
||||
},
|
||||
methods: {
|
||||
addNewProgram: function() {
|
||||
console.log("Add")
|
||||
var form = $("#formNewProgram");
|
||||
form.submit(function(e) {
|
||||
e.preventDefault();
|
||||
$("#newProgram").modal('hide')
|
||||
return false;
|
||||
});
|
||||
},
|
||||
showEditProgram: function(p) {
|
||||
this.edit.program = Object.assign({}, p); // here require polyfill.min.js
|
||||
$("#programEdit").modal('show');
|
||||
},
|
||||
editProgram: function() {
|
||||
var p = this.edit.program;
|
||||
$.ajax({
|
||||
url: "/api/programs/" + p.name,
|
||||
method: "PUT",
|
||||
data: JSON.stringify(p),
|
||||
})
|
||||
.then(function(ret) {
|
||||
console.log(ret);
|
||||
$("#programEdit").modal('hide');
|
||||
})
|
||||
// console.log(JSON.stringify(p));
|
||||
},
|
||||
updateBreadcrumb: function() {
|
||||
var pathname = decodeURI(location.pathname || "/");
|
||||
var parts = pathname.split('/');
|
||||
this.breadcrumb = [];
|
||||
if (pathname == "/") {
|
||||
return this.breadcrumb;
|
||||
}
|
||||
var i = 2;
|
||||
for (; i <= parts.length; i += 1) {
|
||||
var name = parts[i - 1];
|
||||
var path = parts.slice(0, i).join('/');
|
||||
this.breadcrumb.push({
|
||||
name: name + (i == parts.length ? ' /' : ''),
|
||||
path: path
|
||||
})
|
||||
}
|
||||
return this.breadcrumb;
|
||||
},
|
||||
refresh: function() {
|
||||
// ws.send("Hello")
|
||||
$.ajax({
|
||||
url: "/api/programs",
|
||||
success: function(data) {
|
||||
vm.programs = data;
|
||||
Vue.nextTick(function() {
|
||||
$('[data-toggle="tooltip"]').tooltip()
|
||||
})
|
||||
}
|
||||
});
|
||||
},
|
||||
methods: {
|
||||
addNewProgram: function (slave) {
|
||||
$("#newProgram").modal({
|
||||
show: true,
|
||||
backdrop: 'static',
|
||||
}).data("slave", slave);
|
||||
},
|
||||
formNewProgram: function () {
|
||||
var url = "/api/programs",
|
||||
data = $("#formNewProgram").serialize(),
|
||||
name = $("#formNewProgram").find("[name=name]").val(),
|
||||
disablechars = "./\\";
|
||||
if (!name) {
|
||||
alert("\"" + name + "\" is empty ")
|
||||
return false
|
||||
}
|
||||
if (disablechars.indexOf(name[0]) != -1) {
|
||||
alert("\"" + name + "\" Can't starts with \".\" \"/\" \"\\\"")
|
||||
return false
|
||||
}
|
||||
var slave = $("#newProgram").data("slave");
|
||||
if (slave !== undefined && slave !== "") {
|
||||
url = "/distributed/" + slave + url;
|
||||
}
|
||||
$.ajax({
|
||||
type: "POST",
|
||||
url: url,
|
||||
data: data,
|
||||
success: function (data) {
|
||||
if (data.status === 0) {
|
||||
$("#newProgram").modal('hide');
|
||||
} else {
|
||||
window.alert(data.error);
|
||||
}
|
||||
},
|
||||
error: function (err) {
|
||||
alert(err.responseText)
|
||||
}
|
||||
});
|
||||
},
|
||||
|
||||
showEditProgram: function (p, slave) {
|
||||
this.edit.program = Object.assign({}, p); // here require polyfill.min.js
|
||||
$("#programEdit").data("slave",slave).modal('show');
|
||||
},
|
||||
|
||||
editProgram: function () {
|
||||
var p = this.edit.program;
|
||||
var requestUrl = "/api/programs/" + p.name
|
||||
var slave = $("#programEdit").data("slave");
|
||||
if (slave !== undefined && slave !== "") {
|
||||
requestUrl = "/distributed/" + slave + requestUrl;
|
||||
}
|
||||
$.ajax({
|
||||
url: requestUrl,
|
||||
method: "PUT",
|
||||
data: JSON.stringify(p),
|
||||
}).then(function (ret) {
|
||||
$("#programEdit").modal('hide');
|
||||
})
|
||||
},
|
||||
updateBreadcrumb: function () {
|
||||
var pathname = decodeURI(location.pathname || "/");
|
||||
var parts = pathname.split('/');
|
||||
this.breadcrumb = [];
|
||||
if (pathname == "/") {
|
||||
return this.breadcrumb;
|
||||
}
|
||||
var i = 2;
|
||||
for (; i <= parts.length; i += 1) {
|
||||
var name = parts[i - 1];
|
||||
var path = parts.slice(0, i).join('/');
|
||||
this.breadcrumb.push({
|
||||
name: name + (i == parts.length ? ' /' : ''),
|
||||
path: path
|
||||
})
|
||||
}
|
||||
return this.breadcrumb;
|
||||
reload: function() {
|
||||
$.ajax({
|
||||
url: "/api/reload",
|
||||
method: "POST",
|
||||
success: function(data) {
|
||||
if (data.status == 0) {
|
||||
alert("reload success");
|
||||
} else {
|
||||
alert(data.value);
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
test: function() {
|
||||
console.log("test");
|
||||
},
|
||||
cmdStart: function(name) {
|
||||
console.log(name);
|
||||
$.ajax({
|
||||
url: "/api/programs/" + name + "/start",
|
||||
method: 'post',
|
||||
success: function(data) {
|
||||
console.log(data);
|
||||
}
|
||||
})
|
||||
},
|
||||
cmdStop: function(name) {
|
||||
$.ajax({
|
||||
url: "/api/programs/" + name + "/stop",
|
||||
method: 'post',
|
||||
success: function(data) {
|
||||
console.log(data);
|
||||
}
|
||||
})
|
||||
},
|
||||
cmdTail: function(name) {
|
||||
var that = this;
|
||||
if (W.wsLog) {
|
||||
W.wsLog.close()
|
||||
}
|
||||
W.wsLog = newWebsocket("/ws/logs/" + name, {
|
||||
onopen: function(evt) {
|
||||
that.log.content = "";
|
||||
that.log.line_count = 0;
|
||||
},
|
||||
refresh: function () {
|
||||
// ws.send("Hello")
|
||||
$.ajax({
|
||||
url: "/api/programs",
|
||||
success: function (data) {
|
||||
vm.programs = data;
|
||||
Vue.nextTick(function () {
|
||||
$('[data-toggle="tooltip"]').tooltip()
|
||||
})
|
||||
}
|
||||
});
|
||||
onmessage: function(evt) {
|
||||
// strip ansi color
|
||||
// console.log("DT:", evt.data)
|
||||
that.log.content += evt.data.replace(/\033\[[0-9;]*m/g, "");
|
||||
that.log.line_count = $.trim(that.log.content).split(/\r\n|\r|\n/).length;
|
||||
if (that.log.follow) {
|
||||
var pre = $(".realtime-log")[0];
|
||||
setTimeout(function() {
|
||||
pre.scrollTop = pre.scrollHeight - pre.clientHeight;
|
||||
}, 1);
|
||||
}
|
||||
}
|
||||
});
|
||||
this.log.follow = true;
|
||||
$("#modalTailf").modal({
|
||||
show: true,
|
||||
keyboard: true,
|
||||
// keyboard: false,
|
||||
// backdrop: 'static',
|
||||
})
|
||||
},
|
||||
cmdDelete: function(name) {
|
||||
if (!confirm("Confirm delete \"" + name + "\"")) {
|
||||
return
|
||||
}
|
||||
$.ajax({
|
||||
url: "/api/programs/" + name,
|
||||
method: 'delete',
|
||||
success: function(data) {
|
||||
console.log(data);
|
||||
}
|
||||
})
|
||||
},
|
||||
canStop: function(status) {
|
||||
switch (status) {
|
||||
case "running":
|
||||
case "retry wait":
|
||||
return true;
|
||||
}
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
$.ajax({
|
||||
url: "/distributed/api/programs",
|
||||
success: function (data) {
|
||||
vm.slaves = data;
|
||||
Vue.nextTick(function () {
|
||||
$('[data-toggle="tooltip"]').tooltip()
|
||||
})
|
||||
}
|
||||
});
|
||||
},
|
||||
/*reload: function () {
|
||||
$.ajax({
|
||||
url: "/api/reload",
|
||||
method: "POST",
|
||||
success: function (data) {
|
||||
if (data.status == 0) {
|
||||
alert("reload success");
|
||||
} else {
|
||||
alert(data.value);
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
test: function () {
|
||||
console.log("test");
|
||||
},*/
|
||||
cmdStart: function (name, slave) {
|
||||
console.log(name, slave);
|
||||
requestUrl = "/api/programs/" + name + "/start";
|
||||
if (slave !== undefined && "" !== slave) {
|
||||
requestUrl = "/distributed/" + slave + requestUrl;
|
||||
}
|
||||
$.ajax({
|
||||
url: requestUrl,
|
||||
method: 'post',
|
||||
success: function (data) {
|
||||
console.log(data);
|
||||
}
|
||||
});
|
||||
},
|
||||
cmdStop: function (name, slave) {
|
||||
requestUrl = "/api/programs/" + name + "/stop";
|
||||
if (slave !== undefined && "" !== slave) {
|
||||
requestUrl = "/distributed/" + slave + requestUrl;
|
||||
}
|
||||
$.ajax({
|
||||
url: requestUrl,
|
||||
method: 'post',
|
||||
success: function (data) {
|
||||
console.log(data);
|
||||
}
|
||||
})
|
||||
},
|
||||
cmdTail: function (name, slave) {
|
||||
requestUrl = "/ws/logs/" + name;
|
||||
if (slave !== undefined && "" !== slave) {
|
||||
requestUrl = "/distributed/" + slave + requestUrl;
|
||||
}
|
||||
var that = this;
|
||||
if (W.wsLog) {
|
||||
W.wsLog.close()
|
||||
}
|
||||
W.wsLog = newWebsocket(requestUrl, {
|
||||
onopen: function (evt) {
|
||||
that.log.content = "";
|
||||
that.log.line_count = 0;
|
||||
},
|
||||
onmessage: function (evt) {
|
||||
that.log.content += evt.data.replace(/\033\[[0-9;]*m/g, "");
|
||||
that.log.line_count = $.trim(that.log.content).split(/\r\n|\r|\n/).length;
|
||||
if (that.log.follow) {
|
||||
var pre = $(".realtime-log")[0];
|
||||
setTimeout(function () {
|
||||
pre.scrollTop = pre.scrollHeight - pre.clientHeight;
|
||||
}, 1);
|
||||
}
|
||||
}
|
||||
});
|
||||
this.log.follow = true;
|
||||
$("#modalTailf").modal({
|
||||
show: true,
|
||||
keyboard: true,
|
||||
}).on("hide.bs.modal", function (e) {
|
||||
W.wsLog.close();
|
||||
})
|
||||
},
|
||||
cmdDelete: function (name, slave) {
|
||||
if (!confirm("Confirm delete \"" + name + "\"")) {
|
||||
return
|
||||
}
|
||||
requestUrl = "/api/programs/" + name;
|
||||
if (slave !== undefined && "" !== slave) {
|
||||
requestUrl = "/distributed/" + slave + requestUrl
|
||||
}
|
||||
$.ajax({
|
||||
url: requestUrl,
|
||||
method: 'delete',
|
||||
success: function (data) {
|
||||
console.log(data);
|
||||
}
|
||||
})
|
||||
},
|
||||
canStop: function (status) {
|
||||
switch (status) {
|
||||
case "running":
|
||||
case "retry wait":
|
||||
return true;
|
||||
}
|
||||
},
|
||||
}
|
||||
Vue.filter('fromNow', function(value) {
|
||||
return moment(value).fromNow();
|
||||
})
|
||||
|
||||
Vue.filter('fromNow', function (value) {
|
||||
return moment(value).fromNow();
|
||||
Vue.filter('formatBytes', function(value) {
|
||||
var bytes = parseFloat(value);
|
||||
if (bytes < 0) return "-";
|
||||
else if (bytes < 1024) return bytes + " B";
|
||||
else if (bytes < 1048576) return (bytes / 1024).toFixed(0) + " KB";
|
||||
else if (bytes < 1073741824) return (bytes / 1048576).toFixed(1) + " MB";
|
||||
else return (bytes / 1073741824).toFixed(1) + " GB";
|
||||
})
|
||||
|
||||
Vue.filter('formatBytes', function (value) {
|
||||
var bytes = parseFloat(value);
|
||||
if (bytes < 0) return "-";
|
||||
else if (bytes < 1024) return bytes + " B";
|
||||
else if (bytes < 1048576) return (bytes / 1024).toFixed(0) + " KB";
|
||||
else if (bytes < 1073741824) return (bytes / 1048576).toFixed(1) + " MB";
|
||||
else return (bytes / 1073741824).toFixed(1) + " GB";
|
||||
Vue.filter('colorStatus', function(value) {
|
||||
var makeColorText = function(text, color) {
|
||||
return "<span class='status' style='background-color:" + color + "'>" + text + "</span>";
|
||||
}
|
||||
switch (value) {
|
||||
case "stopping":
|
||||
return makeColorText(value, "#996633");
|
||||
case "running":
|
||||
return makeColorText(value, "green");
|
||||
case "fatal":
|
||||
return makeColorText(value, "red");
|
||||
default:
|
||||
return makeColorText(value, "gray");
|
||||
}
|
||||
})
|
||||
|
||||
Vue.filter('colorStatus', function (value) {
|
||||
var makeColorText = function (text, color) {
|
||||
return "<span class='status' style='background-color:" + color + "'>" + text + "</span>";
|
||||
};
|
||||
switch (value) {
|
||||
case "stopping":
|
||||
return makeColorText(value, "#996633");
|
||||
case "running":
|
||||
return makeColorText(value, "green");
|
||||
case "fatal":
|
||||
return makeColorText(value, "red");
|
||||
default:
|
||||
return makeColorText(value, "gray");
|
||||
}
|
||||
});
|
||||
Vue.directive('disable', function(value) {
|
||||
this.el.disabled = !!value
|
||||
})
|
||||
|
||||
Vue.directive('disable', function (value) {
|
||||
this.el.disabled = !!value
|
||||
});
|
||||
$(function() {
|
||||
vm.refresh();
|
||||
|
||||
$(function () {
|
||||
vm.refresh();
|
||||
$("#formNewProgram").submit(function(e) {
|
||||
var url = "/api/programs",
|
||||
data = $(this).serialize();
|
||||
$.ajax({
|
||||
type: "POST",
|
||||
url: url,
|
||||
data: data,
|
||||
success: function(data) {
|
||||
if (data.status === 0) {
|
||||
$("#newProgram").modal('hide');
|
||||
} else {
|
||||
window.alert(data.error);
|
||||
}
|
||||
},
|
||||
error: function(err) {
|
||||
console.log(err.responseText);
|
||||
}
|
||||
})
|
||||
e.preventDefault()
|
||||
});
|
||||
|
||||
function newEventWatcher() {
|
||||
W.events = newWebsocket("/ws/events", {
|
||||
onopen: function (evt) {
|
||||
vm.isConnectionAlive = true;
|
||||
},
|
||||
onmessage: function (evt) {
|
||||
console.log("response:" + evt.data);
|
||||
vm.refresh();
|
||||
},
|
||||
onclose: function (evt) {
|
||||
W.events = null;
|
||||
vm.isConnectionAlive = false;
|
||||
console.log("Reconnect after 3s");
|
||||
setTimeout(newEventWatcher, 3000)
|
||||
}
|
||||
});
|
||||
};
|
||||
function newEventWatcher() {
|
||||
W.events = newWebsocket("/ws/events", {
|
||||
onopen: function(evt) {
|
||||
vm.isConnectionAlive = true;
|
||||
},
|
||||
onmessage: function(evt) {
|
||||
console.log("response:" + evt.data);
|
||||
vm.refresh();
|
||||
},
|
||||
onclose: function(evt) {
|
||||
W.events = null;
|
||||
vm.isConnectionAlive = false;
|
||||
console.log("Reconnect after 3s")
|
||||
setTimeout(newEventWatcher, 3000)
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
newEventWatcher();
|
||||
newEventWatcher();
|
||||
|
||||
// cancel follow log if people want to see the original data
|
||||
$(".realtime-log").bind('mousewheel', function (evt) {
|
||||
if (evt.originalEvent.wheelDelta >= 0) {
|
||||
vm.log.follow = false;
|
||||
}
|
||||
});
|
||||
$('#modalTailf').on('hidden.bs.modal', function () {
|
||||
// do something…
|
||||
console.log("Hiddeen");
|
||||
if (W.wsLog) {
|
||||
console.log("wsLog closed");
|
||||
W.wsLog.close()
|
||||
}
|
||||
})
|
||||
// cancel follow log if people want to see the original data
|
||||
$(".realtime-log").bind('mousewheel', function(evt) {
|
||||
if (evt.originalEvent.wheelDelta >= 0) {
|
||||
vm.log.follow = false;
|
||||
}
|
||||
})
|
||||
$('#modalTailf').on('hidden.bs.modal', function() {
|
||||
// do something…
|
||||
console.log("Hiddeen")
|
||||
if (W.wsLog) {
|
||||
console.log("wsLog closed")
|
||||
W.wsLog.close()
|
||||
}
|
||||
})
|
||||
});
|
@ -1,52 +1,48 @@
|
||||
/* javascript */
|
||||
var vm = new Vue({
|
||||
el: '#app',
|
||||
data: {
|
||||
name: name,
|
||||
pid: '-',
|
||||
childPids: [],
|
||||
}
|
||||
el: '#app',
|
||||
data: {
|
||||
name: name,
|
||||
pid: '-',
|
||||
childPids: [],
|
||||
}
|
||||
});
|
||||
|
||||
var maxDataCount = 30;
|
||||
var requstUrl = "/ws/perfs/" + name;
|
||||
if ("" !== slave) {
|
||||
requstUrl = "/distributed/" + slave + requstUrl;
|
||||
}
|
||||
var ws = newWebsocket(requstUrl, {
|
||||
onopen: function(evt) {
|
||||
console.log(evt);
|
||||
},
|
||||
onmessage: function(evt) {
|
||||
var data = JSON.parse(evt.data);
|
||||
vm.pid = data.pid;
|
||||
vm.childPids = data.pids;
|
||||
console.log("pid", data.pid, data); //evt.data.pid);
|
||||
if (memData && data.rss) {
|
||||
memData.push({
|
||||
value: [new Date(), data.rss],
|
||||
})
|
||||
if (memData.length > maxDataCount) {
|
||||
memData.shift();
|
||||
}
|
||||
chartMem.setOption({
|
||||
series: [{
|
||||
data: memData,
|
||||
}]
|
||||
});
|
||||
}
|
||||
if (cpuData && data.pcpu !== undefined) {
|
||||
cpuData.push({
|
||||
value: [new Date(), data.pcpu],
|
||||
})
|
||||
if (cpuData.length > maxDataCount) {
|
||||
cpuData.shift();
|
||||
}
|
||||
chartCpu.setOption({
|
||||
series: [{
|
||||
data: cpuData,
|
||||
}]
|
||||
})
|
||||
}
|
||||
var ws = newWebsocket('/ws/perfs/' + name, {
|
||||
onopen: function(evt) {
|
||||
console.log(evt);
|
||||
},
|
||||
onmessage: function(evt) {
|
||||
var data = JSON.parse(evt.data);
|
||||
vm.pid = data.pid;
|
||||
vm.childPids = data.pids;
|
||||
console.log("pid", data.pid, data); //evt.data.pid);
|
||||
if (memData && data.rss) {
|
||||
memData.push({
|
||||
value: [new Date(), data.rss],
|
||||
})
|
||||
if (memData.length > maxDataCount) {
|
||||
memData.shift();
|
||||
}
|
||||
chartMem.setOption({
|
||||
series: [{
|
||||
data: memData,
|
||||
}]
|
||||
});
|
||||
}
|
||||
if (cpuData && data.pcpu !== undefined) {
|
||||
cpuData.push({
|
||||
value: [new Date(), data.pcpu],
|
||||
})
|
||||
if (cpuData.length > maxDataCount) {
|
||||
cpuData.shift();
|
||||
}
|
||||
chartCpu.setOption({
|
||||
series: [{
|
||||
data: cpuData,
|
||||
}]
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
File diff suppressed because one or more lines are too long
@ -0,0 +1,22 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
@ -0,0 +1,22 @@
|
||||
The MIT License (MIT)
|
||||
Copyright (c) 2016 codeskyblue
|
||||
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
|
||||
OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
@ -0,0 +1,67 @@
|
||||
# kexec
|
||||
[![GoDoc](https://godoc.org/github.com/codeskyblue/kexec?status.svg)](https://godoc.org/github.com/codeskyblue/kexec)
|
||||
|
||||
This is a golang lib, add a `Terminate` command to exec.
|
||||
|
||||
Tested on _windows, linux, darwin._
|
||||
|
||||
This lib has been used in [fswatch](https://github.com/codeskyblue/fswatch).
|
||||
|
||||
## Usage
|
||||
|
||||
go get -v github.com/codeskyblue/kexec
|
||||
|
||||
|
||||
example1:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "github.com/codeskyblue/kexec"
|
||||
|
||||
func main(){
|
||||
p := kexec.Command("python", "flask_main.py")
|
||||
p.Start()
|
||||
p.Terminate(syscall.SIGINT)
|
||||
}
|
||||
```
|
||||
|
||||
example2: see more [examples](examples)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/codeskyblue/kexec"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// In unix will call: bash -c "python flask_main.py"
|
||||
// In windows will call: cmd /c "python flask_main.py"
|
||||
p := kexec.CommandString("python flask_main.py")
|
||||
p.Stdout = os.Stdout
|
||||
p.Stderr = os.Stderr
|
||||
p.Start()
|
||||
p.Terminate(syscall.SIGKILL)
|
||||
}
|
||||
```
|
||||
|
||||
example3:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "github.com/codeskyblue/kexec"
|
||||
|
||||
func main() {
|
||||
p := kexec.Command("whoami")
|
||||
p.SetUser("codeskyblue") // Only works on darwin and linux
|
||||
p.Run()
|
||||
}
|
||||
```
|
||||
|
||||
## PS
|
||||
This lib also support you call `Wait()` twice, which is not support by `os/exec`
|
||||
|
||||
## LICENSE
|
||||
[MIT](LICENSE)
|
@ -0,0 +1,19 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/codeskyblue/kexec"
|
||||
)
|
||||
|
||||
func main() {
|
||||
p := kexec.CommandString("python flask_main.py")
|
||||
p.Start()
|
||||
time.Sleep(3 * time.Second)
|
||||
err := p.Terminate(syscall.SIGKILL)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
}
|
||||
}
|
@ -0,0 +1,6 @@
|
||||
import flask
|
||||
|
||||
app = flask.Flask(__name__)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(port=46732, debug=True)
|
@ -0,0 +1,54 @@
|
||||
package kexec
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os/exec"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type KCommand struct {
|
||||
*exec.Cmd
|
||||
|
||||
errCs []chan error
|
||||
err error
|
||||
finished bool
|
||||
once sync.Once
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func (c *KCommand) Run() error {
|
||||
if err := c.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Wait()
|
||||
}
|
||||
|
||||
// This Wait wraps exec.Wait, but support multi call
|
||||
func (k *KCommand) Wait() error {
|
||||
if k.Process == nil {
|
||||
return errors.New("exec: not started")
|
||||
}
|
||||
k.once.Do(func() {
|
||||
if k.errCs == nil {
|
||||
k.errCs = make([]chan error, 0)
|
||||
}
|
||||
go func() {
|
||||
k.err = k.Cmd.Wait()
|
||||
k.mu.Lock()
|
||||
k.finished = true
|
||||
for _, errC := range k.errCs {
|
||||
errC <- k.err
|
||||
}
|
||||
k.mu.Unlock()
|
||||
}()
|
||||
})
|
||||
k.mu.Lock()
|
||||
if k.finished {
|
||||
k.mu.Unlock()
|
||||
return k.err
|
||||
}
|
||||
errC := make(chan error, 1)
|
||||
k.errCs = append(k.errCs, errC)
|
||||
k.mu.Unlock()
|
||||
return <-errC
|
||||
}
|
@ -0,0 +1,68 @@
|
||||
// +build !windows
|
||||
|
||||
package kexec
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func setupCmd(cmd *exec.Cmd) {
|
||||
cmd.SysProcAttr = &syscall.SysProcAttr{}
|
||||
cmd.SysProcAttr.Setsid = true
|
||||
}
|
||||
|
||||
func Command(name string, arg ...string) *KCommand {
|
||||
cmd := exec.Command(name, arg...)
|
||||
setupCmd(cmd)
|
||||
return &KCommand{
|
||||
Cmd: cmd,
|
||||
}
|
||||
}
|
||||
|
||||
func CommandString(command string) *KCommand {
|
||||
cmd := exec.Command("/bin/bash", "-c", command)
|
||||
setupCmd(cmd)
|
||||
//cmd.Stdout = os.Stdout
|
||||
//cmd.Stderr = os.Stderr
|
||||
return &KCommand{
|
||||
Cmd: cmd,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *KCommand) Terminate(sig os.Signal) (err error) {
|
||||
if p.Process == nil {
|
||||
return
|
||||
}
|
||||
// find pgid, ref: http://unix.stackexchange.com/questions/14815/process-descendants
|
||||
group, err := os.FindProcess(-1 * p.Process.Pid)
|
||||
//log.Println(group)
|
||||
if err == nil {
|
||||
err = group.Signal(sig)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Ref: http://stackoverflow.com/questions/21705950/running-external-commands-through-os-exec-under-another-user
|
||||
func (k *KCommand) SetUser(name string) (err error) {
|
||||
u, err := user.Lookup(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uid, err := strconv.Atoi(u.Uid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
gid, err := strconv.Atoi(u.Gid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if k.SysProcAttr == nil {
|
||||
k.SysProcAttr = &syscall.SysProcAttr{}
|
||||
}
|
||||
k.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,65 @@
|
||||
package kexec
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/user"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/smartystreets/goconvey/convey"
|
||||
)
|
||||
|
||||
// TestCommand exercises the public kexec surface: plain execution,
// process-group termination, repeated Wait, and (when running as root)
// SetUser. NOTE(review): relies on a POSIX shell plus the echo/sleep/
// whoami binaries, and the last case needs a local user named "qard2".
func TestCommand(t *testing.T) {
	// Sanity check that the goconvey harness itself works.
	Convey("1 should equal 1", t, func() {
		So(1, ShouldEqual, 1)
	})

	// KCommand embeds *exec.Cmd, so Output works unchanged.
	Convey("kexec should work as normal os/exec", t, func() {
		cmd := Command("echo", "-n", "123")
		data, err := cmd.Output()
		So(err, ShouldBeNil)
		So(string(data), ShouldEqual, "123")
	})

	// Terminate signals the process group, so the long sleep must die
	// early and Wait must report a non-nil (signal) error.
	Convey("the terminate should kill proc", t, func() {
		cmd := CommandString("sleep 51")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		cmd.Start()
		time.Sleep(time.Millisecond * 50)
		cmd.Terminate(syscall.SIGINT)
		err := cmd.Wait()
		So(err, ShouldNotBeNil)
		//So(err.Error(), ShouldEqual, "signal: interrupt")
	})

	// Wait may be called twice and must hand back the same error value.
	// The shell itself starts fine; the bogus command inside it fails.
	Convey("Should ok with call Wait twice", t, func() {
		cmd := CommandString("not-exists-command-xxl213 true")
		var err error
		err = cmd.Start()
		So(err, ShouldBeNil)

		err1 := cmd.Wait()
		So(err1, ShouldNotBeNil)
		err2 := cmd.Wait()
		So(err1, ShouldEqual, err2)
	})

	// SetUser needs root privileges; skip silently otherwise.
	Convey("Set user works", t, func() {
		u, err := user.Current()
		So(err, ShouldBeNil)
		// Set user must be root
		if u.Uid != "0" {
			return
		}

		cmd := Command("whoami")
		err = cmd.SetUser("qard2")
		So(err, ShouldBeNil)

		output, err := cmd.Output()
		So(err, ShouldBeNil)
		So(string(output), ShouldEqual, "qard2\n")
	})
}
|
@ -0,0 +1,40 @@
|
||||
package kexec
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func Command(name string, arg ...string) *KCommand {
|
||||
return &KCommand{
|
||||
Cmd: exec.Command(name, arg...),
|
||||
}
|
||||
}
|
||||
|
||||
func CommandString(command string) *KCommand {
|
||||
cmd := exec.Command("cmd", "/c", command)
|
||||
//cmd.Stdout = os.Stdout
|
||||
//cmd.Stderr = os.Stderr
|
||||
return &KCommand{
|
||||
Cmd: cmd,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *KCommand) Terminate(sig os.Signal) (err error) {
|
||||
if p.Process == nil {
|
||||
return nil
|
||||
}
|
||||
pid := p.Process.Pid
|
||||
c := exec.Command("taskkill", "/t", "/f", "/pid", strconv.Itoa(pid))
|
||||
c.Stdout = os.Stdout
|
||||
c.Stderr = os.Stderr
|
||||
return c.Run()
|
||||
}
|
||||
|
||||
// SetUser not support on windws
|
||||
func (k *KCommand) SetUser(name string) (err error) {
|
||||
log.Printf("Can not set user(%s) on windows", name)
|
||||
return nil
|
||||
}
|
@ -0,0 +1 @@
|
||||
web: python flask_main.py
|
@ -0,0 +1,11 @@
|
||||
# Small Flask app used as the child process in the kexec/kproc examples.
import flask


app = flask.Flask(__name__)

@app.route('/')
def homepage():
    # Trivial endpoint so the server has something to serve.
    return 'Home'

if __name__ == '__main__':
    # Bind all interfaces on Flask's default port (5000).
    app.run(debug=True, host='0.0.0.0')
|
@ -0,0 +1,22 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"kproc"
|
||||
"log"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Demo: start a flask server via kproc, kill its whole process tree,
// then list anything still bound to port 5000 to show the kill worked.
// NOTE(review): this example imports "kproc" while the rest of the repo
// uses kexec -- confirm the import path is intentional.
func main() {
	p := kproc.ProcString("python flask_main.py")
	p.Start()
	// Let the server run for a while before killing it.
	time.Sleep(10 * time.Second)
	err := p.Terminate(syscall.SIGKILL)
	if err != nil {
		log.Println(err)
	}
	// Best-effort diagnostic output; the lsof error is deliberately ignored.
	out, _ := exec.Command("lsof", "-i:5000").CombinedOutput()
	fmt.Println(string(out))
}
|
Binary file not shown.
@ -0,0 +1,5 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.4
|
||||
- 1.5
|
||||
script: go test -v github.com/equinox-io/equinox github.com/equinox-io/equinox/proto
|
@ -0,0 +1,99 @@
|
||||
# equinox client SDK [![godoc reference](https://godoc.org/github.com/equinox-io/equinox?status.png)](https://godoc.org/github.com/equinox-io/equinox)
|
||||
|
||||
Package equinox allows applications to remotely update themselves with the [equinox.io](https://equinox.io) service.
|
||||
|
||||
## Minimal Working Example
|
||||
|
||||
```go
|
||||
import "github.com/equinox-io/equinox"
|
||||
|
||||
const appID = "<YOUR EQUINOX APP ID>"
|
||||
|
||||
var publicKey = []byte(`
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtrVmBxQvheRArXjg2vG1xIprWGuCyESx
|
||||
MMY8pjmjepSy2kuz+nl9aFLqmr+rDNdYvEBqQaZrYMc6k29gjvoQnQ==
|
||||
-----END PUBLIC KEY-----
|
||||
`)
|
||||
|
||||
func update(channel string) error {
|
||||
opts := equinox.Options{Channel: channel}
|
||||
if err := opts.SetPublicKeyPEM(publicKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check for the update
|
||||
resp, err := equinox.Check(appID, opts)
|
||||
switch {
|
||||
case err == equinox.NotAvailableErr:
|
||||
fmt.Println("No update available, already at the latest version!")
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
// fetch the update and apply it
|
||||
err = resp.Apply()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Updated to new version: %s!\n", resp.ReleaseVersion)
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Update To Specific Version
|
||||
|
||||
When you specify a channel in the update options, equinox will try to update the application
|
||||
to the latest release of your application published to that channel. Instead, you may wish to
|
||||
update the application to a specific (possibly older) version. You can do this by explicitly setting
|
||||
Version in the Options struct:
|
||||
|
||||
```go
|
||||
opts := equinox.Options{Version: "0.1.2"}
|
||||
```
|
||||
|
||||
## Prompt For Update
|
||||
|
||||
You may wish to ask the user for approval before updating to a new version. This is as simple
|
||||
as calling the Check function and only calling Apply on the returned result if the user approves.
|
||||
Example:
|
||||
|
||||
```go
|
||||
// check for the update
|
||||
resp, err := equinox.Check(appID, opts)
|
||||
switch {
|
||||
case err == equinox.NotAvailableErr:
|
||||
fmt.Println("No update available, already at the latest version!")
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("New version available!")
|
||||
fmt.Println("Version:", resp.ReleaseVersion)
|
||||
fmt.Println("Name:", resp.ReleaseTitle)
|
||||
fmt.Println("Details:", resp.ReleaseDescription)
|
||||
|
||||
ok := prompt("Would you like to update?")
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.Apply()
|
||||
// ...
|
||||
```
|
||||
|
||||
## Generating Keys
|
||||
|
||||
All equinox releases must be signed with a private ECDSA key, and all updates verified with the
|
||||
public key portion. To do that, you'll need to generate a key pair. The equinox release tool can
|
||||
generate an ecdsa key pair for you easily:
|
||||
|
||||
```shell
|
||||
equinox genkey
|
||||
```
|
||||
|
@ -0,0 +1,92 @@
|
||||
/*
|
||||
Package equinox allows applications to remotely update themselves with the equinox.io service.
|
||||
|
||||
Minimal Working Example
|
||||
|
||||
import "github.com/equinox-io/equinox"
|
||||
|
||||
const appID = "<YOUR EQUINOX APP ID>"
|
||||
|
||||
var publicKey = []byte(`
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtrVmBxQvheRArXjg2vG1xIprWGuCyESx
|
||||
MMY8pjmjepSy2kuz+nl9aFLqmr+rDNdYvEBqQaZrYMc6k29gjvoQnQ==
|
||||
-----END PUBLIC KEY-----
|
||||
`)
|
||||
|
||||
func update(channel string) error {
|
||||
opts := equinox.Options{Channel: channel}
|
||||
if err := opts.SetPublicKeyPEM(publicKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check for the update
|
||||
resp, err := equinox.Check(appID, opts)
|
||||
switch {
|
||||
case err == equinox.NotAvailableErr:
|
||||
fmt.Println("No update available, already at the latest version!")
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
// fetch the update and apply it
|
||||
err = resp.Apply()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Updated to new version: %s!\n", resp.ReleaseVersion)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
Update To Specific Version
|
||||
|
||||
When you specify a channel in the update options, equinox will try to update the application
|
||||
to the latest release of your application published to that channel. Instead, you may wish to
|
||||
update the application to a specific (possibly older) version. You can do this by explicitly setting
|
||||
Version in the Options struct:
|
||||
|
||||
opts := equinox.Options{Version: "0.1.2"}
|
||||
|
||||
Prompt For Update
|
||||
|
||||
You may wish to ask the user for approval before updating to a new version. This is as simple
|
||||
as calling the Check function and only calling Apply on the returned result if the user approves.
|
||||
Example:
|
||||
|
||||
// check for the update
|
||||
resp, err := equinox.Check(appID, opts)
|
||||
switch {
|
||||
case err == equinox.NotAvailableErr:
|
||||
fmt.Println("No update available, already at the latest version!")
|
||||
return nil
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("New version available!")
|
||||
fmt.Println("Version:", resp.ReleaseVersion)
|
||||
fmt.Println("Name:", resp.ReleaseTitle)
|
||||
fmt.Println("Details:", resp.ReleaseDescription)
|
||||
|
||||
ok := prompt("Would you like to update?")
|
||||
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
err = resp.Apply()
|
||||
// ...
|
||||
|
||||
Generating Keys
|
||||
|
||||
All equinox releases must be signed with a private ECDSA key, and all updates verified with the
|
||||
public key portion. To do that, you'll need to generate a key pair. The equinox release tool can
|
||||
generate an ecdsa key pair for you easily:
|
||||
|
||||
equinox genkey
|
||||
|
||||
*/
|
||||
package equinox
|
@ -0,0 +1,13 @@
|
||||
Copyright 2015 Alan Shreve
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
@ -0,0 +1,65 @@
|
||||
# go-update: Build self-updating Go programs [![godoc reference](https://godoc.org/github.com/inconshreveable/go-update?status.png)](https://godoc.org/github.com/inconshreveable/go-update)
|
||||
|
||||
Package update provides functionality to implement secure, self-updating Go programs (or other single-file targets)
|
||||
A program can update itself by replacing its executable file with a new version.
|
||||
|
||||
It provides the flexibility to implement different updating user experiences
|
||||
like auto-updating, or manual user-initiated updates. It also boasts
|
||||
advanced features like binary patching and code signing verification.
|
||||
|
||||
Example of updating from a URL:
|
||||
|
||||
```go
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/inconshreveable/go-update"
|
||||
)
|
||||
|
||||
func doUpdate(url string) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
	err = update.Apply(resp.Body, update.Options{})
|
||||
if err != nil {
|
||||
// error handling
|
||||
}
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- Cross platform support (Windows too!)
|
||||
- Binary patch application
|
||||
- Checksum verification
|
||||
- Code signing verification
|
||||
- Support for updating arbitrary files
|
||||
|
||||
## [equinox.io](https://equinox.io)
|
||||
[equinox.io](https://equinox.io) is a complete ready-to-go updating solution built on top of go-update that provides:
|
||||
|
||||
- Hosted updates
|
||||
- Update channels (stable, beta, nightly, ...)
|
||||
- Dynamically computed binary diffs
|
||||
- Automatic key generation and code
|
||||
- Release tooling with proper code signing
|
||||
- Update/download metrics
|
||||
|
||||
## API Compatibility Promises
|
||||
The master branch of `go-update` is *not* guaranteed to have a stable API over time. For any production application, you should vendor
|
||||
your dependency on `go-update` with a tool like git submodules, [gb](http://getgb.io/) or [govendor](https://github.com/kardianos/govendor).
|
||||
|
||||
The `go-update` package makes the following promises about API compatibility:
|
||||
1. A list of all API-breaking changes will be documented in this README.
|
||||
1. `go-update` will strive for as few API-breaking changes as possible.
|
||||
|
||||
## API Breaking Changes
|
||||
- **Sept 3, 2015**: The `Options` struct passed to `Apply` was changed to be passed by value instead of passed by pointer. Old API at `28de026`.
|
||||
- **Aug 9, 2015**: 2.0 API. Old API at `221d034` or `gopkg.in/inconshreveable/go-update.v0`.
|
||||
|
||||
## License
|
||||
Apache
|
@ -0,0 +1,322 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/equinox-io/equinox/internal/go-update/internal/osext"
|
||||
)
|
||||
|
||||
var (
|
||||
openFile = os.OpenFile
|
||||
)
|
||||
|
||||
// Apply performs an update of the current executable (or opts.TargetFile, if set) with the contents of the given io.Reader.
//
// Apply performs the following actions to ensure a safe cross-platform update:
//
// 1. If configured, applies the contents of the update io.Reader as a binary patch.
//
// 2. If configured, computes the checksum of the new executable and verifies it matches.
//
// 3. If configured, verifies the signature with a public key.
//
// 4. Creates a new file, /path/to/.target.new with the TargetMode with the contents of the updated file
//
// 5. Renames /path/to/target to /path/to/.target.old
//
// 6. Renames /path/to/.target.new to /path/to/target
//
// 7. If the final rename is successful, deletes /path/to/.target.old, returns no error. On Windows,
// the removal of /path/to/target.old always fails, so instead Apply hides the old file instead.
//
// 8. If the final rename fails, attempts to roll back by renaming /path/to/.target.old
// back to /path/to/target.
//
// If the roll back operation fails, the file system is left in an inconsistent state (between steps 5 and 6) where
// there is no new executable file and the old executable file could not be moved to its original location. In this
// case you should notify the user of the bad news and ask them to recover manually. Applications can determine whether
// the rollback failed by calling RollbackError, see the documentation on that function for additional detail.
func Apply(update io.Reader, opts Options) error {
	// validate: a signature and a public key must be supplied together.
	verify := false
	switch {
	case opts.Signature != nil && opts.PublicKey != nil:
		// okay
		verify = true
	case opts.Signature != nil:
		return errors.New("no public key to verify signature with")
	case opts.PublicKey != nil:
		return errors.New("No signature to verify with")
	}

	// set defaults
	if opts.Hash == 0 {
		opts.Hash = crypto.SHA256
	}
	if opts.Verifier == nil {
		opts.Verifier = NewECDSAVerifier()
	}
	if opts.TargetMode == 0 {
		opts.TargetMode = 0755
	}

	// get target path (defaults to the currently running executable)
	var err error
	opts.TargetPath, err = opts.getPath()
	if err != nil {
		return err
	}

	var newBytes []byte
	if opts.Patcher != nil {
		if newBytes, err = opts.applyPatch(update); err != nil {
			return err
		}
	} else {
		// no patch to apply, go on through
		if newBytes, err = ioutil.ReadAll(update); err != nil {
			return err
		}
	}

	// verify checksum if requested
	if opts.Checksum != nil {
		if err = opts.verifyChecksum(newBytes); err != nil {
			return err
		}
	}

	if verify {
		if err = opts.verifySignature(newBytes); err != nil {
			return err
		}
	}

	// get the directory the executable exists in
	updateDir := filepath.Dir(opts.TargetPath)
	filename := filepath.Base(opts.TargetPath)

	// Copy the contents of the new binary to a new executable file
	newPath := filepath.Join(updateDir, fmt.Sprintf(".%s.new", filename))
	fp, err := openFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, opts.TargetMode)
	if err != nil {
		return err
	}
	defer fp.Close()

	_, err = io.Copy(fp, bytes.NewReader(newBytes))
	if err != nil {
		return err
	}

	// if we don't call fp.Close(), windows won't let us move the new executable
	// because the file will still be "in use"
	fp.Close()

	// this is where we'll move the executable to so that we can swap in the updated replacement
	oldPath := opts.OldSavePath
	removeOld := opts.OldSavePath == ""
	if removeOld {
		oldPath = filepath.Join(updateDir, fmt.Sprintf(".%s.old", filename))
	}

	// delete any existing old exec file - this is necessary on Windows for two reasons:
	// 1. after a successful update, Windows can't remove the .old file because the process is still running
	// 2. windows rename operations fail if the destination file already exists
	_ = os.Remove(oldPath)

	// move the existing executable to a new file in the same directory
	err = os.Rename(opts.TargetPath, oldPath)
	if err != nil {
		return err
	}

	// move the new executable in to become the new program
	err = os.Rename(newPath, opts.TargetPath)

	if err != nil {
		// move unsuccessful
		//
		// The filesystem is now in a bad state. We have successfully
		// moved the existing binary to a new location, but we couldn't move the new
		// binary to take its place. That means there is no file where the current executable binary
		// used to be!
		// Try to rollback by restoring the old binary to its original path.
		rerr := os.Rename(oldPath, opts.TargetPath)
		if rerr != nil {
			return &rollbackErr{err, rerr}
		}

		return err
	}

	// move successful, remove the old binary if needed
	if removeOld {
		errRemove := os.Remove(oldPath)

		// windows has trouble with removing old binaries, so hide it instead
		if errRemove != nil {
			_ = hideFile(oldPath)
		}
	}

	return nil
}
|
||||
|
||||
// RollbackError takes an error value returned by Apply and reports the
// error, if any, that occurred while attempting to roll back from a
// failed update. Applications should always call this function on any
// non-nil error returned by Apply.
//
// A nil result means either that no rollback was needed or that the
// rollback succeeded; otherwise the returned error is the one
// encountered while rolling back.
func RollbackError(err error) error {
	rerr, ok := err.(*rollbackErr)
	if !ok {
		return nil
	}
	return rerr.rollbackErr
}

// rollbackErr pairs the original update failure with the error that was
// hit while trying to roll the update back.
type rollbackErr struct {
	error             // original error that triggered the rollback
	rollbackErr error // error encountered while rolling back
}
|
||||
|
||||
// Options configures how Apply performs an update.
type Options struct {
	// TargetPath defines the path to the file to update.
	// The empty string means 'the executable file of the running program'.
	TargetPath string

	// Create TargetPath replacement with this file mode. If zero, defaults to 0755.
	TargetMode os.FileMode

	// Checksum of the new binary to verify against. If nil, no checksum or signature verification is done.
	Checksum []byte

	// Public key to use for signature verification. If nil, no signature verification is done.
	PublicKey crypto.PublicKey

	// Signature to verify the updated file. If nil, no signature verification is done.
	Signature []byte

	// Pluggable signature verification algorithm. If nil, ECDSA is used.
	Verifier Verifier

	// Use this hash function to generate the checksum. If not set, SHA256 is used.
	Hash crypto.Hash

	// If nil, treat the update as a complete replacement for the contents of the file at TargetPath.
	// If non-nil, treat the update contents as a patch and use this object to apply the patch.
	Patcher Patcher

	// Store the old executable file at this path after a successful update.
	// The empty string means the old executable file will be removed after the update.
	OldSavePath string
}
|
||||
|
||||
// CheckPermissions determines whether the process has the correct permissions to
|
||||
// perform the requested update. If the update can proceed, it returns nil, otherwise
|
||||
// it returns the error that would occur if an update were attempted.
|
||||
func (o *Options) CheckPermissions() error {
|
||||
// get the directory the file exists in
|
||||
path, err := o.getPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileDir := filepath.Dir(path)
|
||||
fileName := filepath.Base(path)
|
||||
|
||||
// attempt to open a file in the file's directory
|
||||
newPath := filepath.Join(fileDir, fmt.Sprintf(".%s.new", fileName))
|
||||
fp, err := openFile(newPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, o.TargetMode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fp.Close()
|
||||
|
||||
_ = os.Remove(newPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetPublicKeyPEM is a convenience method to set the PublicKey property
|
||||
// used for checking a completed update's signature by parsing a
|
||||
// Public Key formatted as PEM data.
|
||||
func (o *Options) SetPublicKeyPEM(pembytes []byte) error {
|
||||
block, _ := pem.Decode(pembytes)
|
||||
if block == nil {
|
||||
return errors.New("couldn't parse PEM data")
|
||||
}
|
||||
|
||||
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.PublicKey = pub
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Options) getPath() (string, error) {
|
||||
if o.TargetPath == "" {
|
||||
return osext.Executable()
|
||||
} else {
|
||||
return o.TargetPath, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (o *Options) applyPatch(patch io.Reader) ([]byte, error) {
|
||||
// open the file to patch
|
||||
old, err := os.Open(o.TargetPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer old.Close()
|
||||
|
||||
// apply the patch
|
||||
var applied bytes.Buffer
|
||||
if err = o.Patcher.Patch(old, &applied, patch); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return applied.Bytes(), nil
|
||||
}
|
||||
|
||||
func (o *Options) verifyChecksum(updated []byte) error {
|
||||
checksum, err := checksumFor(o.Hash, updated)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !bytes.Equal(o.Checksum, checksum) {
|
||||
return fmt.Errorf("Updated file has wrong checksum. Expected: %x, got: %x", o.Checksum, checksum)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *Options) verifySignature(updated []byte) error {
|
||||
checksum, err := checksumFor(o.Hash, updated)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return o.Verifier.VerifySignature(checksum, o.Signature, o.Hash, o.PublicKey)
|
||||
}
|
||||
|
||||
func checksumFor(h crypto.Hash, payload []byte) ([]byte, error) {
|
||||
if !h.Available() {
|
||||
return nil, errors.New("requested hash function not available")
|
||||
}
|
||||
hash := h.New()
|
||||
hash.Write(payload) // guaranteed not to error
|
||||
return hash.Sum([]byte{}), nil
|
||||
}
|
@ -0,0 +1,426 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/equinox-io/equinox/internal/go-update/internal/binarydist"
|
||||
)
|
||||
|
||||
var (
|
||||
oldFile = []byte{0xDE, 0xAD, 0xBE, 0xEF}
|
||||
newFile = []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06}
|
||||
newFileChecksum = sha256.Sum256(newFile)
|
||||
)
|
||||
|
||||
func cleanup(path string) {
|
||||
os.Remove(path)
|
||||
os.Remove(fmt.Sprintf(".%s.new", path))
|
||||
}
|
||||
|
||||
// we write with a separate name for each test so that we can run them in parallel
|
||||
func writeOldFile(path string, t *testing.T) {
|
||||
if err := ioutil.WriteFile(path, oldFile, 0777); err != nil {
|
||||
t.Fatalf("Failed to write file for testing preparation: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func validateUpdate(path string, err error, t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to update: %v", err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read file post-update: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(buf, newFile) {
|
||||
t.Fatalf("File was not updated! Bytes read: %v, Bytes expected: %v", buf, newFile)
|
||||
}
|
||||
}
|
||||
|
||||
// TestApplySimple covers the plain whole-file replacement path.
func TestApplySimple(t *testing.T) {
	t.Parallel()

	fName := "TestApplySimple"
	defer cleanup(fName)
	writeOldFile(fName, t)

	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
	})
	validateUpdate(fName, err, t)
}

// TestApplyOldSavePath verifies the replaced binary is preserved at
// OldSavePath instead of being removed.
func TestApplyOldSavePath(t *testing.T) {
	t.Parallel()

	fName := "TestApplyOldSavePath"
	defer cleanup(fName)
	writeOldFile(fName, t)

	oldfName := "OldSavePath"

	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath:  fName,
		OldSavePath: oldfName,
	})
	validateUpdate(fName, err, t)

	// The old executable must still exist at the requested location.
	if _, err := os.Stat(oldfName); os.IsNotExist(err) {
		t.Fatalf("Failed to find the old file: %v", err)
	}

	cleanup(oldfName)
}

// TestVerifyChecksum covers a replacement update with a matching checksum.
func TestVerifyChecksum(t *testing.T) {
	t.Parallel()

	fName := "TestVerifyChecksum"
	defer cleanup(fName)
	writeOldFile(fName, t)

	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
		Checksum:   newFileChecksum[:],
	})
	validateUpdate(fName, err, t)
}

// TestVerifyChecksumNegative ensures a wrong checksum aborts the update.
func TestVerifyChecksumNegative(t *testing.T) {
	t.Parallel()

	fName := "TestVerifyChecksumNegative"
	defer cleanup(fName)
	writeOldFile(fName, t)

	badChecksum := []byte{0x0A, 0x0B, 0x0C, 0xFF}
	err := Apply(bytes.NewReader(newFile), Options{
		TargetPath: fName,
		Checksum:   badChecksum,
	})
	if err == nil {
		t.Fatalf("Failed to detect bad checksum!")
	}
}

// TestApplyPatch covers applying a bsdiff patch instead of a full file.
func TestApplyPatch(t *testing.T) {
	t.Parallel()

	fName := "TestApplyPatch"
	defer cleanup(fName)
	writeOldFile(fName, t)

	// Build a genuine old->new patch with binarydist.
	patch := new(bytes.Buffer)
	err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(newFile), patch)
	if err != nil {
		t.Fatalf("Failed to create patch: %v", err)
	}

	err = Apply(patch, Options{
		TargetPath: fName,
		Patcher:    NewBSDiffPatcher(),
	})
	validateUpdate(fName, err, t)
}

// TestCorruptPatch ensures garbage patch data is rejected.
func TestCorruptPatch(t *testing.T) {
	t.Parallel()

	fName := "TestCorruptPatch"
	defer cleanup(fName)
	writeOldFile(fName, t)

	badPatch := []byte{0x44, 0x38, 0x86, 0x3c, 0x4f, 0x8d, 0x26, 0x54, 0xb, 0x11, 0xce, 0xfe, 0xc1, 0xc0, 0xf8, 0x31, 0x38, 0xa0, 0x12, 0x1a, 0xa2, 0x57, 0x2a, 0xe1, 0x3a, 0x48, 0x62, 0x40, 0x2b, 0x81, 0x12, 0xb1, 0x21, 0xa5, 0x16, 0xed, 0x73, 0xd6, 0x54, 0x84, 0x29, 0xa6, 0xd6, 0xb2, 0x1b, 0xfb, 0xe6, 0xbe, 0x7b, 0x70}
	err := Apply(bytes.NewReader(badPatch), Options{
		TargetPath: fName,
		Patcher:    NewBSDiffPatcher(),
	})
	if err == nil {
		t.Fatalf("Failed to detect corrupt patch!")
	}
}

// TestVerifyChecksumPatchNegative ensures that a valid patch producing
// the WRONG file is caught by the checksum of the expected new file.
func TestVerifyChecksumPatchNegative(t *testing.T) {
	t.Parallel()

	fName := "TestVerifyChecksumPatchNegative"
	defer cleanup(fName)
	writeOldFile(fName, t)

	patch := new(bytes.Buffer)
	anotherFile := []byte{0x77, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
	err := binarydist.Diff(bytes.NewReader(oldFile), bytes.NewReader(anotherFile), patch)
	if err != nil {
		t.Fatalf("Failed to create patch: %v", err)
	}

	err = Apply(patch, Options{
		TargetPath: fName,
		Checksum:   newFileChecksum[:],
		Patcher:    NewBSDiffPatcher(),
	})
	if err == nil {
		t.Fatalf("Failed to detect patch to wrong file!")
	}
}
|
||||
|
||||
// ecdsaPublicKey is a test-only ECDSA public key (PEM); it is the pair of
// ecdsaPrivateKey below. Never use these keys outside tests.
const ecdsaPublicKey = `
-----BEGIN PUBLIC KEY-----
MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEL8ThbSyEucsCxnd4dCZR2hIy5nea54ko
O+jUUfIjkvwhCWzASm0lpCVdVpXKZXIe+NZ+44RQRv3+OqJkCCGzUgJkPNI3lxdG
9zu8rbrnxISV06VQ8No7Ei9wiTpqmTBB
-----END PUBLIC KEY-----
`
|
||||
|
||||
// ecdsaPrivateKey is the test-only ECDSA private key matching
// ecdsaPublicKey above. Never use these keys outside tests.
const ecdsaPrivateKey = `
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDBttCB/1NOY4T+WrG4FSV49Ayn3gK1DNzfGaJ01JUXeiNFCWQM2pqpU
om8ATPP/dkegBwYFK4EEACKhZANiAAQvxOFtLIS5ywLGd3h0JlHaEjLmd5rniSg7
6NRR8iOS/CEJbMBKbSWkJV1Wlcplch741n7jhFBG/f46omQIIbNSAmQ80jeXF0b3
O7ytuufEhJXTpVDw2jsSL3CJOmqZMEE=
-----END EC PRIVATE KEY-----
`
|
||||
|
||||
// rsaPublicKey is a test-only RSA public key (PEM); it is the pair of
// rsaPrivateKey below. Never use these keys outside tests.
const rsaPublicKey = `
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxSWmu7trWKAwDFjiCN2D
Tk2jj2sgcr/CMlI4cSSiIOHrXCFxP1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKab
b9ead+kD0kxk7i2bFYvKX43oq66IW0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4
y20C59dPr9Dpcz8DZkdLsBV6YKF6Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjT
x4xRnjgTRRRlZvRtALHMUkIChgxDOhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv5
5fhJ08Rz7mmZmtH5JxTK5XTquo59sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7Nrf
fQIDAQAB
-----END PUBLIC KEY-----`
|
||||
|
||||
// rsaPrivateKey is the test-only RSA private key matching rsaPublicKey
// above. Never use these keys outside tests.
const rsaPrivateKey = `
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAxSWmu7trWKAwDFjiCN2DTk2jj2sgcr/CMlI4cSSiIOHrXCFx
P1I8i9PvQkd4hasXQrLbT5WXKrRGv1HKUKabb9ead+kD0kxk7i2bFYvKX43oq66I
W0mOLTQBO7I9UyT4L7svcMD+HUQ2BqHoaQe4y20C59dPr9Dpcz8DZkdLsBV6YKF6
Ieb3iGk8oRLMWNaUqPa8f1BGgxAkvPHcqDjTx4xRnjgTRRRlZvRtALHMUkIChgxD
OhoEzKpGiqnX7HtMJfrhV6h0PAXNA4h9Kjv55fhJ08Rz7mmZmtH5JxTK5XTquo59
sihSajR4bSjZbbkQ1uLkeFlY3eli3xdQ7NrffQIDAQABAoIBAAkN+6RvrTR61voa
Mvd5RQiZpEN4Bht/Fyo8gH8h0Zh1B9xJZOwlmMZLS5fdtHlfLEhR8qSrGDBL61vq
I8KkhEsUufF78EL+YzxVN+Q7cWYGHIOWFokqza7hzpSxUQO6lPOMQ1eIZaNueJTB
Zu07/47ISPPg/bXzgGVcpYlTCPTjUwKjtfyMqvX9AD7fIyYRm6zfE7EHj1J2sBFt
Yz1OGELg6HfJwXfpnPfBvftD0hWGzJ78Bp71fPJe6n5gnqmSqRvrcXNWFnH/yqkN
d6vPIxD6Z3LjvyZpkA7JillLva2L/zcIFhg4HZvQnWd8/PpDnUDonu36hcj4SC5j
W4aVPLkCgYEA4XzNKWxqYcajzFGZeSxlRHupSAl2MT7Cc5085MmE7dd31wK2T8O4
n7N4bkm/rjTbX85NsfWdKtWb6mpp8W3VlLP0rp4a/12OicVOkg4pv9LZDmY0sRlE
YuDJk1FeCZ50UrwTZI3rZ9IhZHhkgVA6uWAs7tYndONkxNHG0pjqs4sCgYEA39MZ
JwMqo3qsPntpgP940cCLflEsjS9hYNO3+Sv8Dq3P0HLVhBYajJnotf8VuU0fsQZG
grmtVn1yThFbMq7X1oY4F0XBA+paSiU18c4YyUnwax2u4sw9U/Q9tmQUZad5+ueT
qriMBwGv+ewO+nQxqvAsMUmemrVzrfwA5Oct+hcCgYAfiyXoNZJsOy2O15twqBVC
j0oPGcO+/9iT89sg5lACNbI+EdMPNYIOVTzzsL1v0VUfAe08h++Enn1BPcG0VHkc
ZFBGXTfJoXzfKQrkw7ZzbzuOGB4m6DH44xlP0oIlNlVvfX/5ASF9VJf3RiBJNsAA
TsP6ZVr/rw/ZuL7nlxy+IQKBgDhL/HOXlE3yOQiuOec8WsNHTs7C1BXe6PtVxVxi
988pYK/pclL6zEq5G5NLSceF4obAMVQIJ9UtUGbabrncyGUo9UrFPLsjYvprSZo8
YHegpVwL50UcYgCP2kXZ/ldjPIcjYDz8lhvdDMor2cidGTEJn9P11HLNWP9V91Ob
4jCZAoGAPNRSC5cC8iP/9j+s2/kdkfWJiNaolPYAUrmrkL6H39PYYZM5tnhaIYJV
Oh9AgABamU0eb3p3vXTISClVgV7ifq1HyZ7BSUhMfaY2Jk/s3sUHCWFxPZe9sgEG
KinIY/373KIkIV/5g4h2v1w330IWcfptxKcY/Er3DJr38f695GE=
-----END RSA PRIVATE KEY-----`
|
||||
|
||||
func signec(privatePEM string, source []byte, t *testing.T) []byte {
|
||||
parseFn := func(p []byte) (crypto.Signer, error) { return x509.ParseECPrivateKey(p) }
|
||||
return sign(parseFn, privatePEM, source, t)
|
||||
}
|
||||
|
||||
func signrsa(privatePEM string, source []byte, t *testing.T) []byte {
|
||||
parseFn := func(p []byte) (crypto.Signer, error) { return x509.ParsePKCS1PrivateKey(p) }
|
||||
return sign(parseFn, privatePEM, source, t)
|
||||
}
|
||||
|
||||
func sign(parsePrivKey func([]byte) (crypto.Signer, error), privatePEM string, source []byte, t *testing.T) []byte {
|
||||
block, _ := pem.Decode([]byte(privatePEM))
|
||||
if block == nil {
|
||||
t.Fatalf("Failed to parse private key PEM")
|
||||
}
|
||||
|
||||
priv, err := parsePrivKey(block.Bytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to parse private key DER: %v", err)
|
||||
}
|
||||
|
||||
checksum := sha256.Sum256(source)
|
||||
sig, err := priv.Sign(rand.Reader, checksum[:], crypto.SHA256)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to sign: %v", sig)
|
||||
}
|
||||
|
||||
return sig
|
||||
}
|
||||
|
||||
func TestVerifyECSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signec(ecdsaPrivateKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestVerifyRSASignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{
|
||||
TargetPath: fName,
|
||||
Verifier: NewRSAVerifier(),
|
||||
}
|
||||
err := opts.SetPublicKeyPEM([]byte(rsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signrsa(rsaPrivateKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
validateUpdate(fName, err, t)
|
||||
}
|
||||
|
||||
func TestVerifyFailBadSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyFailBadSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{
|
||||
TargetPath: fName,
|
||||
Signature: []byte{0xFF, 0xEE, 0xDD, 0xCC, 0xBB, 0xAA},
|
||||
}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not fail with bad signature")
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyFailNoSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifySignatureWithPEM"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Did not fail with empty signature")
|
||||
}
|
||||
}
|
||||
|
||||
// wrongKey is a test-only EC private key that is NOT the pair of
// ecdsaPublicKey; signatures made with it must fail verification.
const wrongKey = `
-----BEGIN EC PRIVATE KEY-----
MIGkAgEBBDBzqYp6N2s8YWYifBjS03/fFfmGeIPcxQEi+bbFeekIYt8NIKIkhD+r
hpaIwSmot+qgBwYFK4EEACKhZANiAAR0EC8Usbkc4k30frfEB2ECmsIghu9DJSqE
RbH7jfq2ULNv8tN/clRjxf2YXgp+iP3SQF1R1EYERKpWr8I57pgfIZtoZXjwpbQC
VBbP/Ff+05HOqwPC7rJMy1VAJLKg7Cw=
-----END EC PRIVATE KEY-----
`
|
||||
|
||||
func TestVerifyFailWrongSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestVerifyFailWrongSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey))
|
||||
if err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
|
||||
opts.Signature = signec(wrongKey, newFile, t)
|
||||
err = Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Verified an update that was signed by an untrusted key!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignatureButNoPublicKey(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestSignatureButNoPublicKey"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{
|
||||
TargetPath: fName,
|
||||
Signature: signec(ecdsaPrivateKey, newFile, t),
|
||||
})
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update with a signautre verification when no public key was specified!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPublicKeyButNoSignature(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fName := "TestPublicKeyButNoSignature"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
opts := Options{TargetPath: fName}
|
||||
if err := opts.SetPublicKeyPEM([]byte(ecdsaPublicKey)); err != nil {
|
||||
t.Fatalf("Could not parse public key: %v", err)
|
||||
}
|
||||
err := Apply(bytes.NewReader(newFile), opts)
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update with no signautre when a public key was specified!")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteError(t *testing.T) {
|
||||
t.Parallel()
|
||||
fName := "TestWriteError"
|
||||
defer cleanup(fName)
|
||||
writeOldFile(fName, t)
|
||||
|
||||
openFile = func(name string, flags int, perm os.FileMode) (*os.File, error) {
|
||||
f, err := os.OpenFile(name, flags, perm)
|
||||
|
||||
// simulate Write() error by closing the file prematurely
|
||||
f.Close()
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
err := Apply(bytes.NewReader(newFile), Options{TargetPath: fName})
|
||||
if err == nil {
|
||||
t.Fatalf("Allowed an update to an empty file")
|
||||
}
|
||||
}
|
@ -0,0 +1,172 @@
|
||||
/*
|
||||
Package update provides functionality to implement secure, self-updating Go programs (or other single-file targets).
|
||||
|
||||
For complete updating solutions please see Equinox (https://equinox.io) and go-tuf (https://github.com/flynn/go-tuf).
|
||||
|
||||
Basic Example
|
||||
|
||||
This example shows how to update a program remotely from a URL.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/inconshreveable/go-update"
|
||||
)
|
||||
|
||||
func doUpdate(url string) error {
|
||||
// request the new file
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
		err = update.Apply(resp.Body, update.Options{})
|
||||
if err != nil {
|
||||
if rerr := update.RollbackError(err); rerr != nil {
|
||||
				fmt.Printf("Failed to rollback from bad update: %v\n", rerr)
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
Binary Patching
|
||||
|
||||
Go binaries can often be large. It can be advantageous to only ship a binary patch to a client
|
||||
instead of the complete program text of a new version.
|
||||
|
||||
This example shows how to update a program with a bsdiff binary patch. Other patch formats
|
||||
may be applied by implementing the Patcher interface.
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"io"
|
||||
|
||||
"github.com/inconshreveable/go-update"
|
||||
)
|
||||
|
||||
func updateWithPatch(patch io.Reader) error {
|
||||
err := update.Apply(patch, update.Options{
|
||||
			Patcher: update.NewBSDiffPatcher(),
|
||||
})
|
||||
if err != nil {
|
||||
// error handling
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
Checksum Verification
|
||||
|
||||
Updating executable code on a computer can be a dangerous operation unless you
|
||||
take the appropriate steps to guarantee the authenticity of the new code. While
|
||||
checksum verification is important, it should always be combined with signature
|
||||
verification (next section) to guarantee that the code came from a trusted party.
|
||||
|
||||
go-update validates SHA256 checksums by default, but this is pluggable via the Hash
|
||||
property on the Options struct.
|
||||
|
||||
This example shows how to guarantee that the newly-updated binary is verified to
|
||||
have an appropriate checksum (that was otherwise retrieved via a secure channel)
|
||||
specified as a hex string.
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
_ "crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
|
||||
"github.com/inconshreveable/go-update"
|
||||
)
|
||||
|
||||
func updateWithChecksum(binary io.Reader, hexChecksum string) error {
|
||||
checksum, err := hex.DecodeString(hexChecksum)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = update.Apply(binary, update.Options{
|
||||
Hash: crypto.SHA256, // this is the default, you don't need to specify it
|
||||
Checksum: checksum,
|
||||
})
|
||||
if err != nil {
|
||||
// error handling
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
Cryptographic Signature Verification
|
||||
|
||||
Cryptographic verification of new code from an update is an extremely important way to guarantee the
|
||||
security and integrity of your updates.
|
||||
|
||||
Verification is performed by validating the signature of a hash of the new file. This
|
||||
means nothing changes if you apply your update with a patch.
|
||||
|
||||
This example shows how to add signature verification to your updates. To make all of this work
|
||||
an application distributor must first create a public/private key pair and embed the public key
|
||||
into their application. When they issue a new release, the issuer must sign the new executable file
|
||||
with the private key and distribute the signature along with the update.
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
_ "crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
|
||||
"github.com/inconshreveable/go-update"
|
||||
)
|
||||
|
||||
var publicKey = []byte(`
|
||||
-----BEGIN PUBLIC KEY-----
|
||||
MFYwEAYHKoZIzj0CAQYFK4EEAAoDQgAEtrVmBxQvheRArXjg2vG1xIprWGuCyESx
|
||||
MMY8pjmjepSy2kuz+nl9aFLqmr+rDNdYvEBqQaZrYMc6k29gjvoQnQ==
|
||||
-----END PUBLIC KEY-----
|
||||
`)
|
||||
|
||||
	func verifiedUpdate(binary io.Reader, hexChecksum, hexSignature string) error {
|
||||
checksum, err := hex.DecodeString(hexChecksum)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
signature, err := hex.DecodeString(hexSignature)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := update.Options{
|
||||
Checksum: checksum,
|
||||
Signature: signature,
|
||||
Hash: crypto.SHA256, // this is the default, you don't need to specify it
|
||||
Verifier: update.NewECDSAVerifier(), // this is the default, you don't need to specify it
|
||||
}
|
||||
err = opts.SetPublicKeyPEM(publicKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = update.Apply(binary, opts)
|
||||
if err != nil {
|
||||
// error handling
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
Building Single-File Go Binaries
|
||||
|
||||
In order to update a Go application with go-update, you must distribute it as a single executable.
|
||||
This is often easy, but some applications require static assets (like HTML and CSS asset files or TLS certificates).
|
||||
In order to update applications like these, you'll want to make sure to embed those asset files into
|
||||
the distributed binary with a tool like go-bindata (my favorite): https://github.com/jteeuwen/go-bindata
|
||||
|
||||
Non-Goals
|
||||
|
||||
Mechanisms and protocols for determining whether an update should be applied and, if so, which one are
|
||||
out of scope for this package. Please consult go-tuf (https://github.com/flynn/go-tuf) or Equinox (https://equinox.io)
|
||||
for more complete solutions.
|
||||
|
||||
go-update only works for self-updating applications that are distributed as a single binary, i.e.
|
||||
applications that do not have additional assets or dependency files.
|
||||
Updating applications that are distributed as multiple on-disk files is out of scope, although this
|
||||
may change in future versions of this library.
|
||||
|
||||
*/
|
||||
package update
|
@ -0,0 +1,7 @@
|
||||
// +build !windows
|
||||
|
||||
package update
|
||||
|
||||
// hideFile is a no-op on non-Windows platforms (this file is built with
// `// +build !windows`); only the Windows build sets a hidden attribute.
func hideFile(path string) error {
	return nil
}
|
@ -0,0 +1,19 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func hideFile(path string) error {
|
||||
kernel32 := syscall.NewLazyDLL("kernel32.dll")
|
||||
setFileAttributes := kernel32.NewProc("SetFileAttributesW")
|
||||
|
||||
r1, _, err := setFileAttributes.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), 2)
|
||||
|
||||
if r1 == 0 {
|
||||
return err
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
22
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/License
generated
vendored
22
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/License
generated
vendored
@ -0,0 +1,22 @@
|
||||
Copyright 2012 Keith Rarick
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
@ -0,0 +1,7 @@
|
||||
# binarydist
|
||||
|
||||
Package binarydist implements binary diff and patch as described on
|
||||
<http://www.daemonology.net/bsdiff/>. It reads and writes files
|
||||
compatible with the tools there.
|
||||
|
||||
Documentation at <http://go.pkgdoc.org/github.com/kr/binarydist>.
|
40
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/bzip2.go
generated
vendored
40
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/bzip2.go
generated
vendored
@ -0,0 +1,40 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// bzip2Writer compresses data by piping it through an external bzip2
// process: bytes written go to the process's stdin; compressed output is
// emitted on the command's configured Stdout.
type bzip2Writer struct {
	c *exec.Cmd      // the running "bzip2 -c" command
	w io.WriteCloser // stdin pipe of the bzip2 process
}
||||
|
||||
// Write forwards b to the bzip2 process's stdin.
func (w bzip2Writer) Write(b []byte) (int, error) {
	return w.w.Write(b)
}
|
||||
|
||||
func (w bzip2Writer) Close() error {
|
||||
if err := w.w.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return w.c.Wait()
|
||||
}
|
||||
|
||||
// Package compress/bzip2 implements only decompression,
|
||||
// so we'll fake it by running bzip2 in another process.
|
||||
func newBzip2Writer(w io.Writer) (wc io.WriteCloser, err error) {
|
||||
var bw bzip2Writer
|
||||
bw.c = exec.Command("bzip2", "-c")
|
||||
bw.c.Stdout = w
|
||||
|
||||
if bw.w, err = bw.c.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = bw.c.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bw, nil
|
||||
}
|
@ -0,0 +1,93 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
func mustOpen(path string) *os.File {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
func mustReadAll(r io.Reader) []byte {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func fileCmp(a, b *os.File) int64 {
|
||||
sa, err := a.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
sb, err := b.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if sa != sb {
|
||||
return sa
|
||||
}
|
||||
|
||||
_, err = a.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = b.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pa, err := ioutil.ReadAll(a)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
pb, err := ioutil.ReadAll(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
for i := range pa {
|
||||
if pa[i] != pb[i] {
|
||||
return int64(i)
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func mustWriteRandFile(path string, size int) *os.File {
|
||||
p := make([]byte, size)
|
||||
_, err := rand.Read(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = f.Write(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
_, err = f.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
408
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff.go
generated
vendored
408
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff.go
generated
vendored
@ -0,0 +1,408 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
// swap exchanges elements i and j of a in place.
func swap(a []int, i, j int) {
	tmp := a[i]
	a[i] = a[j]
	a[j] = tmp
}
|
||||
|
||||
// split is the recursive ternary-partition step of the qsufsort suffix
// sorting algorithm. It refines the group of suffixes I[start:start+length]
// by comparing the rank h positions ahead (V[...+h]), updating ranks in V
// and marking fully sorted runs in I with negative lengths.
// NOTE(review): ported bsdiff internals; exact statement order is
// significant — do not reorder.
func split(I, V []int, start, length, h int) {
	var i, j, k, x, jj, kk int

	// Small groups: selection-sort style pass.
	if length < 16 {
		for k = start; k < start+length; k += j {
			j = 1
			x = V[I[k]+h]
			for i = 1; k+i < start+length; i++ {
				if V[I[k+i]+h] < x {
					x = V[I[k+i]+h]
					j = 0
				}
				if V[I[k+i]+h] == x {
					swap(I, k+i, k+j)
					j++
				}
			}
			for i = 0; i < j; i++ {
				V[I[k+i]] = k + j - 1
			}
			if j == 1 {
				I[k] = -1
			}
		}
		return
	}

	// Pivot on the middle element's rank; count strictly-less (jj) and
	// equal (kk) elements to locate the three partitions.
	x = V[I[start+length/2]+h]
	jj = 0
	kk = 0
	for i = start; i < start+length; i++ {
		if V[I[i]+h] < x {
			jj++
		}
		if V[I[i]+h] == x {
			kk++
		}
	}
	jj += start
	kk += jj

	// Partition in place: [start,jj) < x, [jj,kk) == x, [kk,...) > x.
	i = start
	j = 0
	k = 0
	for i < jj {
		if V[I[i]+h] < x {
			i++
		} else if V[I[i]+h] == x {
			swap(I, i, jj+j)
			j++
		} else {
			swap(I, i, kk+k)
			k++
		}
	}

	for jj+j < kk {
		if V[I[jj+j]+h] == x {
			j++
		} else {
			swap(I, jj+j, kk+k)
			k++
		}
	}

	// Recurse into the less-than partition.
	if jj > start {
		split(I, V, start, jj-start, h)
	}

	// The equal partition is now a finished group: record its rank.
	for i = 0; i < kk-jj; i++ {
		V[I[jj+i]] = kk - 1
	}
	if jj == kk-1 {
		I[jj] = -1
	}

	// Recurse into the greater-than partition.
	if start+length > kk {
		split(I, V, kk, start+length-kk, h)
	}
}
|
||||
|
||||
// qsufsort builds the suffix array of obuf using the Larsson–Sadakane
// prefix-doubling algorithm (as used by bsdiff): I is the suffix array
// under construction, V holds the current rank of each suffix, and the
// comparison depth h doubles each round until all groups are sorted.
// The returned slice has len(obuf)+1 entries (includes the empty suffix).
func qsufsort(obuf []byte) []int {
	var buckets [256]int
	var i, h int
	I := make([]int, len(obuf)+1)
	V := make([]int, len(obuf)+1)

	// Counting sort on the first byte of each suffix.
	for _, c := range obuf {
		buckets[c]++
	}
	for i = 1; i < 256; i++ {
		buckets[i] += buckets[i-1]
	}
	copy(buckets[1:], buckets[:])
	buckets[0] = 0

	for i, c := range obuf {
		buckets[c]++
		I[buckets[c]] = i
	}

	// The empty suffix sorts first.
	I[0] = len(obuf)
	for i, c := range obuf {
		V[i] = buckets[c]
	}

	V[len(obuf)] = 0
	// Mark singleton buckets as already sorted (-1).
	for i = 1; i < 256; i++ {
		if buckets[i] == buckets[i-1]+1 {
			I[buckets[i]] = -1
		}
	}
	I[0] = -1

	// Prefix doubling: refine groups by rank at offset h until the whole
	// array collapses into one sorted run of length len(obuf)+1.
	for h = 1; I[0] != -(len(obuf) + 1); h += h {
		var n int
		for i = 0; i < len(obuf)+1; {
			if I[i] < 0 {
				// Skip (and merge) already-sorted runs.
				n -= I[i]
				i -= I[i]
			} else {
				if n != 0 {
					I[i-n] = -n
				}
				n = V[I[i]] + 1 - i
				split(I, V, i, n, h)
				i += n
				n = 0
			}
		}
		if n != 0 {
			I[i-n] = -n
		}
	}

	// Invert the rank array to produce the final suffix array.
	for i = 0; i < len(obuf)+1; i++ {
		I[V[i]] = i
	}
	return I
}
|
||||
|
||||
// matchlen reports the length of the longest common prefix of a and b.
func matchlen(a, b []byte) (i int) {
	limit := len(a)
	if len(b) < limit {
		limit = len(b)
	}
	for i < limit && a[i] == b[i] {
		i++
	}
	return i
}
|
||||
|
||||
func search(I []int, obuf, nbuf []byte, st, en int) (pos, n int) {
|
||||
if en-st < 2 {
|
||||
x := matchlen(obuf[I[st]:], nbuf)
|
||||
y := matchlen(obuf[I[en]:], nbuf)
|
||||
|
||||
if x > y {
|
||||
return I[st], x
|
||||
} else {
|
||||
return I[en], y
|
||||
}
|
||||
}
|
||||
|
||||
x := st + (en-st)/2
|
||||
if bytes.Compare(obuf[I[x]:], nbuf) < 0 {
|
||||
return search(I, obuf, nbuf, x, en)
|
||||
} else {
|
||||
return search(I, obuf, nbuf, st, x)
|
||||
}
|
||||
panic("unreached")
|
||||
}
|
||||
|
||||
// Diff computes the difference between old and new, according to the bsdiff
|
||||
// algorithm, and writes the result to patch.
|
||||
func Diff(old, new io.Reader, patch io.Writer) error {
|
||||
obuf, err := ioutil.ReadAll(old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nbuf, err := ioutil.ReadAll(new)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pbuf, err := diffBytes(obuf, nbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = patch.Write(pbuf)
|
||||
return err
|
||||
}
|
||||
|
||||
func diffBytes(obuf, nbuf []byte) ([]byte, error) {
|
||||
var patch seekBuffer
|
||||
err := diff(obuf, nbuf, &patch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return patch.buf, nil
|
||||
}
|
||||
|
||||
// diff writes a BSDIFF40 patch transforming obuf into nbuf onto patch.
// Layout (see the package doc): 32-byte header, then three bzip2 streams —
// control triples, diff block, extra block. The header is written twice:
// once up front as a placeholder and again at the end once CtrlLen/DiffLen
// are known, which is why patch must be an io.WriteSeeker.
// NOTE(review): ported bsdiff internals; statement order and the greedy
// scan/extend heuristics are significant — do not reorder.
func diff(obuf, nbuf []byte, patch io.WriteSeeker) error {
	var lenf int
	I := qsufsort(obuf)
	db := make([]byte, len(nbuf))
	eb := make([]byte, len(nbuf))
	var dblen, eblen int

	var hdr header
	hdr.Magic = magic
	hdr.NewSize = int64(len(nbuf))
	err := binary.Write(patch, signMagLittleEndian{}, &hdr)
	if err != nil {
		return err
	}

	// Compute the differences, writing ctrl as we go
	pfbz2, err := newBzip2Writer(patch)
	if err != nil {
		return err
	}
	var scan, pos, length int
	var lastscan, lastpos, lastoffset int
	for scan < len(nbuf) {
		var oldscore int
		scan += length
		// Advance scan until the suffix-array match beats extending the
		// previous approximate match (oldscore) by enough.
		for scsc := scan; scan < len(nbuf); scan++ {
			pos, length = search(I, obuf, nbuf[scan:], 0, len(obuf))

			for ; scsc < scan+length; scsc++ {
				if scsc+lastoffset < len(obuf) &&
					obuf[scsc+lastoffset] == nbuf[scsc] {
					oldscore++
				}
			}

			if (length == oldscore && length != 0) || length > oldscore+8 {
				break
			}

			if scan+lastoffset < len(obuf) && obuf[scan+lastoffset] == nbuf[scan] {
				oldscore--
			}
		}

		if length != oldscore || scan == len(nbuf) {
			// Extend the previous match forward (lenf) while at least
			// half the bytes agree.
			var s, Sf int
			lenf = 0
			for i := 0; lastscan+i < scan && lastpos+i < len(obuf); {
				if obuf[lastpos+i] == nbuf[lastscan+i] {
					s++
				}
				i++
				if s*2-i > Sf*2-lenf {
					Sf = s
					lenf = i
				}
			}

			// Extend the new match backward (lenb) the same way.
			lenb := 0
			if scan < len(nbuf) {
				var s, Sb int
				for i := 1; (scan >= lastscan+i) && (pos >= i); i++ {
					if obuf[pos-i] == nbuf[scan-i] {
						s++
					}
					if s*2-i > Sb*2-lenb {
						Sb = s
						lenb = i
					}
				}
			}

			// If the forward and backward extensions overlap, split the
			// overlap at the point that maximizes agreement.
			if lastscan+lenf > scan-lenb {
				overlap := (lastscan + lenf) - (scan - lenb)
				s := 0
				Ss := 0
				lens := 0
				for i := 0; i < overlap; i++ {
					if nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {
						s++
					}
					if nbuf[scan-lenb+i] == obuf[pos-lenb+i] {
						s--
					}
					if s > Ss {
						Ss = s
						lens = i + 1
					}
				}

				lenf += lens - overlap
				lenb -= lens
			}

			// Accumulate the byte-wise delta (db) and the literal extra
			// bytes between matches (eb).
			for i := 0; i < lenf; i++ {
				db[dblen+i] = nbuf[lastscan+i] - obuf[lastpos+i]
			}
			for i := 0; i < (scan-lenb)-(lastscan+lenf); i++ {
				eb[eblen+i] = nbuf[lastscan+lenf+i]
			}

			dblen += lenf
			eblen += (scan - lenb) - (lastscan + lenf)

			// Emit the control triple: (diff length, extra length,
			// seek adjustment in obuf).
			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(lenf))
			if err != nil {
				pfbz2.Close()
				return err
			}

			val := (scan - lenb) - (lastscan + lenf)
			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
			if err != nil {
				pfbz2.Close()
				return err
			}

			val = (pos - lenb) - (lastpos + lenf)
			err = binary.Write(pfbz2, signMagLittleEndian{}, int64(val))
			if err != nil {
				pfbz2.Close()
				return err
			}

			lastscan = scan - lenb
			lastpos = pos - lenb
			lastoffset = pos - scan
		}
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Compute size of compressed ctrl data
	l64, err := patch.Seek(0, 1)
	if err != nil {
		return err
	}
	hdr.CtrlLen = int64(l64 - 32)

	// Write compressed diff data
	pfbz2, err = newBzip2Writer(patch)
	if err != nil {
		return err
	}
	n, err := pfbz2.Write(db[:dblen])
	if err != nil {
		pfbz2.Close()
		return err
	}
	if n != dblen {
		pfbz2.Close()
		return io.ErrShortWrite
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Compute size of compressed diff data
	n64, err := patch.Seek(0, 1)
	if err != nil {
		return err
	}
	hdr.DiffLen = n64 - l64

	// Write compressed extra data
	pfbz2, err = newBzip2Writer(patch)
	if err != nil {
		return err
	}
	n, err = pfbz2.Write(eb[:eblen])
	if err != nil {
		pfbz2.Close()
		return err
	}
	if n != eblen {
		pfbz2.Close()
		return io.ErrShortWrite
	}
	err = pfbz2.Close()
	if err != nil {
		return err
	}

	// Seek to the beginning, write the header, and close the file
	_, err = patch.Seek(0, 0)
	if err != nil {
		return err
	}
	err = binary.Write(patch, signMagLittleEndian{}, &hdr)
	if err != nil {
		return err
	}
	return nil
}
|
67
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff_test.go
generated
vendored
67
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/diff_test.go
generated
vendored
@ -0,0 +1,67 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// diffT enumerates (old, new) file pairs exercised by TestDiff: a freshly
// generated random pair and a checked-in sample pair.
// NOTE(review): mustWriteRandFile/mustOpen run at package init and panic on
// I/O failure, so merely loading this test package touches the filesystem.
var diffT = []struct {
	old *os.File
	new *os.File
}{
	{
		old: mustWriteRandFile("test.old", 1e3),
		new: mustWriteRandFile("test.new", 1e3),
	},
	{
		old: mustOpen("testdata/sample.old"),
		new: mustOpen("testdata/sample.new"),
	},
}
|
||||
|
||||
func TestDiff(t *testing.T) {
|
||||
for _, s := range diffT {
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
exp, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cmd := exec.Command("bsdiff", s.old.Name(), s.new.Name(), exp.Name())
|
||||
cmd.Stdout = os.Stdout
|
||||
err = cmd.Run()
|
||||
os.Remove(exp.Name())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = Diff(s.old, s.new, got)
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
_, err = got.Seek(0, 0)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
gotBuf := mustReadAll(got)
|
||||
expBuf := mustReadAll(exp)
|
||||
|
||||
if !bytes.Equal(gotBuf, expBuf) {
|
||||
t.Fail()
|
||||
t.Logf("diff %s %s", s.old.Name(), s.new.Name())
|
||||
t.Logf("%s: len(got) = %d", got.Name(), len(gotBuf))
|
||||
t.Logf("%s: len(exp) = %d", exp.Name(), len(expBuf))
|
||||
i := matchlen(gotBuf, expBuf)
|
||||
t.Logf("produced different output at pos %d; %d != %d", i, gotBuf[i], expBuf[i])
|
||||
}
|
||||
}
|
||||
}
|
24
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/doc.go
generated
vendored
24
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/doc.go
generated
vendored
@ -0,0 +1,24 @@
|
||||
// Package binarydist implements binary diff and patch as described on
|
||||
// http://www.daemonology.net/bsdiff/. It reads and writes files
|
||||
// compatible with the tools there.
|
||||
package binarydist
|
||||
|
||||
var magic = [8]byte{'B', 'S', 'D', 'I', 'F', 'F', '4', '0'}
|
||||
|
||||
// File format:
|
||||
// 0 8 "BSDIFF40"
|
||||
// 8 8 X
|
||||
// 16 8 Y
|
||||
// 24 8 sizeof(newfile)
|
||||
// 32 X bzip2(control block)
|
||||
// 32+X Y bzip2(diff block)
|
||||
// 32+X+Y ??? bzip2(extra block)
|
||||
// with control block a set of triples (x,y,z) meaning "add x bytes
|
||||
// from oldfile to x bytes from the diff block; copy y bytes from the
|
||||
// extra block; seek forwards in oldfile by z bytes".
|
||||
type header struct {
|
||||
Magic [8]byte
|
||||
CtrlLen int64
|
||||
DiffLen int64
|
||||
NewSize int64
|
||||
}
|
53
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/encoding.go
generated
vendored
53
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/encoding.go
generated
vendored
@ -0,0 +1,53 @@
|
||||
package binarydist
|
||||
|
||||
// SignMagLittleEndian is the numeric encoding used by the bsdiff tools.
|
||||
// It implements binary.ByteOrder using a sign-magnitude format
|
||||
// and little-endian byte order. Only methods Uint64 and String
|
||||
// have been written; the rest panic.
|
||||
type signMagLittleEndian struct{}
|
||||
|
||||
func (signMagLittleEndian) Uint16(b []byte) uint16 { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) PutUint16(b []byte, v uint16) { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) Uint32(b []byte) uint32 { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) PutUint32(b []byte, v uint32) { panic("unimplemented") }
|
||||
|
||||
func (signMagLittleEndian) Uint64(b []byte) uint64 {
|
||||
y := int64(b[0]) |
|
||||
int64(b[1])<<8 |
|
||||
int64(b[2])<<16 |
|
||||
int64(b[3])<<24 |
|
||||
int64(b[4])<<32 |
|
||||
int64(b[5])<<40 |
|
||||
int64(b[6])<<48 |
|
||||
int64(b[7]&0x7f)<<56
|
||||
|
||||
if b[7]&0x80 != 0 {
|
||||
y = -y
|
||||
}
|
||||
return uint64(y)
|
||||
}
|
||||
|
||||
func (signMagLittleEndian) PutUint64(b []byte, v uint64) {
|
||||
x := int64(v)
|
||||
neg := x < 0
|
||||
if neg {
|
||||
x = -x
|
||||
}
|
||||
|
||||
b[0] = byte(x)
|
||||
b[1] = byte(x >> 8)
|
||||
b[2] = byte(x >> 16)
|
||||
b[3] = byte(x >> 24)
|
||||
b[4] = byte(x >> 32)
|
||||
b[5] = byte(x >> 40)
|
||||
b[6] = byte(x >> 48)
|
||||
b[7] = byte(x >> 56)
|
||||
if neg {
|
||||
b[7] |= 0x80
|
||||
}
|
||||
}
|
||||
|
||||
func (signMagLittleEndian) String() string { return "signMagLittleEndian" }
|
109
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/patch.go
generated
vendored
109
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/patch.go
generated
vendored
@ -0,0 +1,109 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
var ErrCorrupt = errors.New("corrupt patch")
|
||||
|
||||
// Patch applies patch to old, according to the bspatch algorithm,
|
||||
// and writes the result to new.
|
||||
func Patch(old io.Reader, new io.Writer, patch io.Reader) error {
|
||||
var hdr header
|
||||
err := binary.Read(patch, signMagLittleEndian{}, &hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hdr.Magic != magic {
|
||||
return ErrCorrupt
|
||||
}
|
||||
if hdr.CtrlLen < 0 || hdr.DiffLen < 0 || hdr.NewSize < 0 {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
ctrlbuf := make([]byte, hdr.CtrlLen)
|
||||
_, err = io.ReadFull(patch, ctrlbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cpfbz2 := bzip2.NewReader(bytes.NewReader(ctrlbuf))
|
||||
|
||||
diffbuf := make([]byte, hdr.DiffLen)
|
||||
_, err = io.ReadFull(patch, diffbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dpfbz2 := bzip2.NewReader(bytes.NewReader(diffbuf))
|
||||
|
||||
// The entire rest of the file is the extra block.
|
||||
epfbz2 := bzip2.NewReader(patch)
|
||||
|
||||
obuf, err := ioutil.ReadAll(old)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nbuf := make([]byte, hdr.NewSize)
|
||||
|
||||
var oldpos, newpos int64
|
||||
for newpos < hdr.NewSize {
|
||||
var ctrl struct{ Add, Copy, Seek int64 }
|
||||
err = binary.Read(cpfbz2, signMagLittleEndian{}, &ctrl)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sanity-check
|
||||
if newpos+ctrl.Add > hdr.NewSize {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Read diff string
|
||||
_, err = io.ReadFull(dpfbz2, nbuf[newpos:newpos+ctrl.Add])
|
||||
if err != nil {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Add old data to diff string
|
||||
for i := int64(0); i < ctrl.Add; i++ {
|
||||
if oldpos+i >= 0 && oldpos+i < int64(len(obuf)) {
|
||||
nbuf[newpos+i] += obuf[oldpos+i]
|
||||
}
|
||||
}
|
||||
|
||||
// Adjust pointers
|
||||
newpos += ctrl.Add
|
||||
oldpos += ctrl.Add
|
||||
|
||||
// Sanity-check
|
||||
if newpos+ctrl.Copy > hdr.NewSize {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Read extra string
|
||||
_, err = io.ReadFull(epfbz2, nbuf[newpos:newpos+ctrl.Copy])
|
||||
if err != nil {
|
||||
return ErrCorrupt
|
||||
}
|
||||
|
||||
// Adjust pointers
|
||||
newpos += ctrl.Copy
|
||||
oldpos += ctrl.Seek
|
||||
}
|
||||
|
||||
// Write the new file
|
||||
for len(nbuf) > 0 {
|
||||
n, err := new.Write(nbuf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nbuf = nbuf[n:]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
@ -0,0 +1,62 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPatch(t *testing.T) {
|
||||
mustWriteRandFile("test.old", 1e3)
|
||||
mustWriteRandFile("test.new", 1e3)
|
||||
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
err = exec.Command("bsdiff", "test.old", "test.new", "test.patch").Run()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
err = Patch(mustOpen("test.old"), got, mustOpen("test.patch"))
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
ref, err := got.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Logf("got %d bytes", ref)
|
||||
if n := fileCmp(got, mustOpen("test.new")); n > -1 {
|
||||
t.Fatalf("produced different output at pos %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPatchHk(t *testing.T) {
|
||||
got, err := ioutil.TempFile("/tmp", "bspatch.")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
os.Remove(got.Name())
|
||||
|
||||
err = Patch(mustOpen("testdata/sample.old"), got, mustOpen("testdata/sample.patch"))
|
||||
if err != nil {
|
||||
t.Fatal("err", err)
|
||||
}
|
||||
|
||||
ref, err := got.Seek(0, 2)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
t.Logf("got %d bytes", ref)
|
||||
if n := fileCmp(got, mustOpen("testdata/sample.new")); n > -1 {
|
||||
t.Fatalf("produced different output at pos %d", n)
|
||||
}
|
||||
}
|
43
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/seek.go
generated
vendored
43
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/seek.go
generated
vendored
@ -0,0 +1,43 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
type seekBuffer struct {
|
||||
buf []byte
|
||||
pos int
|
||||
}
|
||||
|
||||
func (b *seekBuffer) Write(p []byte) (n int, err error) {
|
||||
n = copy(b.buf[b.pos:], p)
|
||||
if n == len(p) {
|
||||
b.pos += n
|
||||
return n, nil
|
||||
}
|
||||
b.buf = append(b.buf, p[n:]...)
|
||||
b.pos += len(p)
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
func (b *seekBuffer) Seek(offset int64, whence int) (ret int64, err error) {
|
||||
var abs int64
|
||||
switch whence {
|
||||
case 0:
|
||||
abs = offset
|
||||
case 1:
|
||||
abs = int64(b.pos) + offset
|
||||
case 2:
|
||||
abs = int64(len(b.buf)) + offset
|
||||
default:
|
||||
return 0, errors.New("binarydist: invalid whence")
|
||||
}
|
||||
if abs < 0 {
|
||||
return 0, errors.New("binarydist: negative position")
|
||||
}
|
||||
if abs >= 1<<31 {
|
||||
return 0, errors.New("binarydist: position out of range")
|
||||
}
|
||||
b.pos = int(abs)
|
||||
return abs, nil
|
||||
}
|
33
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/sort_test.go
generated
vendored
33
vendor/github.com/equinox-io/equinox/internal/go-update/internal/binarydist/sort_test.go
generated
vendored
@ -0,0 +1,33 @@
|
||||
package binarydist
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var sortT = [][]byte{
|
||||
mustRandBytes(1000),
|
||||
mustReadAll(mustOpen("test.old")),
|
||||
[]byte("abcdefabcdef"),
|
||||
}
|
||||
|
||||
func TestQsufsort(t *testing.T) {
|
||||
for _, s := range sortT {
|
||||
I := qsufsort(s)
|
||||
for i := 1; i < len(I); i++ {
|
||||
if bytes.Compare(s[I[i-1]:], s[I[i]:]) > 0 {
|
||||
t.Fatalf("unsorted at %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mustRandBytes(n int) []byte {
|
||||
b := make([]byte, n)
|
||||
_, err := rand.Read(b)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
27
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/LICENSE
generated
vendored
27
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/LICENSE
generated
vendored
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
16
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/README.md
generated
vendored
16
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/README.md
generated
vendored
@ -0,0 +1,16 @@
|
||||
### Extensions to the "os" package.
|
||||
|
||||
## Find the current Executable and ExecutableFolder.
|
||||
|
||||
There is sometimes utility in finding the current executable file
|
||||
that is running. This can be used for upgrading the current executable
|
||||
or finding resources located relative to the executable file. Both
|
||||
working directory and the os.Args[0] value are arbitrary and cannot
|
||||
be relied on; os.Args[0] can be "faked".
|
||||
|
||||
Multi-platform and supports:
|
||||
* Linux
|
||||
* OS X
|
||||
* Windows
|
||||
* Plan 9
|
||||
* BSDs.
|
27
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext.go
generated
vendored
27
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext.go
generated
vendored
@ -0,0 +1,27 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Extensions to the standard "os" package.
|
||||
package osext
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// Executable returns an absolute path that can be used to
|
||||
// re-invoke the current program.
|
||||
// It may not be valid after the current program exits.
|
||||
func Executable() (string, error) {
|
||||
p, err := executable()
|
||||
return filepath.Clean(p), err
|
||||
}
|
||||
|
||||
// Returns same path as Executable, returns just the folder
|
||||
// path. Excludes the executable name and any trailing slash.
|
||||
func ExecutableFolder() (string, error) {
|
||||
p, err := Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Dir(p), nil
|
||||
}
|
20
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_plan9.go
generated
vendored
20
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_plan9.go
generated
vendored
@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
}
|
36
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_procfs.go
generated
vendored
36
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_procfs.go
generated
vendored
@ -0,0 +1,36 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd solaris dragonfly
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
const deletedTag = " (deleted)"
|
||||
execpath, err := os.Readlink("/proc/self/exe")
|
||||
if err != nil {
|
||||
return execpath, err
|
||||
}
|
||||
execpath = strings.TrimSuffix(execpath, deletedTag)
|
||||
execpath = strings.TrimPrefix(execpath, deletedTag)
|
||||
return execpath, nil
|
||||
case "netbsd":
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd", "dragonfly":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
case "solaris":
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
|
||||
}
|
||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
||||
}
|
79
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_sysctl.go
generated
vendored
79
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_sysctl.go
generated
vendored
@ -0,0 +1,79 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var initCwd, initCwdErr = os.Getwd()
|
||||
|
||||
func executable() (string, error) {
|
||||
var mib [4]int32
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
|
||||
case "darwin":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
|
||||
}
|
||||
|
||||
n := uintptr(0)
|
||||
// Get length.
|
||||
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
buf := make([]byte, n)
|
||||
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
for i, v := range buf {
|
||||
if v == 0 {
|
||||
buf = buf[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
execPath := string(buf)
|
||||
// execPath will not be empty due to above checks.
|
||||
// Try to get the absolute path if the execPath is not rooted.
|
||||
if execPath[0] != '/' {
|
||||
execPath, err = getAbs(execPath)
|
||||
if err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
|
||||
// actual executable.
|
||||
if runtime.GOOS == "darwin" {
|
||||
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
return execPath, nil
|
||||
}
|
||||
|
||||
func getAbs(execPath string) (string, error) {
|
||||
if initCwdErr != nil {
|
||||
return execPath, initCwdErr
|
||||
}
|
||||
// The execPath may begin with a "../" or a "./" so clean it first.
|
||||
// Join the two paths, trailing and starting slashes undetermined, so use
|
||||
// the generic Join function.
|
||||
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
|
||||
}
|
203
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_test.go
generated
vendored
203
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_test.go
generated
vendored
@ -0,0 +1,203 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin linux freebsd netbsd windows
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
|
||||
|
||||
executableEnvValueMatch = "match"
|
||||
executableEnvValueDelete = "delete"
|
||||
)
|
||||
|
||||
func TestPrintExecutable(t *testing.T) {
|
||||
ef, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
t.Log("Executable:", ef)
|
||||
}
|
||||
func TestPrintExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
t.Log("Executable Folder:", ef)
|
||||
}
|
||||
func TestExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
if ef[len(ef)-1] == filepath.Separator {
|
||||
t.Fatal("ExecutableFolder ends with a trailing slash.")
|
||||
}
|
||||
}
|
||||
func TestExecutableMatch(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
// fullpath to be of the form "dir/prog".
|
||||
dir := filepath.Dir(filepath.Dir(ep))
|
||||
fullpath, err := filepath.Rel(dir, ep)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Rel: %v", err)
|
||||
}
|
||||
// Make child start with a relative program path.
|
||||
// Alter argv[0] for child to verify getting real path without argv[0].
|
||||
cmd := &exec.Cmd{
|
||||
Dir: dir,
|
||||
Path: fullpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) failed: %v", err)
|
||||
}
|
||||
outs := string(out)
|
||||
if !filepath.IsAbs(outs) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
||||
}
|
||||
if !sameFile(outs, ep) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutableDelete(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip()
|
||||
}
|
||||
fpath, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
stderrBuff := &bytes.Buffer{}
|
||||
stdoutBuff := &bytes.Buffer{}
|
||||
cmd := &exec.Cmd{
|
||||
Path: fpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
|
||||
Stdin: r,
|
||||
Stderr: stderrBuff,
|
||||
Stdout: stdoutBuff,
|
||||
}
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) start failed: %v", err)
|
||||
}
|
||||
|
||||
tempPath := fpath + "_copy"
|
||||
_ = os.Remove(tempPath)
|
||||
|
||||
err = copyFile(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("copy file failed: %v", err)
|
||||
}
|
||||
err = os.Remove(fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("remove running test file failed: %v", err)
|
||||
}
|
||||
err = os.Rename(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("rename copy to previous name failed: %v", err)
|
||||
}
|
||||
|
||||
w.Write([]byte{0})
|
||||
w.Close()
|
||||
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
t.Fatalf("exec wait failed: %v", err)
|
||||
}
|
||||
|
||||
childPath := stderrBuff.String()
|
||||
if !filepath.IsAbs(childPath) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", childPath)
|
||||
}
|
||||
if !sameFile(childPath, fpath) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
|
||||
}
|
||||
}
|
||||
|
||||
func sameFile(fn1, fn2 string) bool {
|
||||
fi1, err := os.Stat(fn1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fi2, err := os.Stat(fn2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(fi1, fi2)
|
||||
}
|
||||
func copyFile(dest, src string) error {
|
||||
df, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer df.Close()
|
||||
|
||||
sf, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sf.Close()
|
||||
|
||||
_, err = io.Copy(df, sf)
|
||||
return err
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
env := os.Getenv(executableEnvVar)
|
||||
switch env {
|
||||
case "":
|
||||
os.Exit(m.Run())
|
||||
case executableEnvValueMatch:
|
||||
// First chdir to another path.
|
||||
dir := "/"
|
||||
if runtime.GOOS == "windows" {
|
||||
dir = filepath.VolumeName(".")
|
||||
}
|
||||
os.Chdir(dir)
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
case executableEnvValueDelete:
|
||||
bb := make([]byte, 1)
|
||||
var err error
|
||||
n, err := os.Stdin.Read(bb)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
if n != 1 {
|
||||
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
|
||||
os.Exit(2)
|
||||
}
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
34
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_windows.go
generated
vendored
34
vendor/github.com/equinox-io/equinox/internal/go-update/internal/osext/osext_windows.go
generated
vendored
@ -0,0 +1,34 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
||||
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
|
||||
)
|
||||
|
||||
// GetModuleFileName() with hModule = NULL
|
||||
func executable() (exePath string, err error) {
|
||||
return getModuleFileName()
|
||||
}
|
||||
|
||||
func getModuleFileName() (string, error) {
|
||||
var n uint32
|
||||
b := make([]uint16, syscall.MAX_PATH)
|
||||
size := uint32(len(b))
|
||||
|
||||
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
|
||||
n = uint32(r0)
|
||||
if n == 0 {
|
||||
return "", e1
|
||||
}
|
||||
return string(utf16.Decode(b[0:n])), nil
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/equinox-io/equinox/internal/go-update/internal/binarydist"
|
||||
)
|
||||
|
||||
// Patcher defines an interface for applying binary patches to an old item to get an updated item.
|
||||
type Patcher interface {
|
||||
Patch(old io.Reader, new io.Writer, patch io.Reader) error
|
||||
}
|
||||
|
||||
type patchFn func(io.Reader, io.Writer, io.Reader) error
|
||||
|
||||
func (fn patchFn) Patch(old io.Reader, new io.Writer, patch io.Reader) error {
|
||||
return fn(old, new, patch)
|
||||
}
|
||||
|
||||
// NewBSDifferPatcher returns a new Patcher that applies binary patches using
|
||||
// the bsdiff algorithm. See http://www.daemonology.net/bsdiff/
|
||||
func NewBSDiffPatcher() Patcher {
|
||||
return patchFn(binarydist.Patch)
|
||||
}
|
@ -0,0 +1,74 @@
|
||||
package update
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"encoding/asn1"
|
||||
"errors"
|
||||
"math/big"
|
||||
)
|
||||
|
||||
// Verifier defines an interface for verfiying an update's signature with a public key.
|
||||
type Verifier interface {
|
||||
VerifySignature(checksum, signature []byte, h crypto.Hash, publicKey crypto.PublicKey) error
|
||||
}
|
||||
|
||||
type verifyFn func([]byte, []byte, crypto.Hash, crypto.PublicKey) error
|
||||
|
||||
func (fn verifyFn) VerifySignature(checksum []byte, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
|
||||
return fn(checksum, signature, hash, publicKey)
|
||||
}
|
||||
|
||||
// NewRSAVerifier returns a Verifier that uses the RSA algorithm to verify updates.
|
||||
func NewRSAVerifier() Verifier {
|
||||
return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
|
||||
key, ok := publicKey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return errors.New("not a valid RSA public key")
|
||||
}
|
||||
return rsa.VerifyPKCS1v15(key, hash, checksum, signature)
|
||||
})
|
||||
}
|
||||
|
||||
type rsDER struct {
|
||||
R *big.Int
|
||||
S *big.Int
|
||||
}
|
||||
|
||||
// NewECDSAVerifier returns a Verifier that uses the ECDSA algorithm to verify updates.
|
||||
func NewECDSAVerifier() Verifier {
|
||||
return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
|
||||
key, ok := publicKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return errors.New("not a valid ECDSA public key")
|
||||
}
|
||||
var rs rsDER
|
||||
if _, err := asn1.Unmarshal(signature, &rs); err != nil {
|
||||
return err
|
||||
}
|
||||
if !ecdsa.Verify(key, checksum, rs.R, rs.S) {
|
||||
return errors.New("failed to verify ecsda signature")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// NewDSAVerifier returns a Verifier that uses the DSA algorithm to verify updates.
|
||||
func NewDSAVerifier() Verifier {
|
||||
return verifyFn(func(checksum, signature []byte, hash crypto.Hash, publicKey crypto.PublicKey) error {
|
||||
key, ok := publicKey.(*dsa.PublicKey)
|
||||
if !ok {
|
||||
return errors.New("not a valid DSA public key")
|
||||
}
|
||||
var rs rsDER
|
||||
if _, err := asn1.Unmarshal(signature, &rs); err != nil {
|
||||
return err
|
||||
}
|
||||
if !dsa.Verify(key, checksum, rs.R, rs.S) {
|
||||
return errors.New("failed to verify ecsda signature")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,16 @@
|
||||
### Extensions to the "os" package.
|
||||
|
||||
## Find the current Executable and ExecutableFolder.
|
||||
|
||||
There is sometimes utility in finding the current executable file
|
||||
that is running. This can be used for upgrading the current executable
|
||||
or finding resources located relative to the executable file. Both
|
||||
working directory and the os.Args[0] value are arbitrary and cannot
|
||||
be relied on; os.Args[0] can be "faked".
|
||||
|
||||
Multi-platform and supports:
|
||||
* Linux
|
||||
* OS X
|
||||
* Windows
|
||||
* Plan 9
|
||||
* BSDs.
|
@ -0,0 +1,27 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Extensions to the standard "os" package.
|
||||
package osext
|
||||
|
||||
import "path/filepath"
|
||||
|
||||
// Executable returns an absolute path that can be used to
|
||||
// re-invoke the current program.
|
||||
// It may not be valid after the current program exits.
|
||||
func Executable() (string, error) {
|
||||
p, err := executable()
|
||||
return filepath.Clean(p), err
|
||||
}
|
||||
|
||||
// Returns same path as Executable, returns just the folder
|
||||
// path. Excludes the executable name and any trailing slash.
|
||||
func ExecutableFolder() (string, error) {
|
||||
p, err := Executable()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return filepath.Dir(p), nil
|
||||
}
|
@ -0,0 +1,20 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer f.Close()
|
||||
return syscall.Fd2path(int(f.Fd()))
|
||||
}
|
@ -0,0 +1,36 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build linux netbsd openbsd solaris dragonfly
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func executable() (string, error) {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
const deletedTag = " (deleted)"
|
||||
execpath, err := os.Readlink("/proc/self/exe")
|
||||
if err != nil {
|
||||
return execpath, err
|
||||
}
|
||||
execpath = strings.TrimSuffix(execpath, deletedTag)
|
||||
execpath = strings.TrimPrefix(execpath, deletedTag)
|
||||
return execpath, nil
|
||||
case "netbsd":
|
||||
return os.Readlink("/proc/curproc/exe")
|
||||
case "openbsd", "dragonfly":
|
||||
return os.Readlink("/proc/curproc/file")
|
||||
case "solaris":
|
||||
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
|
||||
}
|
||||
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
|
||||
}
|
@ -0,0 +1,79 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin freebsd
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var initCwd, initCwdErr = os.Getwd()
|
||||
|
||||
func executable() (string, error) {
|
||||
var mib [4]int32
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
|
||||
case "darwin":
|
||||
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
|
||||
}
|
||||
|
||||
n := uintptr(0)
|
||||
// Get length.
|
||||
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
buf := make([]byte, n)
|
||||
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
|
||||
if errNum != 0 {
|
||||
return "", errNum
|
||||
}
|
||||
if n == 0 { // This shouldn't happen.
|
||||
return "", nil
|
||||
}
|
||||
for i, v := range buf {
|
||||
if v == 0 {
|
||||
buf = buf[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
var err error
|
||||
execPath := string(buf)
|
||||
// execPath will not be empty due to above checks.
|
||||
// Try to get the absolute path if the execPath is not rooted.
|
||||
if execPath[0] != '/' {
|
||||
execPath, err = getAbs(execPath)
|
||||
if err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
|
||||
// actual executable.
|
||||
if runtime.GOOS == "darwin" {
|
||||
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
|
||||
return execPath, err
|
||||
}
|
||||
}
|
||||
return execPath, nil
|
||||
}
|
||||
|
||||
func getAbs(execPath string) (string, error) {
|
||||
if initCwdErr != nil {
|
||||
return execPath, initCwdErr
|
||||
}
|
||||
// The execPath may begin with a "../" or a "./" so clean it first.
|
||||
// Join the two paths, trailing and starting slashes undetermined, so use
|
||||
// the generic Join function.
|
||||
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
|
||||
}
|
@ -0,0 +1,203 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build darwin linux freebsd netbsd windows
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
executableEnvVar = "OSTEST_OUTPUT_EXECUTABLE"
|
||||
|
||||
executableEnvValueMatch = "match"
|
||||
executableEnvValueDelete = "delete"
|
||||
)
|
||||
|
||||
func TestPrintExecutable(t *testing.T) {
|
||||
ef, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
t.Log("Executable:", ef)
|
||||
}
|
||||
func TestPrintExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
t.Log("Executable Folder:", ef)
|
||||
}
|
||||
func TestExecutableFolder(t *testing.T) {
|
||||
ef, err := ExecutableFolder()
|
||||
if err != nil {
|
||||
t.Fatalf("ExecutableFolder failed: %v", err)
|
||||
}
|
||||
if ef[len(ef)-1] == filepath.Separator {
|
||||
t.Fatal("ExecutableFolder ends with a trailing slash.")
|
||||
}
|
||||
}
|
||||
func TestExecutableMatch(t *testing.T) {
|
||||
ep, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
// fullpath to be of the form "dir/prog".
|
||||
dir := filepath.Dir(filepath.Dir(ep))
|
||||
fullpath, err := filepath.Rel(dir, ep)
|
||||
if err != nil {
|
||||
t.Fatalf("filepath.Rel: %v", err)
|
||||
}
|
||||
// Make child start with a relative program path.
|
||||
// Alter argv[0] for child to verify getting real path without argv[0].
|
||||
cmd := &exec.Cmd{
|
||||
Dir: dir,
|
||||
Path: fullpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueMatch)},
|
||||
}
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) failed: %v", err)
|
||||
}
|
||||
outs := string(out)
|
||||
if !filepath.IsAbs(outs) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", out)
|
||||
}
|
||||
if !sameFile(outs, ep) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", out, ep)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExecutableDelete(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip()
|
||||
}
|
||||
fpath, err := Executable()
|
||||
if err != nil {
|
||||
t.Fatalf("Executable failed: %v", err)
|
||||
}
|
||||
|
||||
r, w := io.Pipe()
|
||||
stderrBuff := &bytes.Buffer{}
|
||||
stdoutBuff := &bytes.Buffer{}
|
||||
cmd := &exec.Cmd{
|
||||
Path: fpath,
|
||||
Env: []string{fmt.Sprintf("%s=%s", executableEnvVar, executableEnvValueDelete)},
|
||||
Stdin: r,
|
||||
Stderr: stderrBuff,
|
||||
Stdout: stdoutBuff,
|
||||
}
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
t.Fatalf("exec(self) start failed: %v", err)
|
||||
}
|
||||
|
||||
tempPath := fpath + "_copy"
|
||||
_ = os.Remove(tempPath)
|
||||
|
||||
err = copyFile(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("copy file failed: %v", err)
|
||||
}
|
||||
err = os.Remove(fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("remove running test file failed: %v", err)
|
||||
}
|
||||
err = os.Rename(tempPath, fpath)
|
||||
if err != nil {
|
||||
t.Fatalf("rename copy to previous name failed: %v", err)
|
||||
}
|
||||
|
||||
w.Write([]byte{0})
|
||||
w.Close()
|
||||
|
||||
err = cmd.Wait()
|
||||
if err != nil {
|
||||
t.Fatalf("exec wait failed: %v", err)
|
||||
}
|
||||
|
||||
childPath := stderrBuff.String()
|
||||
if !filepath.IsAbs(childPath) {
|
||||
t.Fatalf("Child returned %q, want an absolute path", childPath)
|
||||
}
|
||||
if !sameFile(childPath, fpath) {
|
||||
t.Fatalf("Child returned %q, not the same file as %q", childPath, fpath)
|
||||
}
|
||||
}
|
||||
|
||||
func sameFile(fn1, fn2 string) bool {
|
||||
fi1, err := os.Stat(fn1)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fi2, err := os.Stat(fn2)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return os.SameFile(fi1, fi2)
|
||||
}
|
||||
func copyFile(dest, src string) error {
|
||||
df, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer df.Close()
|
||||
|
||||
sf, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer sf.Close()
|
||||
|
||||
_, err = io.Copy(df, sf)
|
||||
return err
|
||||
}
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
env := os.Getenv(executableEnvVar)
|
||||
switch env {
|
||||
case "":
|
||||
os.Exit(m.Run())
|
||||
case executableEnvValueMatch:
|
||||
// First chdir to another path.
|
||||
dir := "/"
|
||||
if runtime.GOOS == "windows" {
|
||||
dir = filepath.VolumeName(".")
|
||||
}
|
||||
os.Chdir(dir)
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
case executableEnvValueDelete:
|
||||
bb := make([]byte, 1)
|
||||
var err error
|
||||
n, err := os.Stdin.Read(bb)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
if n != 1 {
|
||||
fmt.Fprint(os.Stderr, "ERROR: n != 1, n == ", n)
|
||||
os.Exit(2)
|
||||
}
|
||||
if ep, err := Executable(); err != nil {
|
||||
fmt.Fprint(os.Stderr, "ERROR: ", err)
|
||||
} else {
|
||||
fmt.Fprint(os.Stderr, ep)
|
||||
}
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
@ -0,0 +1,34 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package osext
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unicode/utf16"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
kernel = syscall.MustLoadDLL("kernel32.dll")
|
||||
getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
|
||||
)
|
||||
|
||||
// GetModuleFileName() with hModule = NULL
|
||||
func executable() (exePath string, err error) {
|
||||
return getModuleFileName()
|
||||
}
|
||||
|
||||
func getModuleFileName() (string, error) {
|
||||
var n uint32
|
||||
b := make([]uint16, syscall.MAX_PATH)
|
||||
size := uint32(len(b))
|
||||
|
||||
r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
|
||||
n = uint32(r0)
|
||||
if n == 0 {
|
||||
return "", e1
|
||||
}
|
||||
return string(utf16.Decode(b[0:n])), nil
|
||||
}
|
@ -0,0 +1,42 @@
|
||||
/*
|
||||
package proto defines a set of structures used to negotiate an update between an
|
||||
an application (the client) and an Equinox update service.
|
||||
*/
|
||||
package proto
|
||||
|
||||
import "time"
|
||||
|
||||
type PatchKind string
|
||||
|
||||
const (
|
||||
PatchNone PatchKind = "none"
|
||||
PatchBSDiff PatchKind = "bsdiff"
|
||||
)
|
||||
|
||||
type Request struct {
|
||||
AppID string `json:"app_id"`
|
||||
Channel string `json:"channel"`
|
||||
OS string `json:"os"`
|
||||
Arch string `json:"arch"`
|
||||
GoARM string `json:"goarm"`
|
||||
TargetVersion string `json:"target_version"`
|
||||
|
||||
CurrentVersion string `json:"current_version"`
|
||||
CurrentSHA256 string `json:"current_sha256"`
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
Available bool `json:"available"`
|
||||
DownloadURL string `json:"download_url"`
|
||||
Checksum string `json:"checksum"`
|
||||
Signature string `json:"signature"`
|
||||
Patch PatchKind `json:"patch_type"`
|
||||
Release Release `json:"release"`
|
||||
}
|
||||
|
||||
type Release struct {
|
||||
Title string `json:"title"`
|
||||
Version string `json:"version"`
|
||||
Description string `json:"description"`
|
||||
CreateDate time.Time `json:"create_date"`
|
||||
}
|
@ -0,0 +1,305 @@
|
||||
package equinox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/equinox-io/equinox/internal/go-update"
|
||||
"github.com/equinox-io/equinox/internal/osext"
|
||||
"github.com/equinox-io/equinox/proto"
|
||||
)
|
||||
|
||||
const protocolVersion = "1"
|
||||
const defaultCheckURL = "https://update.equinox.io/check"
|
||||
const userAgent = "EquinoxSDK/1.0"
|
||||
|
||||
var NotAvailableErr = errors.New("No update available")
|
||||
|
||||
type Options struct {
|
||||
// Channel specifies the name of an Equinox release channel to check for
|
||||
// a newer version of the application.
|
||||
//
|
||||
// If empty, defaults to 'stable'.
|
||||
Channel string
|
||||
|
||||
// Version requests an update to a specific version of the application.
|
||||
// If specified, `Channel` is ignored.
|
||||
Version string
|
||||
|
||||
// TargetPath defines the path to the file to update.
|
||||
// The emptry string means 'the executable file of the running program'.
|
||||
TargetPath string
|
||||
|
||||
// Create TargetPath replacement with this file mode. If zero, defaults to 0755.
|
||||
TargetMode os.FileMode
|
||||
|
||||
// Public key to use for signature verification. If nil, no signature
|
||||
// verification is done. Use `SetPublicKeyPEM` to set this field with PEM data.
|
||||
PublicKey crypto.PublicKey
|
||||
|
||||
// Target operating system of the update. Uses the same standard OS names used
|
||||
// by Go build tags (windows, darwin, linux, etc).
|
||||
// If empty, it will be populated by consulting runtime.GOOS
|
||||
OS string
|
||||
|
||||
// Target architecture of the update. Uses the same standard Arch names used
|
||||
// by Go build tags (amd64, 386, arm, etc).
|
||||
// If empty, it will be populated by consulting runtime.GOARCH
|
||||
Arch string
|
||||
|
||||
// Target ARM architecture, if a specific one if required. Uses the same names
|
||||
// as the GOARM environment variable (5, 6, 7).
|
||||
//
|
||||
// GoARM is ignored if Arch != 'arm'.
|
||||
// GoARM is ignored if it is the empty string. Omit it if you do not need
|
||||
// to distinguish between ARM versions.
|
||||
GoARM string
|
||||
|
||||
// The current application version. This is used for statistics and reporting only,
|
||||
// it is optional.
|
||||
CurrentVersion string
|
||||
|
||||
// CheckURL is the URL to request an update check from. You should only set
|
||||
// this if you are running an on-prem Equinox server.
|
||||
// If empty the default Equinox update service endpoint is used.
|
||||
CheckURL string
|
||||
|
||||
// HTTPClient is used to make all HTTP requests necessary for the update check protocol.
|
||||
// You may configure it to use custom timeouts, proxy servers or other behaviors.
|
||||
HTTPClient *http.Client
|
||||
}
|
||||
|
||||
// Response is returned by Check when an update is available. It may be
|
||||
// passed to Apply to perform the update.
|
||||
type Response struct {
|
||||
// Version of the release that will be updated to if applied.
|
||||
ReleaseVersion string
|
||||
|
||||
// Title of the the release
|
||||
ReleaseTitle string
|
||||
|
||||
// Additional details about the release
|
||||
ReleaseDescription string
|
||||
|
||||
// Creation date of the release
|
||||
ReleaseDate time.Time
|
||||
|
||||
downloadURL string
|
||||
checksum []byte
|
||||
signature []byte
|
||||
patch proto.PatchKind
|
||||
opts Options
|
||||
}
|
||||
|
||||
// SetPublicKeyPEM is a convenience method to set the PublicKey property
|
||||
// used for checking a completed update's signature by parsing a
|
||||
// Public Key formatted as PEM data.
|
||||
func (o *Options) SetPublicKeyPEM(pembytes []byte) error {
|
||||
block, _ := pem.Decode(pembytes)
|
||||
if block == nil {
|
||||
return errors.New("couldn't parse PEM data")
|
||||
}
|
||||
|
||||
pub, err := x509.ParsePKIXPublicKey(block.Bytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.PublicKey = pub
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check communicates with an Equinox update service to determine if
|
||||
// an update for the given application matching the specified options is
|
||||
// available. The returned error is nil only if an update is available.
|
||||
//
|
||||
// The appID is issued to you when creating an application at https://equinox.io
|
||||
//
|
||||
// You can compare the returned error to NotAvailableErr to differentiate between
|
||||
// a successful check that found no update from other errors like a failed
|
||||
// network connection.
|
||||
func Check(appID string, opts Options) (Response, error) {
|
||||
var r Response
|
||||
|
||||
if opts.Channel == "" {
|
||||
opts.Channel = "stable"
|
||||
}
|
||||
if opts.TargetPath == "" {
|
||||
var err error
|
||||
opts.TargetPath, err = osext.Executable()
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
if opts.OS == "" {
|
||||
opts.OS = runtime.GOOS
|
||||
}
|
||||
if opts.Arch == "" {
|
||||
opts.Arch = runtime.GOARCH
|
||||
}
|
||||
if opts.CheckURL == "" {
|
||||
opts.CheckURL = defaultCheckURL
|
||||
}
|
||||
if opts.HTTPClient == nil {
|
||||
opts.HTTPClient = new(http.Client)
|
||||
}
|
||||
opts.HTTPClient.Transport = newUserAgentTransport(userAgent, opts.HTTPClient.Transport)
|
||||
|
||||
checksum := computeChecksum(opts.TargetPath)
|
||||
|
||||
payload, err := json.Marshal(proto.Request{
|
||||
AppID: appID,
|
||||
Channel: opts.Channel,
|
||||
OS: opts.OS,
|
||||
Arch: opts.Arch,
|
||||
GoARM: opts.GoARM,
|
||||
TargetVersion: opts.Version,
|
||||
CurrentVersion: opts.CurrentVersion,
|
||||
CurrentSHA256: checksum,
|
||||
})
|
||||
|
||||
req, err := http.NewRequest("POST", opts.CheckURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
req.Header.Set("Accept", fmt.Sprintf("application/json; q=1; version=%s; charset=utf-8", protocolVersion))
|
||||
req.Header.Set("Content-Type", "application/json; charset=utf-8")
|
||||
|
||||
resp, err := opts.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
body, _ := ioutil.ReadAll(resp.Body)
|
||||
return r, fmt.Errorf("Server responded with %s: %s", resp.Status, body)
|
||||
}
|
||||
|
||||
var protoResp proto.Response
|
||||
err = json.NewDecoder(resp.Body).Decode(&protoResp)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
|
||||
if !protoResp.Available {
|
||||
return r, NotAvailableErr
|
||||
}
|
||||
|
||||
r.ReleaseVersion = protoResp.Release.Version
|
||||
r.ReleaseTitle = protoResp.Release.Title
|
||||
r.ReleaseDescription = protoResp.Release.Description
|
||||
r.ReleaseDate = protoResp.Release.CreateDate
|
||||
r.downloadURL = protoResp.DownloadURL
|
||||
r.patch = protoResp.Patch
|
||||
r.opts = opts
|
||||
r.checksum, err = hex.DecodeString(protoResp.Checksum)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
r.signature, err = hex.DecodeString(protoResp.Signature)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func computeChecksum(path string) string {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer f.Close()
|
||||
h := sha256.New()
|
||||
_, err = io.Copy(h, f)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// Apply performs an update of the current executable (or TargetFile, if it was
|
||||
// set on the Options) with the update specified by Response.
|
||||
//
|
||||
// Error is nil if and only if the entire update completes successfully.
|
||||
func (r Response) Apply() error {
|
||||
opts := update.Options{
|
||||
TargetPath: r.opts.TargetPath,
|
||||
TargetMode: r.opts.TargetMode,
|
||||
Checksum: r.checksum,
|
||||
Signature: r.signature,
|
||||
Verifier: update.NewECDSAVerifier(),
|
||||
PublicKey: r.opts.PublicKey,
|
||||
}
|
||||
switch r.patch {
|
||||
case proto.PatchBSDiff:
|
||||
opts.Patcher = update.NewBSDiffPatcher()
|
||||
}
|
||||
|
||||
if err := opts.CheckPermissions(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", r.downloadURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// fetch the update
|
||||
resp, err := r.opts.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
// check that we got a patch
|
||||
if resp.StatusCode >= 400 {
|
||||
msg := "error downloading patch"
|
||||
|
||||
id := resp.Header.Get("Request-Id")
|
||||
if id != "" {
|
||||
msg += ", request " + id
|
||||
}
|
||||
|
||||
blob, err := ioutil.ReadAll(resp.Body)
|
||||
if err == nil {
|
||||
msg += ": " + string(bytes.TrimSpace(blob))
|
||||
}
|
||||
return fmt.Errorf(msg)
|
||||
}
|
||||
|
||||
return update.Apply(resp.Body, opts)
|
||||
}
|
||||
|
||||
type userAgentTransport struct {
|
||||
userAgent string
|
||||
http.RoundTripper
|
||||
}
|
||||
|
||||
func newUserAgentTransport(userAgent string, rt http.RoundTripper) *userAgentTransport {
|
||||
if rt == nil {
|
||||
rt = http.DefaultTransport
|
||||
}
|
||||
return &userAgentTransport{userAgent, rt}
|
||||
}
|
||||
|
||||
func (t *userAgentTransport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
if r.Header.Get("User-Agent") == "" {
|
||||
r.Header.Set("User-Agent", t.userAgent)
|
||||
}
|
||||
return t.RoundTripper.RoundTrip(r)
|
||||
}
|
@ -0,0 +1,183 @@
|
||||
package equinox
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/equinox-io/equinox/proto"
|
||||
)
|
||||
|
||||
const fakeAppID = "fake_app_id"
|
||||
|
||||
var (
|
||||
fakeBinary = []byte{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1}
|
||||
newFakeBinary = []byte{0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2}
|
||||
ts *httptest.Server
|
||||
key *ecdsa.PrivateKey
|
||||
sha string
|
||||
newSHA string
|
||||
signature string
|
||||
)
|
||||
|
||||
func init() {
|
||||
shaBytes := sha256.Sum256(fakeBinary)
|
||||
sha = hex.EncodeToString(shaBytes[:])
|
||||
newSHABytes := sha256.Sum256(newFakeBinary)
|
||||
newSHA = hex.EncodeToString(newSHABytes[:])
|
||||
|
||||
var err error
|
||||
key, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate ecdsa key: %v", err))
|
||||
}
|
||||
sig, err := key.Sign(rand.Reader, newSHABytes[:], nil)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to sign new binary: %v", err))
|
||||
}
|
||||
signature = hex.EncodeToString(sig)
|
||||
}
|
||||
|
||||
func TestNotAvailable(t *testing.T) {
|
||||
opts := setup(t, "TestNotAvailable", proto.Response{
|
||||
Available: false,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
_, err := Check(fakeAppID, opts)
|
||||
if err != NotAvailableErr {
|
||||
t.Fatalf("Expected not available error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEndToEnd(t *testing.T) {
|
||||
opts := setup(t, "TestEndtoEnd", proto.Response{
|
||||
Available: true,
|
||||
Release: proto.Release{
|
||||
Version: "0.1.2.3",
|
||||
Title: "Release Title",
|
||||
Description: "Release Description",
|
||||
CreateDate: time.Now(),
|
||||
},
|
||||
Checksum: newSHA,
|
||||
Signature: signature,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
resp, err := Check(fakeAppID, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed check: %v", err)
|
||||
}
|
||||
err = resp.Apply()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed apply: %v", err)
|
||||
}
|
||||
|
||||
buf, err := ioutil.ReadFile(opts.TargetPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read file: %v", err)
|
||||
}
|
||||
if !bytes.Equal(buf, newFakeBinary) {
|
||||
t.Fatalf("Binary did not update to new expected value. Got %v, expected %v", buf, newFakeBinary)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInvalidPatch(t *testing.T) {
|
||||
opts := setup(t, "TestInavlidPatch", proto.Response{
|
||||
Available: true,
|
||||
Release: proto.Release{
|
||||
Version: "0.1.2.3",
|
||||
Title: "Release Title",
|
||||
Description: "Release Description",
|
||||
CreateDate: time.Now(),
|
||||
},
|
||||
DownloadURL: "bad-request",
|
||||
Checksum: newSHA,
|
||||
Signature: signature,
|
||||
Patch: proto.PatchBSDiff,
|
||||
})
|
||||
defer cleanup(opts)
|
||||
|
||||
resp, err := Check(fakeAppID, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed check: %v", err)
|
||||
}
|
||||
err = resp.Apply()
|
||||
if err == nil {
|
||||
t.Fatalf("Apply succeeded")
|
||||
}
|
||||
if err.Error() != "error downloading patch: bad-request" {
|
||||
t.Fatalf("Expected a different error message: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func setup(t *testing.T, name string, resp proto.Response) Options {
|
||||
checkUserAgent := func(req *http.Request) {
|
||||
if req.Header.Get("User-Agent") != userAgent {
|
||||
t.Errorf("Expected user agent to be %s, not %s", userAgent, req.Header.Get("User-Agent"))
|
||||
}
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/check", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
var req proto.Request
|
||||
err := json.NewDecoder(r.Body).Decode(&req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to decode proto request: %v", err)
|
||||
}
|
||||
if resp.Available {
|
||||
if req.AppID != fakeAppID {
|
||||
t.Fatalf("Unexpected app ID. Got %v, expected %v", err)
|
||||
}
|
||||
if req.CurrentSHA256 != sha {
|
||||
t.Fatalf("Unexpected request SHA: %v", sha)
|
||||
}
|
||||
}
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
})
|
||||
|
||||
// Keying off the download URL may not be the best idea...
|
||||
if resp.DownloadURL == "bad-request" {
|
||||
mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
http.Error(w, "bad-request", http.StatusBadRequest)
|
||||
})
|
||||
} else {
|
||||
mux.HandleFunc("/bin", func(w http.ResponseWriter, r *http.Request) {
|
||||
checkUserAgent(r)
|
||||
w.Write(newFakeBinary)
|
||||
})
|
||||
}
|
||||
|
||||
ts = httptest.NewServer(mux)
|
||||
resp.DownloadURL = ts.URL + "/bin"
|
||||
|
||||
var opts Options
|
||||
opts.CheckURL = ts.URL + "/check"
|
||||
opts.PublicKey = key.Public()
|
||||
|
||||
if name != "" {
|
||||
opts.TargetPath = name
|
||||
ioutil.WriteFile(name, fakeBinary, 0644)
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
func cleanup(opts Options) {
|
||||
if opts.TargetPath != "" {
|
||||
os.Remove(opts.TargetPath)
|
||||
}
|
||||
ts.Close()
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
|
||||
src
|
@ -0,0 +1,8 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.5.3
|
||||
- tip
|
||||
notifications:
|
||||
email:
|
||||
- ionathan@gmail.com
|
||||
- marcosnils@gmail.com
|
@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Jonathan Leibiusky and Marcos Lilljedahl
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
@ -0,0 +1,3 @@
|
||||
test:
|
||||
go get -v -d -t ./...
|
||||
go test -v
|
@ -0,0 +1,444 @@
|
||||
[![Build Status](https://img.shields.io/travis/franela/goreq/master.svg)](https://travis-ci.org/franela/goreq)
|
||||
[![GoDoc](https://godoc.org/github.com/franela/goreq?status.svg)](https://godoc.org/github.com/franela/goreq)
|
||||
|
||||
GoReq
|
||||
=======
|
||||
|
||||
Simple and sane HTTP request library for Go language.
|
||||
|
||||
|
||||
|
||||
**Table of Contents**
|
||||
|
||||
- [Why GoReq?](#user-content-why-goreq)
|
||||
- [How do I install it?](#user-content-how-do-i-install-it)
|
||||
- [What can I do with it?](#user-content-what-can-i-do-with-it)
|
||||
- [Making requests with different methods](#user-content-making-requests-with-different-methods)
|
||||
- [GET](#user-content-get)
|
||||
- [Tags](#user-content-tags)
|
||||
- [POST](#user-content-post)
|
||||
- [Sending payloads in the Body](#user-content-sending-payloads-in-the-body)
|
||||
  - [Specifying request headers](#user-content-specifiying-request-headers)
|
||||
- [Sending Cookies](#cookie-support)
|
||||
- [Setting timeouts](#user-content-setting-timeouts)
|
||||
- [Using the Response and Error](#user-content-using-the-response-and-error)
|
||||
- [Receiving JSON](#user-content-receiving-json)
|
||||
- [Sending/Receiving Compressed Payloads](#user-content-sendingreceiving-compressed-payloads)
|
||||
- [Using gzip compression:](#user-content-using-gzip-compression)
|
||||
- [Using deflate compression:](#user-content-using-deflate-compression)
|
||||
- [Using compressed responses:](#user-content-using-compressed-responses)
|
||||
- [Proxy](#proxy)
|
||||
- [Debugging requests](#debug)
|
||||
- [Getting raw Request & Response](#getting-raw-request--response)
|
||||
- [TODO:](#user-content-todo)
|
||||
|
||||
|
||||
|
||||
Why GoReq?
|
||||
==========
|
||||
|
||||
Go has very nice native libraries that allows you to do lots of cool things. But sometimes those libraries are too low level, which means that to do a simple thing, like an HTTP Request, it takes some time. And if you want to do something as simple as adding a timeout to a request, you will end up writing several lines of code.
|
||||
|
||||
This is why we think GoReq is useful. Because you can do all your HTTP requests in a very simple and comprehensive way, while enabling you to do more advanced stuff by giving you access to the native API.
|
||||
|
||||
How do I install it?
|
||||
====================
|
||||
|
||||
```bash
|
||||
go get github.com/franela/goreq
|
||||
```
|
||||
|
||||
What can I do with it?
|
||||
======================
|
||||
|
||||
## Making requests with different methods
|
||||
|
||||
#### GET
|
||||
```go
|
||||
res, err := goreq.Request{ Uri: "http://www.google.com" }.Do()
|
||||
```
|
||||
|
||||
GoReq default method is GET.
|
||||
|
||||
You can also set value to GET method easily
|
||||
|
||||
```go
|
||||
type Item struct {
|
||||
Limit int
|
||||
Skip int
|
||||
Fields string
|
||||
}
|
||||
|
||||
item := Item {
|
||||
Limit: 3,
|
||||
Skip: 5,
|
||||
Fields: "Value",
|
||||
}
|
||||
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://localhost:3000/",
|
||||
QueryString: item,
|
||||
}.Do()
|
||||
```
|
||||
The sample above will send `http://localhost:3000/?limit=3&skip=5&fields=Value`
|
||||
|
||||
Alternatively the `url` tag can be used in struct fields to customize encoding properties
|
||||
|
||||
```go
|
||||
type Item struct {
|
||||
TheLimit int `url:"the_limit"`
|
||||
TheSkip string `url:"the_skip,omitempty"`
|
||||
TheFields string `url:"-"`
|
||||
}
|
||||
|
||||
item := Item {
|
||||
TheLimit: 3,
|
||||
TheSkip: "",
|
||||
TheFields: "Value",
|
||||
}
|
||||
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://localhost:3000/",
|
||||
QueryString: item,
|
||||
}.Do()
|
||||
```
|
||||
The sample above will send `http://localhost:3000/?the_limit=3`
|
||||
|
||||
|
||||
QueryString also support url.Values
|
||||
|
||||
```go
|
||||
item := url.Values{}
|
||||
item.Set("Limit", 3)
|
||||
item.Add("Field", "somefield")
|
||||
item.Add("Field", "someotherfield")
|
||||
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://localhost:3000/",
|
||||
QueryString: item,
|
||||
}.Do()
|
||||
```
|
||||
|
||||
The sample above will send `http://localhost:3000/?limit=3&field=somefield&field=someotherfield`
|
||||
|
||||
### Tags
|
||||
|
||||
Struct field `url` tag is mainly used as the request parameter name.
|
||||
Tags can be comma separated multiple values, 1st value is for naming and rest has special meanings.
|
||||
|
||||
- special tag for 1st value
|
||||
- `-`: value is ignored if set this
|
||||
|
||||
- special tag for rest 2nd value
|
||||
- `omitempty`: zero-value is ignored if set this
|
||||
- `squash`: the fields of embedded struct is used for parameter
|
||||
|
||||
#### Tag Examples
|
||||
|
||||
```go
|
||||
type Place struct {
|
||||
Country string `url:"country"`
|
||||
City string `url:"city"`
|
||||
ZipCode string `url:"zipcode,omitempty"`
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Place `url:",squash"`
|
||||
|
||||
FirstName string `url:"first_name"`
|
||||
LastName string `url:"last_name"`
|
||||
Age string `url:"age,omitempty"`
|
||||
Password string `url:"-"`
|
||||
}
|
||||
|
||||
johnbull := Person{
|
||||
Place: Place{ // squash the embedded struct value
|
||||
Country: "UK",
|
||||
City: "London",
|
||||
ZipCode: "SW1",
|
||||
},
|
||||
FirstName: "John",
|
||||
LastName: "Doe",
|
||||
Age: "35",
|
||||
Password: "my-secret", // ignored for parameter
|
||||
}
|
||||
|
||||
goreq.Request{
|
||||
Uri: "http://localhost/",
|
||||
QueryString: johnbull,
|
||||
}.Do()
|
||||
// => `http://localhost/?first_name=John&last_name=Doe&age=35&country=UK&city=London&zip_code=SW1`
|
||||
|
||||
|
||||
// age and zipcode will be ignored because of `omitempty`
|
||||
// but firstname isn't.
|
||||
samurai := Person{
|
||||
Place: Place{ // squash the embedded struct value
|
||||
Country: "Japan",
|
||||
City: "Tokyo",
|
||||
},
|
||||
LastName: "Yagyu",
|
||||
}
|
||||
|
||||
goreq.Request{
|
||||
Uri: "http://localhost/",
|
||||
QueryString: samurai,
|
||||
}.Do()
|
||||
// => `http://localhost/?first_name=&last_name=yagyu&country=Japan&city=Tokyo`
|
||||
```
|
||||
|
||||
|
||||
#### POST
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{ Method: "POST", Uri: "http://www.google.com" }.Do()
|
||||
```
|
||||
|
||||
## Sending payloads in the Body
|
||||
|
||||
You can send ```string```, ```Reader``` or ```interface{}``` in the body. The first two will be sent as text. The last one will be marshalled to JSON, if possible.
|
||||
|
||||
```go
|
||||
type Item struct {
|
||||
Id int
|
||||
Name string
|
||||
}
|
||||
|
||||
item := Item{ Id: 1111, Name: "foobar" }
|
||||
|
||||
res, err := goreq.Request{
|
||||
Method: "POST",
|
||||
Uri: "http://www.google.com",
|
||||
Body: item,
|
||||
}.Do()
|
||||
```
|
||||
|
||||
## Specifiying request headers
|
||||
|
||||
We think that most of the times the request headers that you use are: ```Host```, ```Content-Type```, ```Accept``` and ```User-Agent```. This is why we decided to make it very easy to set these headers.
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://www.google.com",
|
||||
Host: "foobar.com",
|
||||
Accept: "application/json",
|
||||
ContentType: "application/json",
|
||||
UserAgent: "goreq",
|
||||
}.Do()
|
||||
```
|
||||
|
||||
But sometimes you need to set other headers. You can still do it.
|
||||
|
||||
```go
|
||||
req := goreq.Request{ Uri: "http://www.google.com" }
|
||||
|
||||
req.AddHeader("X-Custom", "somevalue")
|
||||
|
||||
req.Do()
|
||||
```
|
||||
|
||||
Alternatively you can use the `WithHeader` function to keep the syntax short
|
||||
|
||||
```go
|
||||
res, err = goreq.Request{ Uri: "http://www.google.com" }.WithHeader("X-Custom", "somevalue").Do()
|
||||
```
|
||||
|
||||
## Cookie support
|
||||
|
||||
Cookies can be either set at the request level by sending a [CookieJar](http://golang.org/pkg/net/http/cookiejar/) in the `CookieJar` request field
|
||||
or you can use goreq's one-liner WithCookie method as shown below
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://www.google.com",
|
||||
}.
|
||||
WithCookie(&http.Cookie{Name: "c1", Value: "v1"}).
|
||||
Do()
|
||||
```
|
||||
|
||||
## Setting timeouts
|
||||
|
||||
GoReq supports 2 kind of timeouts. A general connection timeout and a request specific one. By default the connection timeout is of 1 second. There is no default for request timeout, which means it will wait forever.
|
||||
|
||||
You can change the connection timeout doing:
|
||||
|
||||
```go
|
||||
goreq.SetConnectTimeout(100 * time.Millisecond)
|
||||
```
|
||||
|
||||
And specify the request timeout doing:
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Uri: "http://www.google.com",
|
||||
Timeout: 500 * time.Millisecond,
|
||||
}.Do()
|
||||
```
|
||||
|
||||
## Using the Response and Error
|
||||
|
||||
GoReq will always return 2 values: a ```Response``` and an ```Error```.
|
||||
If ```Error``` is not ```nil``` it means that an error happened while doing the request and you shouldn't use the ```Response``` in any way.
|
||||
You can check what happened by getting the error message:
|
||||
|
||||
```go
|
||||
fmt.Println(err.Error())
|
||||
```
|
||||
And to make it easy to know if it was a timeout error, you can ask the error or return it:
|
||||
|
||||
```go
|
||||
if serr, ok := err.(*goreq.Error); ok {
|
||||
if serr.Timeout() {
|
||||
...
|
||||
}
|
||||
}
|
||||
return err
|
||||
```
|
||||
|
||||
If you don't get an error, you can safely use the ```Response```.
|
||||
|
||||
```go
|
||||
res.Uri // return final URL location of the response (fulfilled after redirect was made)
|
||||
res.StatusCode // return the status code of the response
|
||||
res.Body // gives you access to the body
|
||||
res.Body.ToString() // will return the body as a string
|
||||
res.Header.Get("Content-Type") // gives you access to all the response headers
|
||||
```
|
||||
Remember that you should **always** close `res.Body` if it's not `nil`
|
||||
|
||||
## Receiving JSON
|
||||
|
||||
GoReq will help you to receive and unmarshal JSON.
|
||||
|
||||
```go
|
||||
type Item struct {
|
||||
Id int
|
||||
Name string
|
||||
}
|
||||
|
||||
var item Item
|
||||
|
||||
res.Body.FromJsonTo(&item)
|
||||
```
|
||||
|
||||
## Sending/Receiving Compressed Payloads
|
||||
GoReq supports gzip, deflate and zlib compression of requests' body and transparent decompression of responses provided they have a correct `Content-Encoding` header.
|
||||
|
||||
##### Using gzip compression:
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "POST",
|
||||
Uri: "http://www.google.com",
|
||||
Body: item,
|
||||
Compression: goreq.Gzip(),
|
||||
}.Do()
|
||||
```
|
||||
##### Using deflate/zlib compression:
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "POST",
|
||||
Uri: "http://www.google.com",
|
||||
Body: item,
|
||||
Compression: goreq.Deflate(),
|
||||
}.Do()
|
||||
```
|
||||
##### Using compressed responses:
|
||||
If the server replies with a correct and matching `Content-Encoding` header (gzip requires `Content-Encoding: gzip` and deflate `Content-Encoding: deflate`) goreq transparently decompresses the response, so the previous example should always work:
|
||||
```go
|
||||
type Item struct {
|
||||
Id int
|
||||
Name string
|
||||
}
|
||||
res, err := goreq.Request{
|
||||
Method: "POST",
|
||||
Uri: "http://www.google.com",
|
||||
Body: item,
|
||||
Compression: goreq.Gzip(),
|
||||
}.Do()
|
||||
var item Item
|
||||
res.Body.FromJsonTo(&item)
|
||||
```
|
||||
If the server does not reply with a `Content-Encoding` header, GoReq will return the raw, undecompressed response.
|
||||
|
||||
## Proxy
|
||||
If you need to use a proxy for your requests GoReq supports the standard `http_proxy` env variable as well as manually setting the proxy for each request
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "GET",
|
||||
Proxy: "http://myproxy:myproxyport",
|
||||
Uri: "http://www.google.com",
|
||||
}.Do()
|
||||
```
|
||||
|
||||
### Proxy basic auth is also supported
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "GET",
|
||||
Proxy: "http://user:pass@myproxy:myproxyport",
|
||||
Uri: "http://www.google.com",
|
||||
}.Do()
|
||||
```
|
||||
|
||||
## Debug
|
||||
If you need to debug your http requests, it can print the http request detail.
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "GET",
|
||||
Uri: "http://www.google.com",
|
||||
Compression: goreq.Gzip(),
|
||||
ShowDebug: true,
|
||||
}.Do()
|
||||
fmt.Println(res, err)
|
||||
```
|
||||
|
||||
and it will print the log:
|
||||
```
|
||||
GET / HTTP/1.1
|
||||
Host: www.google.com
|
||||
Accept:
|
||||
Accept-Encoding: gzip
|
||||
Content-Encoding: gzip
|
||||
Content-Type:
|
||||
```
|
||||
|
||||
|
||||
### Getting raw Request & Response
|
||||
|
||||
To get the Request:
|
||||
|
||||
```go
|
||||
req := goreq.Request{
|
||||
Host: "foobar.com",
|
||||
}
|
||||
|
||||
//req.Request will return a new instance of an http.Request so you can safely use it for something else
|
||||
request, _ := req.NewRequest()
|
||||
|
||||
```
|
||||
|
||||
|
||||
To get the Response:
|
||||
|
||||
```go
|
||||
res, err := goreq.Request{
|
||||
Method: "GET",
|
||||
Uri: "http://www.google.com",
|
||||
Compression: goreq.Gzip(),
|
||||
ShowDebug: true,
|
||||
}.Do()
|
||||
|
||||
// res.Response will contain the original http.Response structure
|
||||
fmt.Println(res.Response, err)
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
TODO:
|
||||
-----
|
||||
|
||||
We do have a couple of [issues](https://github.com/franela/goreq/issues) pending we'll be addressing soon. But feel free to
|
||||
contribute and send us PRs (with tests please :smile:).
|
@ -0,0 +1,491 @@
|
||||
package goreq
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"compress/zlib"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// itimeout matches errors that can report whether they were caused by a
// timeout (e.g. net.Error). Used by Do() to classify request failures.
type itimeout interface {
	Timeout() bool
}
|
||||
// Request describes a single HTTP request to be executed with Do().
// The zero value plus a Uri is a valid GET request.
type Request struct {
	headers []headerTuple  // extra headers queued via AddHeader/WithHeader
	cookies []*http.Cookie // cookies queued via AddCookie/WithCookie

	Method            string        // HTTP verb; Do() defaults it to "GET"
	Uri               string        // target URL
	Body              interface{}   // string, io.Reader, []byte or any JSON-marshalable value (see prepareRequestBody)
	QueryString       interface{}   // url.Values, *url.Values, or a `url`-tagged struct (see paramParse)
	Timeout           time.Duration // per-request timeout applied to the client in Do()
	ContentType       string        // convenience header, applied by addHeaders
	Accept            string        // convenience header, applied by addHeaders
	Host              string        // overrides http.Request.Host
	UserAgent         string        // convenience header, applied by addHeaders
	Insecure          bool          // skip TLS certificate verification
	MaxRedirects      int           // redirects beyond this count fail the request
	RedirectHeaders   bool          // re-send the original headers on redirects
	Proxy             string        // proxy URL, e.g. "http://user:pass@host:port"
	Compression       *compression  // see Gzip()/Deflate()/Zlib()
	BasicAuthUsername string        // when non-empty, basic auth is applied in NewRequest
	BasicAuthPassword string
	CookieJar         http.CookieJar // when set, Do() builds a dedicated client with this jar
	ShowDebug         bool           // dump the outgoing request via log
	OnBeforeRequest   func(goreq *Request, httpreq *http.Request) // hook invoked just before client.Do
}
|
||||
|
||||
// compression bundles the writer/reader factories and the matching
// Content-Encoding token for one body compression scheme (gzip or deflate).
type compression struct {
	writer          func(buffer io.Writer) (io.WriteCloser, error) // wraps the request body for sending
	reader          func(buffer io.Reader) (io.ReadCloser, error)  // unwraps a matching compressed response body
	ContentEncoding string                                         // header token, e.g. "gzip" or "deflate"
}
|
||||
|
||||
// Response wraps http.Response, adding the redirect-resolved URI and a Body
// that transparently decompresses when the response was compressed.
type Response struct {
	*http.Response
	Uri  string // final URL after a redirect (empty when no redirect occurred; set in Do's CheckRedirect)
	Body *Body
	req  *http.Request // originating request, retained for CancelRequest
}
|
||||
|
||||
// CancelRequest aborts the in-flight request backing this response.
// NOTE(review): this always targets DefaultTransport, even when the request
// was sent through the proxy transport — confirm that is intended.
func (r Response) CancelRequest() {
	cancelRequest(DefaultTransport, r.req)

}
|
||||
|
||||
func cancelRequest(transport interface{}, r *http.Request) {
|
||||
if tp, ok := transport.(transportRequestCanceler); ok {
|
||||
tp.CancelRequest(r)
|
||||
}
|
||||
}
|
||||
|
||||
// headerTuple is one name/value pair queued by AddHeader and applied to the
// outgoing request in NewRequest.
type headerTuple struct {
	name  string
	value string
}
|
||||
|
||||
// Body exposes the response body as an io.ReadCloser. When the response was
// compressed, reads are served from compressedReader while the raw network
// reader is retained so that both can be closed.
type Body struct {
	reader           io.ReadCloser // raw network body
	compressedReader io.ReadCloser // decompressing wrapper; nil when the response was not compressed
}
|
||||
|
||||
// Error wraps any failure produced while performing a request and records
// whether it was caused by a timeout (see Timeout()).
type Error struct {
	timeout bool
	Err     error
}
|
||||
|
||||
// transportRequestCanceler matches transports that can abort an in-flight
// request (e.g. *http.Transport).
type transportRequestCanceler interface {
	CancelRequest(*http.Request)
}
|
||||
|
||||
// Timeout reports whether the request failed because of a timeout.
func (e *Error) Timeout() bool {
	return e.timeout
}
|
||||
|
||||
// Error implements the error interface by delegating to the wrapped error.
func (e *Error) Error() string {
	return e.Err.Error()
}
|
||||
|
||||
func (b *Body) Read(p []byte) (int, error) {
|
||||
if b.compressedReader != nil {
|
||||
return b.compressedReader.Read(p)
|
||||
}
|
||||
return b.reader.Read(p)
|
||||
}
|
||||
|
||||
func (b *Body) Close() error {
|
||||
err := b.reader.Close()
|
||||
if b.compressedReader != nil {
|
||||
return b.compressedReader.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// FromJsonTo decodes the (possibly transparently decompressed) body as JSON
// into o, which must be a pointer.
func (b *Body) FromJsonTo(o interface{}) error {
	return json.NewDecoder(b).Decode(o)
}
|
||||
|
||||
func (b *Body) ToString() (string, error) {
|
||||
body, err := ioutil.ReadAll(b)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
func Gzip() *compression {
|
||||
reader := func(buffer io.Reader) (io.ReadCloser, error) {
|
||||
return gzip.NewReader(buffer)
|
||||
}
|
||||
writer := func(buffer io.Writer) (io.WriteCloser, error) {
|
||||
return gzip.NewWriter(buffer), nil
|
||||
}
|
||||
return &compression{writer: writer, reader: reader, ContentEncoding: "gzip"}
|
||||
}
|
||||
|
||||
func Deflate() *compression {
|
||||
reader := func(buffer io.Reader) (io.ReadCloser, error) {
|
||||
return zlib.NewReader(buffer)
|
||||
}
|
||||
writer := func(buffer io.Writer) (io.WriteCloser, error) {
|
||||
return zlib.NewWriter(buffer), nil
|
||||
}
|
||||
return &compression{writer: writer, reader: reader, ContentEncoding: "deflate"}
|
||||
}
|
||||
|
||||
// Zlib is an alias for Deflate: both produce zlib-framed DEFLATE bodies
// advertised as Content-Encoding "deflate".
func Zlib() *compression {
	return Deflate()
}
|
||||
|
||||
func paramParse(query interface{}) (string, error) {
|
||||
switch query.(type) {
|
||||
case url.Values:
|
||||
return query.(url.Values).Encode(), nil
|
||||
case *url.Values:
|
||||
return query.(*url.Values).Encode(), nil
|
||||
default:
|
||||
var v = &url.Values{}
|
||||
err := paramParseStruct(v, query)
|
||||
return v.Encode(), err
|
||||
}
|
||||
}
|
||||
|
||||
// paramParseStruct reflects over a struct (unwrapping pointers/interfaces
// first) and adds one query parameter per accessible field, honoring the
// `url` struct tag: "-" skips the field, "omitempty" drops empty values,
// and "squash" recurses into an embedded struct's fields.
func paramParseStruct(v *url.Values, query interface{}) error {
	var (
		s = reflect.ValueOf(query)
		t = reflect.TypeOf(query)
	)
	// Follow pointers and interfaces down to the concrete value.
	for t.Kind() == reflect.Ptr || t.Kind() == reflect.Interface {
		s = s.Elem()
		t = s.Type()
	}

	if t.Kind() != reflect.Struct {
		return errors.New("Can not parse QueryString.")
	}

	for i := 0; i < t.NumField(); i++ {
		var name string

		field := s.Field(i)
		typeField := t.Field(i)

		// Unexported fields cannot be read via Interface(); skip them.
		if !field.CanInterface() {
			continue
		}

		urlTag := typeField.Tag.Get("url")
		if urlTag == "-" {
			continue
		}

		name, opts := parseTag(urlTag)

		var omitEmpty, squash bool
		omitEmpty = opts.Contains("omitempty")
		squash = opts.Contains("squash")

		// "squash" flattens an embedded struct's fields into this level.
		if squash {
			err := paramParseStruct(v, field.Interface())
			if err != nil {
				return err
			}
			continue
		}

		// No tag at all: fall back to the lowercased Go field name.
		if urlTag == "" {
			name = strings.ToLower(typeField.Name)
		}

		// Values are stringified via %v; omitempty only suppresses the
		// empty-string rendering.
		if val := fmt.Sprintf("%v", field.Interface()); !(omitEmpty && len(val) == 0) {
			v.Add(name, val)
		}
	}
	return nil
}
|
||||
|
||||
// prepareRequestBody converts a Request.Body value into an io.Reader:
// strings and []byte are wrapped as-is, an io.Reader is passed through,
// nil yields a nil reader, and anything else is marshaled to JSON.
func prepareRequestBody(b interface{}) (io.Reader, error) {
	// Bind the asserted value in the type switch rather than re-asserting
	// inside each case (idiomatic; avoids duplicate type assertions).
	switch body := b.(type) {
	case string:
		// treat it as text
		return strings.NewReader(body), nil
	case io.Reader:
		// pass the stream through untouched
		return body, nil
	case []byte:
		// treat as a raw byte array
		return bytes.NewReader(body), nil
	case nil:
		return nil, nil
	default:
		// try to jsonify it; early-return on failure keeps the happy path
		// left-aligned
		j, err := json.Marshal(b)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(j), nil
	}
}
|
||||
|
||||
// DefaultDialer, DefaultTransport and DefaultClient are the shared HTTP
// plumbing used by Do() when no proxy or cookie jar requires a custom
// client. The dialer's 1s timeout can be changed via SetConnectTimeout.
// NOTE(review): these are mutable package-level values and Do() assigns
// fields on them (CheckRedirect, Timeout) without synchronization — confirm
// whether concurrent use is expected.
var DefaultDialer = &net.Dialer{Timeout: 1000 * time.Millisecond}
var DefaultTransport http.RoundTripper = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyFromEnvironment}
var DefaultClient = &http.Client{Transport: DefaultTransport}

// proxyTransport/proxyClient are lazily built and reused across Do() calls
// when Request.Proxy is set. NOTE(review): shared without synchronization.
var proxyTransport http.RoundTripper
var proxyClient *http.Client
|
||||
|
||||
// SetConnectTimeout changes the dial (connection) timeout used by the shared
// default dialer; the initial value is 1 second.
func SetConnectTimeout(duration time.Duration) {
	DefaultDialer.Timeout = duration
}
|
||||
|
||||
func (r *Request) AddHeader(name string, value string) {
|
||||
if r.headers == nil {
|
||||
r.headers = []headerTuple{}
|
||||
}
|
||||
r.headers = append(r.headers, headerTuple{name: name, value: value})
|
||||
}
|
||||
|
||||
// WithHeader is the chainable form of AddHeader: the value receiver means
// the header is appended to a copy of the request, which is returned.
func (r Request) WithHeader(name string, value string) Request {
	r.AddHeader(name, value)
	return r
}
|
||||
|
||||
// AddCookie queues a cookie to be attached to the outgoing request in
// NewRequest.
func (r *Request) AddCookie(c *http.Cookie) {
	r.cookies = append(r.cookies, c)
}
|
||||
|
||||
// WithCookie is the chainable form of AddCookie: the cookie is added to a
// copy of the request, which is returned.
func (r Request) WithCookie(c *http.Cookie) Request {
	r.AddCookie(c)
	return r

}
|
||||
|
||||
// Do executes the request and returns a *Response or a *Error. It picks a
// client (default, jar-backed, or proxy-backed), installs a redirect policy,
// applies TLS/timeout settings, sends the request, classifies timeout
// errors, and wraps the body for transparent decompression.
func (r Request) Do() (*Response, error) {
	var client = DefaultClient
	var transport = DefaultTransport
	var resUri string
	var redirectFailed bool

	r.Method = valueOrDefault(r.Method, "GET")

	// use a client with a cookie jar if necessary. We create a new client not
	// to modify the default one.
	if r.CookieJar != nil {
		client = &http.Client{
			Transport: transport,
			Jar:       r.CookieJar,
		}
	}

	if r.Proxy != "" {
		proxyUrl, err := url.Parse(r.Proxy)
		if err != nil {
			// proxy address is in a wrong format
			return nil, &Error{Err: err}
		}

		//If jar is specified new client needs to be built
		// NOTE(review): proxyTransport/proxyClient are package-level and
		// mutated here without synchronization; concurrent Do() calls with
		// different proxies may race — confirm intended usage.
		if proxyTransport == nil || client.Jar != nil {
			proxyTransport = &http.Transport{Dial: DefaultDialer.Dial, Proxy: http.ProxyURL(proxyUrl)}
			proxyClient = &http.Client{Transport: proxyTransport, Jar: client.Jar}
		} else if proxyTransport, ok := proxyTransport.(*http.Transport); ok {
			proxyTransport.Proxy = http.ProxyURL(proxyUrl)
		}
		transport = proxyTransport
		client = proxyClient
	}

	// NOTE(review): this assigns CheckRedirect on a possibly shared client
	// (DefaultClient), capturing this call's locals — not concurrency-safe.
	client.CheckRedirect = func(req *http.Request, via []*http.Request) error {

		if len(via) > r.MaxRedirects {
			redirectFailed = true
			return errors.New("Error redirecting. MaxRedirects reached")
		}

		resUri = req.URL.String()

		//By default Golang will not redirect request headers
		// https://code.google.com/p/go/issues/detail?id=4800&q=request%20header
		if r.RedirectHeaders {
			for key, val := range via[0].Header {
				req.Header[key] = val
			}
		}
		return nil
	}

	if transport, ok := transport.(*http.Transport); ok {
		if r.Insecure {
			if transport.TLSClientConfig != nil {
				transport.TLSClientConfig.InsecureSkipVerify = true
			} else {
				transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
			}
		} else if transport.TLSClientConfig != nil {
			// the default TLS client (when transport.TLSClientConfig==nil) is
			// already set to verify, so do nothing in that case
			transport.TLSClientConfig.InsecureSkipVerify = false
		}
	}

	req, err := r.NewRequest()

	if err != nil {
		// we couldn't parse the URL.
		return nil, &Error{Err: err}
	}

	timeout := false
	if r.Timeout > 0 {
		// NOTE(review): mutates the (possibly shared) client's Timeout.
		client.Timeout = r.Timeout
	}

	if r.ShowDebug {
		dump, err := httputil.DumpRequest(req, true)
		if err != nil {
			log.Println(err)
		}
		log.Println(string(dump))
	}

	if r.OnBeforeRequest != nil {
		r.OnBeforeRequest(&r, req)
	}
	res, err := client.Do(req)

	if err != nil {
		// Classify the failure: either the error itself or the error wrapped
		// inside a *url.Error may report Timeout().
		if !timeout {
			if t, ok := err.(itimeout); ok {
				timeout = t.Timeout()
			}
			if ue, ok := err.(*url.Error); ok {
				if t, ok := ue.Err.(itimeout); ok {
					timeout = t.Timeout()
				}
			}
		}

		var response *Response
		//If redirect fails we still want to return response data
		if redirectFailed {
			if res != nil {
				response = &Response{res, resUri, &Body{reader: res.Body}, req}
			} else {
				response = &Response{res, resUri, nil, req}
			}
		}

		//If redirect fails and we haven't set a redirect count we shouldn't return an error
		if redirectFailed && r.MaxRedirects == 0 {
			return response, nil
		}

		return response, &Error{timeout: timeout, Err: err}
	}

	// Success: wrap the body with a decompressing reader when the server
	// echoed back the compression scheme we advertised.
	if r.Compression != nil && strings.Contains(res.Header.Get("Content-Encoding"), r.Compression.ContentEncoding) {
		compressedReader, err := r.Compression.reader(res.Body)
		if err != nil {
			return nil, &Error{Err: err}
		}
		return &Response{res, resUri, &Body{reader: res.Body, compressedReader: compressedReader}, req}, nil
	}

	return &Response{res, resUri, &Body{reader: res.Body}, req}, nil
}
|
||||
|
||||
func (r Request) addHeaders(headersMap http.Header) {
|
||||
if len(r.UserAgent) > 0 {
|
||||
headersMap.Add("User-Agent", r.UserAgent)
|
||||
}
|
||||
if r.Accept != "" {
|
||||
headersMap.Add("Accept", r.Accept)
|
||||
}
|
||||
if r.ContentType != "" {
|
||||
headersMap.Add("Content-Type", r.ContentType)
|
||||
}
|
||||
}
|
||||
|
||||
// NewRequest builds the raw *http.Request: it marshals the body, appends
// the encoded query string to the Uri, optionally compresses the body, and
// applies host, headers, basic auth and cookies. Do() calls this, but it is
// also public so callers can obtain the native request.
func (r Request) NewRequest() (*http.Request, error) {

	b, e := prepareRequestBody(r.Body)
	if e != nil {
		// there was a problem marshaling the body
		return nil, &Error{Err: e}
	}

	if r.QueryString != nil {
		param, e := paramParse(r.QueryString)
		if e != nil {
			return nil, &Error{Err: e}
		}
		// NOTE(review): always appends "?", even if r.Uri already contains a
		// query string — confirm callers never pass both.
		r.Uri = r.Uri + "?" + param
	}

	var bodyReader io.Reader
	if b != nil && r.Compression != nil {
		// Compress the whole body into an in-memory buffer before sending.
		buffer := bytes.NewBuffer([]byte{})
		readBuffer := bufio.NewReader(b)
		writer, err := r.Compression.writer(buffer)
		if err != nil {
			return nil, &Error{Err: err}
		}
		// Close before checking the copy error so the compressed stream is
		// fully flushed into the buffer.
		_, e = readBuffer.WriteTo(writer)
		writer.Close()
		if e != nil {
			return nil, &Error{Err: e}
		}
		bodyReader = buffer
	} else {
		bodyReader = b
	}

	req, err := http.NewRequest(r.Method, r.Uri, bodyReader)
	if err != nil {
		return nil, err
	}
	// add headers to the request
	req.Host = r.Host

	r.addHeaders(req.Header)
	if r.Compression != nil {
		req.Header.Add("Content-Encoding", r.Compression.ContentEncoding)
		req.Header.Add("Accept-Encoding", r.Compression.ContentEncoding)
	}
	if r.headers != nil {
		for _, header := range r.headers {
			req.Header.Add(header.name, header.value)
		}
	}

	//use basic auth if required
	if r.BasicAuthUsername != "" {
		req.SetBasicAuth(r.BasicAuthUsername, r.BasicAuthPassword)
	}

	for _, c := range r.cookies {
		req.AddCookie(c)
	}
	return req, nil
}
|
||||
|
||||
// valueOrDefault returns value when it is non-empty and def otherwise.
func valueOrDefault(value, def string) string {
	if value == "" {
		return def
	}
	return value
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,64 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found here: https://github.com/golang/go/blob/master/LICENSE
|
||||
|
||||
package goreq
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// tagOptions holds the comma-separated options that follow the name in a
// struct field tag (everything after the first comma), or "" when the tag
// has no options.
type tagOptions string

// parseTag splits a struct field tag into its name and its trailing
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
	comma := strings.Index(tag, ",")
	if comma < 0 {
		return tag, tagOptions("")
	}
	return tag[:comma], tagOptions(tag[comma+1:])
}

// Contains reports whether the comma-separated option list includes
// optionName as a complete, comma-delimited token.
func (o tagOptions) Contains(optionName string) bool {
	rest := string(o)
	for len(rest) > 0 {
		token := rest
		if idx := strings.Index(rest, ","); idx >= 0 {
			token, rest = rest[:idx], rest[idx+1:]
		} else {
			rest = ""
		}
		if token == optionName {
			return true
		}
	}
	return false
}
|
||||
|
||||
// isValidTag reports whether s is usable as a tag name: non-empty and made
// of letters, digits, and a whitelist of punctuation. Backslash and quote
// characters are reserved and therefore rejected.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		switch {
		case unicode.IsLetter(r), unicode.IsDigit(r):
			// always fine
		case strings.ContainsRune(allowedPunct, r):
			// permitted punctuation
		default:
			return false
		}
	}
	return true
}
|
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014, 2015 Jason E. Aten, Ph.D.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@ -0,0 +1,34 @@
|
||||
rbuf: a circular ring buffer in Golang
|
||||
====
|
||||
|
||||
|
||||
type FixedSizeRingBuf struct:
|
||||
|
||||
* is a fixed-size circular ring buffer. Yes, just what it says.
|
||||
This structure is only for bytes, as it was written to
|
||||
optimize I/O, but could be easily adapted to any other type.
|
||||
|
||||
* We keep a pair of ping/pong buffers so that we can linearize
|
||||
the circular buffer into a contiguous slice if need be.
|
||||
|
||||
For efficiency, a FixedSizeRingBuf may be vastly preferred to
|
||||
a bytes.Buffer. The ReadWithoutAdvance(), Advance(), and Adopt()
|
||||
methods are all non-standard methods written for speed.
|
||||
|
||||
For an I/O heavy application, I have replaced bytes.Buffer with
|
||||
FixedSizeRingBuf and seen memory consumption go from 8GB to 25MB.
|
||||
Yes, that is a 300x reduction in memory footprint. Everything ran
|
||||
faster too.
|
||||
|
||||
Note that Bytes(), while inescapable at times, is expensive: avoid
|
||||
it if possible. If all you need is len(Bytes()), then it is better
|
||||
to use the FixedSizeRingBuf.Readable member directly.
|
||||
Bytes() is expensive because it may copy the back and then
|
||||
the front of a wrapped buffer A[Use] into A[1-Use] in order to
|
||||
get a contiguous, unwrapped, slice. If possible use ContigLen()
|
||||
first to get the size that can be read without copying, Read() that
|
||||
amount, and then Read() a second time -- to avoid the copy.
|
||||
|
||||
copyright (c) 2014, Jason E. Aten
|
||||
|
||||
license: MIT
|
@ -0,0 +1,481 @@
|
||||
package rbuf
|
||||
|
||||
// AtomicFixedSizeRingBuf: Synchronized version of FixedSizeRingBuf,
|
||||
// safe for concurrent access.
|
||||
//
|
||||
// copyright (c) 2014, Jason E. Aten
|
||||
// license: MIT
|
||||
//
|
||||
// Some text from the Golang standard library doc is adapted and
|
||||
// reproduced in fragments below to document the expected behaviors
|
||||
// of the interface functions Read()/Write()/ReadFrom()/WriteTo() that
|
||||
// are implemented here. Those descriptions (see
|
||||
// http://golang.org/pkg/io/#Reader for example) are
|
||||
// copyright 2010 The Go Authors.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AtomicFixedSizeRingBuf: see FixedSizeRingBuf for the full
// details; this is the same, just safe for concurrent access
// (and thus paying the price of synchronization on each call
// as well.)
//
// The readable bytes start at A[Use][Beg] and may wrap around to
// A[Use][0]. Every exported method locks tex, so a value of this
// type must not be copied after first use.
type AtomicFixedSizeRingBuf struct {
	A        [2][]byte  // a pair of ping/pong buffers. Only one is active.
	Use      int        // which A buffer is in active use, 0 or 1
	N        int        // MaxViewInBytes, the size of A[0] and A[1] in bytes.
	Beg      int        // start of data in A[Use]
	readable int        // number of bytes available to read in A[Use]
	tex      sync.Mutex // guards all of the fields above
}
|
||||
|
||||
// Readable() returns the number of bytes available for reading.
|
||||
func (b *AtomicFixedSizeRingBuf) Readable() int {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
return b.readable
|
||||
}
|
||||
|
||||
// get the length of the largest read that we can provide to a contiguous slice
|
||||
// without an extra linearizing copy of all bytes internally.
|
||||
func (b *AtomicFixedSizeRingBuf) ContigLen() int {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
extent := b.Beg + b.readable
|
||||
firstContigLen := intMin2(extent, b.N) - b.Beg
|
||||
return firstContigLen
|
||||
}
|
||||
|
||||
// constructor. NewAtomicFixedSizeRingBuf will allocate internally
|
||||
// two buffers of size maxViewInBytes.
|
||||
func NewAtomicFixedSizeRingBuf(maxViewInBytes int) *AtomicFixedSizeRingBuf {
|
||||
n := maxViewInBytes
|
||||
r := &AtomicFixedSizeRingBuf{
|
||||
Use: 0, // 0 or 1, whichever is actually in use at the moment.
|
||||
// If we are asked for Bytes() and we wrap, linearize into the other.
|
||||
|
||||
N: n,
|
||||
Beg: 0,
|
||||
readable: 0,
|
||||
}
|
||||
r.A[0] = make([]byte, n, n)
|
||||
r.A[1] = make([]byte, n, n)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Bytes() returns a slice of the contents of the unread portion of the buffer.
//
// To avoid copying, see the companion BytesTwo() call.
//
// Unlike the standard library Bytes() method (on bytes.Buffer for example),
// the result of AtomicFixedSizeRingBuf.Bytes(true) is a completely new
// returned slice, so modifying that slice will have no impact on the contents
// of the internal ring.
//
// Bytes(false) acts like the standard library bytes.Buffer.Bytes() call,
// in that it returns a slice which is backed by the buffer itself (so
// no copy is involved).
//
// The largest slice Bytes ever returns is bounded above by the maxViewInBytes
// value used when calling NewAtomicFixedSizeRingBuf().
//
// Possible side-effect: may modify b.Use, the buffer in use, and reset
// b.Beg to 0 (when the data was wrapped and had to be linearized into
// the other ping/pong buffer).
func (b *AtomicFixedSizeRingBuf) Bytes(makeCopy bool) []byte {
	b.tex.Lock()
	defer b.tex.Unlock()

	extent := b.Beg + b.readable
	if extent <= b.N {
		// we fit contiguously in this buffer without wrapping to the other;
		// hand back a view into the live buffer (no copy needed even
		// when makeCopy is false).
		return b.A[b.Use][b.Beg:(b.Beg + b.readable)]
	}

	// Data wraps: linearize by copying tail then head into the
	// inactive ping/pong buffer, then switch to it.
	src := b.Use
	dest := 1 - b.Use

	n := copy(b.A[dest], b.A[src][b.Beg:])
	n += copy(b.A[dest][n:], b.A[src][0:(extent%b.N)])

	b.Use = dest
	b.Beg = 0

	if makeCopy {
		// caller wants an independent slice, detached from the ring.
		ret := make([]byte, n)
		copy(ret, b.A[b.Use][:n])
		return ret
	}
	return b.A[b.Use][:n]
}
|
||||
|
||||
// TwoBuffers is the return value of BytesTwo(). It holds two
// slices into the readable area of the internal buffer. The
// contents are logically ordered First then Second, but when the
// ring has wrapped, Second physically precedes First in memory.
// Either or both of First and Second may be empty slices.
type TwoBuffers struct {
	First  []byte // the first part of the contents
	Second []byte // the second part of the contents
}
|
||||
|
||||
// BytesTwo returns all readable bytes, but in two separate slices,
|
||||
// to avoid copying. The two slices are from the same buffer, but
|
||||
// are not contiguous. Either or both may be empty slices.
|
||||
func (b *AtomicFixedSizeRingBuf) BytesTwo() TwoBuffers {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
return b.unatomic_BytesTwo()
|
||||
}
|
||||
|
||||
func (b *AtomicFixedSizeRingBuf) unatomic_BytesTwo() TwoBuffers {
|
||||
extent := b.Beg + b.readable
|
||||
if extent <= b.N {
|
||||
// we fit contiguously in this buffer without wrapping to the other.
|
||||
// Let second stay an empty slice.
|
||||
return TwoBuffers{First: b.A[b.Use][b.Beg:(b.Beg + b.readable)], Second: []byte{}}
|
||||
}
|
||||
|
||||
return TwoBuffers{First: b.A[b.Use][b.Beg:(b.Beg + b.readable)], Second: b.A[b.Use][0:(extent % b.N)]}
|
||||
}
|
||||
|
||||
// Purpose of BytesTwo() and AdvanceBytesTwo(): avoid extra copying of data.
//
// AdvanceBytesTwo() takes a TwoBuffers as input; this must have been
// from a previous call to BytesTwo(); no intervening calls to Bytes()
// or Adopt() are allowed (or any other future routine or client data
// access that changes the internal data location or contents) can have
// been made.
//
// After sanity checks, AdvanceBytesTwo() advances the internal buffer,
// effectively calling Advance(len(tb.First) + len(tb.Second)).
//
// If intervening calls that changed the buffers (other than appending
// data to the buffer) are detected, we will panic as a safety/sanity/
// aid-to-debugging measure.
//
func (b *AtomicFixedSizeRingBuf) AdvanceBytesTwo(tb TwoBuffers) {
	b.tex.Lock()
	defer b.tex.Unlock()

	tblen := len(tb.First) + len(tb.Second)

	if tblen == 0 {
		return // nothing to do
	}

	// sanity check: detect whether the buffer shrank or was
	// re-located since the caller's BytesTwo() snapshot was taken
	if tblen > b.readable {
		panic(fmt.Sprintf("tblen was %d, and this was greater than b.readerable = %d. Usage error detected and data loss may have occurred (available data appears to have shrunken out from under us!).", tblen, b.readable))
	}

	// compare the caller's snapshot against the current view;
	// a changed first byte or shape means the data moved.
	tbnow := b.unatomic_BytesTwo()

	if len(tb.First) > 0 {
		if tb.First[0] != tbnow.First[0] {
			panic(fmt.Sprintf("slice contents of First have changed out from under us!: '%s' vs '%s'", string(tb.First), string(tbnow.First)))
		}
	}
	if len(tb.Second) > 0 {
		// the current First must be at least as long as the snapshot's
		if len(tb.First) > len(tbnow.First) {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us! tbnow.First length(%d) is less than tb.First(%d.", len(tbnow.First), len(tb.First)))
		}
		if len(tbnow.Second) == 0 {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us! tbnow.Second is empty, but tb.Second was not"))
		}
		if tb.Second[0] != tbnow.Second[0] {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us!: '%s' vs '%s'", string(tb.Second), string(tbnow.Second)))
		}
	}

	b.unatomic_advance(tblen)
}
|
||||
|
||||
// Read():
|
||||
//
|
||||
// From bytes.Buffer.Read(): Read reads the next len(p) bytes
|
||||
// from the buffer or until the buffer is drained. The return
|
||||
// value n is the number of bytes read. If the buffer has no data
|
||||
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
|
||||
//
|
||||
// from the description of the Reader interface,
|
||||
// http://golang.org/pkg/io/#Reader
|
||||
//
|
||||
/*
|
||||
Reader is the interface that wraps the basic Read method.
|
||||
|
||||
Read reads up to len(p) bytes into p. It returns the number
|
||||
of bytes read (0 <= n <= len(p)) and any error encountered.
|
||||
Even if Read returns n < len(p), it may use all of p as scratch
|
||||
space during the call. If some data is available but not
|
||||
len(p) bytes, Read conventionally returns what is available
|
||||
instead of waiting for more.
|
||||
|
||||
When Read encounters an error or end-of-file condition after
|
||||
successfully reading n > 0 bytes, it returns the number of bytes
|
||||
read. It may return the (non-nil) error from the same call or
|
||||
return the error (and n == 0) from a subsequent call. An instance
|
||||
of this general case is that a Reader returning a non-zero number
|
||||
of bytes at the end of the input stream may return
|
||||
either err == EOF or err == nil. The next Read should
|
||||
return 0, EOF regardless.
|
||||
|
||||
Callers should always process the n > 0 bytes returned before
|
||||
considering the error err. Doing so correctly handles I/O errors
|
||||
that happen after reading some bytes and also both of the
|
||||
allowed EOF behaviors.
|
||||
|
||||
Implementations of Read are discouraged from returning a zero
|
||||
byte count with a nil error, and callers should treat that
|
||||
situation as a no-op.
|
||||
*/
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Read(p []byte) (n int, err error) {
|
||||
return b.ReadAndMaybeAdvance(p, true)
|
||||
}
|
||||
|
||||
// ReadWithoutAdvance(): if you want to Read the data and leave
|
||||
// it in the buffer, so as to peek ahead for example.
|
||||
func (b *AtomicFixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) {
|
||||
return b.ReadAndMaybeAdvance(p, false)
|
||||
}
|
||||
|
||||
func (b *AtomicFixedSizeRingBuf) ReadAndMaybeAdvance(p []byte, doAdvance bool) (n int, err error) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
if b.readable == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
extent := b.Beg + b.readable
|
||||
if extent <= b.N {
|
||||
n += copy(p, b.A[b.Use][b.Beg:extent])
|
||||
} else {
|
||||
n += copy(p, b.A[b.Use][b.Beg:b.N])
|
||||
if n < len(p) {
|
||||
n += copy(p[n:], b.A[b.Use][0:(extent%b.N)])
|
||||
}
|
||||
}
|
||||
if doAdvance {
|
||||
b.unatomic_advance(n)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//
|
||||
// Write writes len(p) bytes from p to the underlying data stream.
|
||||
// It returns the number of bytes written from p (0 <= n <= len(p))
|
||||
// and any error encountered that caused the write to stop early.
|
||||
// Write must return a non-nil error if it returns n < len(p).
|
||||
//
|
||||
// Write doesn't modify b.User, so once a []byte is pinned with
|
||||
// a call to Bytes(), it should remain valid even with additional
|
||||
// calls to Write() that come after the Bytes() call.
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Write(p []byte) (n int, err error) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
for {
|
||||
if len(p) == 0 {
|
||||
// nothing (left) to copy in; notice we shorten our
|
||||
// local copy p (below) as we read from it.
|
||||
return
|
||||
}
|
||||
|
||||
writeCapacity := b.N - b.readable
|
||||
if writeCapacity <= 0 {
|
||||
// we are all full up already.
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
if len(p) > writeCapacity {
|
||||
err = io.ErrShortWrite
|
||||
// leave err set and
|
||||
// keep going, write what we can.
|
||||
}
|
||||
|
||||
writeStart := (b.Beg + b.readable) % b.N
|
||||
|
||||
upperLim := intMin2(writeStart+writeCapacity, b.N)
|
||||
|
||||
k := copy(b.A[b.Use][writeStart:upperLim], p)
|
||||
|
||||
n += k
|
||||
b.readable += k
|
||||
p = p[k:]
|
||||
|
||||
// we can fill from b.A[b.Use][0:something] from
|
||||
// p's remainder, so loop
|
||||
}
|
||||
}
|
||||
|
||||
// WriteTo and ReadFrom avoid intermediate allocation and copies.

// WriteTo writes data to w until there's no more data to write
// or an error occurs. The return value n is the number of bytes
// written. Any error encountered during the write is also returned.
// Returns io.EOF when the buffer is already empty.
//
// Bytes are consumed (advanced past) as they are successfully
// handed to w, so a partial write leaves only the unwritten
// remainder in the ring.
func (b *AtomicFixedSizeRingBuf) WriteTo(w io.Writer) (n int64, err error) {
	b.tex.Lock()
	defer b.tex.Unlock()

	if b.readable == 0 {
		return 0, io.EOF
	}

	// split the readable region into its contiguous tail (first)
	// and, when wrapped, the head at offset 0 (second)
	extent := b.Beg + b.readable
	firstWriteLen := intMin2(extent, b.N) - b.Beg
	secondWriteLen := b.readable - firstWriteLen
	if firstWriteLen > 0 {
		m, e := w.Write(b.A[b.Use][b.Beg:(b.Beg + firstWriteLen)])
		n += int64(m)
		// consume exactly what w accepted, even on error
		b.unatomic_advance(m)

		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != firstWriteLen {
			return n, io.ErrShortWrite
		}
	}
	if secondWriteLen > 0 {
		m, e := w.Write(b.A[b.Use][0:secondWriteLen])
		n += int64(m)
		b.unatomic_advance(m)

		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != secondWriteLen {
			return n, io.ErrShortWrite
		}
	}

	return n, nil
}
|
||||
|
||||
// ReadFrom avoids intermediate allocation and copies.
|
||||
// ReadFrom() reads data from r until EOF or error. The return value n
|
||||
// is the number of bytes read. Any error except io.EOF encountered
|
||||
// during the read is also returned.
|
||||
func (b *AtomicFixedSizeRingBuf) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
for {
|
||||
writeCapacity := b.N - b.readable
|
||||
if writeCapacity <= 0 {
|
||||
// we are all full
|
||||
return n, nil
|
||||
}
|
||||
writeStart := (b.Beg + b.readable) % b.N
|
||||
upperLim := intMin2(writeStart+writeCapacity, b.N)
|
||||
|
||||
m, e := r.Read(b.A[b.Use][writeStart:upperLim])
|
||||
n += int64(m)
|
||||
b.readable += m
|
||||
if e == io.EOF {
|
||||
return n, nil
|
||||
}
|
||||
if e != nil {
|
||||
return n, e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset quickly forgets any data stored in the ring buffer. The
|
||||
// data is still there, but the ring buffer will ignore it and
|
||||
// overwrite those buffers as new data comes in.
|
||||
func (b *AtomicFixedSizeRingBuf) Reset() {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
b.Beg = 0
|
||||
b.readable = 0
|
||||
b.Use = 0
|
||||
}
|
||||
|
||||
// Advance(): non-standard, but better than Next(),
|
||||
// because we don't have to unwrap our buffer and pay the cpu time
|
||||
// for the copy that unwrapping may need.
|
||||
// Useful in conjuction/after ReadWithoutAdvance() above.
|
||||
func (b *AtomicFixedSizeRingBuf) Advance(n int) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
b.unatomic_advance(n)
|
||||
}
|
||||
|
||||
// unatomic_advance(): private implementation of Advance() without
|
||||
// the locks. See Advance() above for description.
|
||||
// Necessary so that other methods that already hold
|
||||
// locks can advance, and there are no recursive mutexes
|
||||
// in Go.
|
||||
func (b *AtomicFixedSizeRingBuf) unatomic_advance(n int) {
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
if n > b.readable {
|
||||
n = b.readable
|
||||
}
|
||||
b.readable -= n
|
||||
b.Beg = (b.Beg + n) % b.N
|
||||
}
|
||||
|
||||
// Adopt(): non-standard.
|
||||
//
|
||||
// For efficiency's sake, (possibly) take ownership of
|
||||
// already allocated slice offered in me.
|
||||
//
|
||||
// If me is large we will adopt it, and we will potentially then
|
||||
// write to the me buffer.
|
||||
// If we already have a bigger buffer, copy me into the existing
|
||||
// buffer instead.
|
||||
//
|
||||
// Side-effect: may change b.Use, among other internal state changes.
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Adopt(me []byte) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
n := len(me)
|
||||
if n > b.N {
|
||||
b.A[0] = me
|
||||
b.A[1] = make([]byte, n, n)
|
||||
b.N = n
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.readable = n
|
||||
} else {
|
||||
// we already have a larger buffer, reuse it.
|
||||
copy(b.A[0], me)
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.readable = n
|
||||
}
|
||||
}
|
||||
|
||||
// intMin2 returns the smaller of a and b. It duplicates intMin
// from rbuf.go so that atomic_rbuf.go stays standalone and usable
// without that file.
func intMin2(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue