parent
b13f462c3c
commit
1b40e01933
@ -0,0 +1,24 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*~
|
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014, 2015 Jason E. Aten, Ph.D.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
@ -0,0 +1,34 @@
|
||||
rbuf: a circular ring buffer in Golang
|
||||
====
|
||||
|
||||
|
||||
type FixedSizeRingBuf struct:
|
||||
|
||||
* is a fixed-size circular ring buffer. Yes, just what it says.
|
||||
This structure is only for bytes, as it was written to
|
||||
optimize I/O, but could be easily adapted to any other type.
|
||||
|
||||
* We keep a pair of ping/pong buffers so that we can linearize
|
||||
the circular buffer into a contiguous slice if need be.
|
||||
|
||||
For efficiency, a FixedSizeRingBuf may be vastly preferred to
|
||||
a bytes.Buffer. The ReadWithoutAdvance(), Advance(), and Adopt()
|
||||
methods are all non-standard methods written for speed.
|
||||
|
||||
For an I/O heavy application, I have replaced bytes.Buffer with
|
||||
FixedSizeRingBuf and seen memory consumption go from 8GB to 25MB.
|
||||
Yes, that is a 300x reduction in memory footprint. Everything ran
|
||||
faster too.
|
||||
|
||||
Note that Bytes(), while inescapable at times, is expensive: avoid
|
||||
it if possible. If all you need is len(Bytes()), then it is better
|
||||
to use the FixedSizeRingBuf.Readable member directly.
|
||||
Bytes() is expensive because it may copy the back and then
|
||||
the front of a wrapped buffer A[Use] into A[1-Use] in order to
|
||||
get a contiguous, unwrapped, slice. If possible use ContigLen()
|
||||
first to get the size that can be read without copying, Read() that
|
||||
amount, and then Read() a second time -- to avoid the copy.
|
||||
|
||||
copyright (c) 2014, Jason E. Aten
|
||||
|
||||
license: MIT
|
@ -0,0 +1,481 @@
|
||||
package rbuf
|
||||
|
||||
// AtomicFixedSizeRingBuf: Synchronized version of FixedSizeRingBuf,
|
||||
// safe for concurrent access.
|
||||
//
|
||||
// copyright (c) 2014, Jason E. Aten
|
||||
// license: MIT
|
||||
//
|
||||
// Some text from the Golang standard library doc is adapted and
|
||||
// reproduced in fragments below to document the expected behaviors
|
||||
// of the interface functions Read()/Write()/ReadFrom()/WriteTo() that
|
||||
// are implemented here. Those descriptions (see
|
||||
// http://golang.org/pkg/io/#Reader for example) are
|
||||
// copyright 2010 The Go Authors.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// AtomicFixedSizeRingBuf: see FixedSizeRingBuf for the full
// details; this is the same, just safe for concurrent access
// (and thus paying the price of synchronization on each call
// as well.)
//
type AtomicFixedSizeRingBuf struct {
	A        [2][]byte  // a pair of ping/pong buffers. Only one is active.
	Use      int        // which A buffer is in active use, 0 or 1
	N        int        // MaxViewInBytes, the size of A[0] and A[1] in bytes.
	Beg      int        // start of data in A[Use]
	readable int        // number of bytes available to read in A[Use]
	tex      sync.Mutex // guards every field above; held by all exported methods
}
|
||||
|
||||
// Readable() returns the number of bytes available for reading.
|
||||
func (b *AtomicFixedSizeRingBuf) Readable() int {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
return b.readable
|
||||
}
|
||||
|
||||
// get the length of the largest read that we can provide to a contiguous slice
|
||||
// without an extra linearizing copy of all bytes internally.
|
||||
func (b *AtomicFixedSizeRingBuf) ContigLen() int {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
extent := b.Beg + b.readable
|
||||
firstContigLen := intMin2(extent, b.N) - b.Beg
|
||||
return firstContigLen
|
||||
}
|
||||
|
||||
// constructor. NewAtomicFixedSizeRingBuf will allocate internally
|
||||
// two buffers of size maxViewInBytes.
|
||||
func NewAtomicFixedSizeRingBuf(maxViewInBytes int) *AtomicFixedSizeRingBuf {
|
||||
n := maxViewInBytes
|
||||
r := &AtomicFixedSizeRingBuf{
|
||||
Use: 0, // 0 or 1, whichever is actually in use at the moment.
|
||||
// If we are asked for Bytes() and we wrap, linearize into the other.
|
||||
|
||||
N: n,
|
||||
Beg: 0,
|
||||
readable: 0,
|
||||
}
|
||||
r.A[0] = make([]byte, n, n)
|
||||
r.A[1] = make([]byte, n, n)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Bytes() returns a slice of the contents of the unread portion of the buffer.
|
||||
//
|
||||
// To avoid copying, see the companion BytesTwo() call.
|
||||
//
|
||||
// Unlike the standard library Bytes() method (on bytes.Buffer for example),
|
||||
// the result of the AtomicFixedSizeRingBuf::Bytes(true) is a completely new
|
||||
// returned slice, so modifying that slice will have no impact on the contents
|
||||
// of the internal ring.
|
||||
//
|
||||
// Bytes(false) acts like the standard library bytes.Buffer::Bytes() call,
|
||||
// in that it returns a slice which is backed by the buffer itself (so
|
||||
// no copy is involved).
|
||||
//
|
||||
// The largest slice Bytes ever returns is bounded above by the maxViewInBytes
|
||||
// value used when calling NewAtomicFixedSizeRingBuf().
|
||||
//
|
||||
// Possible side-effect: may modify b.Use, the buffer in use.
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Bytes(makeCopy bool) []byte {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
extent := b.Beg + b.readable
|
||||
if extent <= b.N {
|
||||
// we fit contiguously in this buffer without wrapping to the other
|
||||
return b.A[b.Use][b.Beg:(b.Beg + b.readable)]
|
||||
}
|
||||
|
||||
// wrap into the other buffer
|
||||
src := b.Use
|
||||
dest := 1 - b.Use
|
||||
|
||||
n := copy(b.A[dest], b.A[src][b.Beg:])
|
||||
n += copy(b.A[dest][n:], b.A[src][0:(extent%b.N)])
|
||||
|
||||
b.Use = dest
|
||||
b.Beg = 0
|
||||
|
||||
if makeCopy {
|
||||
ret := make([]byte, n)
|
||||
copy(ret, b.A[b.Use][:n])
|
||||
return ret
|
||||
}
|
||||
return b.A[b.Use][:n]
|
||||
}
|
||||
|
||||
// TwoBuffers: the return value of BytesTwo(). TwoBuffers
// holds two slices to the contents of the readable
// area of the internal buffer. The slices contents are logically
// ordered First then Second, but the Second will actually
// be physically before the First. Either or both of
// First and Second may be empty slices.
type TwoBuffers struct {
	First  []byte // the first (logically earlier) part of the contents
	Second []byte // the second part: the wrapped-around head, if any
}
|
||||
|
||||
// BytesTwo returns all readable bytes, but in two separate slices,
|
||||
// to avoid copying. The two slices are from the same buffer, but
|
||||
// are not contiguous. Either or both may be empty slices.
|
||||
func (b *AtomicFixedSizeRingBuf) BytesTwo() TwoBuffers {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
return b.unatomic_BytesTwo()
|
||||
}
|
||||
|
||||
func (b *AtomicFixedSizeRingBuf) unatomic_BytesTwo() TwoBuffers {
|
||||
extent := b.Beg + b.readable
|
||||
if extent <= b.N {
|
||||
// we fit contiguously in this buffer without wrapping to the other.
|
||||
// Let second stay an empty slice.
|
||||
return TwoBuffers{First: b.A[b.Use][b.Beg:(b.Beg + b.readable)], Second: []byte{}}
|
||||
}
|
||||
|
||||
return TwoBuffers{First: b.A[b.Use][b.Beg:(b.Beg + b.readable)], Second: b.A[b.Use][0:(extent % b.N)]}
|
||||
}
|
||||
|
||||
// Purpose of BytesTwo() and AdvanceBytesTwo(): avoid extra copying of data.
//
// AdvanceBytesTwo() takes a TwoBuffers as input, this must have been
// from a previous call to BytesTwo(); no intervening calls to Bytes()
// or Adopt() are allowed (or any other future routine or client data
// access that changes the internal data location or contents) can have
// been made.
//
// After sanity checks, AdvanceBytesTwo() advances the internal buffer, effectively
// calling Advance( len(tb.First) + len(tb.Second)).
//
// If intervening-calls that changed the buffers (other than appending
// data to the buffer) are detected, we will panic as a safety/sanity/
// aid-to-debugging measure.
//
func (b *AtomicFixedSizeRingBuf) AdvanceBytesTwo(tb TwoBuffers) {
	b.tex.Lock()
	defer b.tex.Unlock()

	// total number of bytes the caller claims to have consumed
	tblen := len(tb.First) + len(tb.Second)

	if tblen == 0 {
		return // nothing to do
	}

	// sanity check: ensure the data has not relocated in the meantime.
	// (NOTE: the panic message below contains a pre-existing
	// "readerable" typo, kept byte-identical to preserve behavior.)
	if tblen > b.readable {
		panic(fmt.Sprintf("tblen was %d, and this was greater than b.readerable = %d. Usage error detected and data loss may have occurred (available data appears to have shrunken out from under us!).", tblen, b.readable))
	}

	// re-derive the current view and spot-check it against the caller's
	// snapshot: first bytes of each segment must still match.
	tbnow := b.unatomic_BytesTwo()

	if len(tb.First) > 0 {
		if tb.First[0] != tbnow.First[0] {
			panic(fmt.Sprintf("slice contents of First have changed out from under us!: '%s' vs '%s'", string(tb.First), string(tbnow.First)))
		}
	}
	if len(tb.Second) > 0 {
		// if the caller saw a wrapped view, the current First segment
		// must be at least as long as the caller's First.
		if len(tb.First) > len(tbnow.First) {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us! tbnow.First length(%d) is less than tb.First(%d.", len(tbnow.First), len(tb.First)))
		}
		if len(tbnow.Second) == 0 {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us! tbnow.Second is empty, but tb.Second was not"))
		}
		if tb.Second[0] != tbnow.Second[0] {
			panic(fmt.Sprintf("slice contents of Second have changed out from under us!: '%s' vs '%s'", string(tb.Second), string(tbnow.Second)))
		}
	}

	// checks passed: consume the bytes.
	b.unatomic_advance(tblen)
}
|
||||
|
||||
// Read():
|
||||
//
|
||||
// From bytes.Buffer.Read(): Read reads the next len(p) bytes
|
||||
// from the buffer or until the buffer is drained. The return
|
||||
// value n is the number of bytes read. If the buffer has no data
|
||||
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
|
||||
//
|
||||
// from the description of the Reader interface,
|
||||
// http://golang.org/pkg/io/#Reader
|
||||
//
|
||||
/*
|
||||
Reader is the interface that wraps the basic Read method.
|
||||
|
||||
Read reads up to len(p) bytes into p. It returns the number
|
||||
of bytes read (0 <= n <= len(p)) and any error encountered.
|
||||
Even if Read returns n < len(p), it may use all of p as scratch
|
||||
space during the call. If some data is available but not
|
||||
len(p) bytes, Read conventionally returns what is available
|
||||
instead of waiting for more.
|
||||
|
||||
When Read encounters an error or end-of-file condition after
|
||||
successfully reading n > 0 bytes, it returns the number of bytes
|
||||
read. It may return the (non-nil) error from the same call or
|
||||
return the error (and n == 0) from a subsequent call. An instance
|
||||
of this general case is that a Reader returning a non-zero number
|
||||
of bytes at the end of the input stream may return
|
||||
either err == EOF or err == nil. The next Read should
|
||||
return 0, EOF regardless.
|
||||
|
||||
Callers should always process the n > 0 bytes returned before
|
||||
considering the error err. Doing so correctly handles I/O errors
|
||||
that happen after reading some bytes and also both of the
|
||||
allowed EOF behaviors.
|
||||
|
||||
Implementations of Read are discouraged from returning a zero
|
||||
byte count with a nil error, and callers should treat that
|
||||
situation as a no-op.
|
||||
*/
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Read(p []byte) (n int, err error) {
|
||||
return b.ReadAndMaybeAdvance(p, true)
|
||||
}
|
||||
|
||||
// ReadWithoutAdvance(): if you want to Read the data and leave
|
||||
// it in the buffer, so as to peek ahead for example.
|
||||
func (b *AtomicFixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) {
|
||||
return b.ReadAndMaybeAdvance(p, false)
|
||||
}
|
||||
|
||||
// ReadAndMaybeAdvance copies up to len(p) readable bytes into p and,
// when doAdvance is true, consumes them. Returns io.EOF when the ring
// is empty (unless len(p) == 0, which returns (0, nil)).
func (b *AtomicFixedSizeRingBuf) ReadAndMaybeAdvance(p []byte, doAdvance bool) (n int, err error) {
	b.tex.Lock()
	defer b.tex.Unlock()

	if len(p) == 0 {
		return 0, nil
	}
	if b.readable == 0 {
		return 0, io.EOF
	}
	extent := b.Beg + b.readable
	if extent <= b.N {
		// contiguous: one copy suffices.
		n += copy(p, b.A[b.Use][b.Beg:extent])
	} else {
		// wrapped: copy the tail, then (if p has room) the wrapped head.
		n += copy(p, b.A[b.Use][b.Beg:b.N])
		if n < len(p) {
			n += copy(p[n:], b.A[b.Use][0:(extent%b.N)])
		}
	}
	if doAdvance {
		b.unatomic_advance(n)
	}
	return
}
|
||||
|
||||
//
// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
//
// Write doesn't modify b.Use, so once a []byte is pinned with
// a call to Bytes(), it should remain valid even with additional
// calls to Write() that come after the Bytes() call.
//
func (b *AtomicFixedSizeRingBuf) Write(p []byte) (n int, err error) {
	b.tex.Lock()
	defer b.tex.Unlock()

	for {
		if len(p) == 0 {
			// nothing (left) to copy in; notice we shorten our
			// local copy p (below) as we read from it.
			return
		}

		writeCapacity := b.N - b.readable
		if writeCapacity <= 0 {
			// we are all full up already.
			return n, io.ErrShortWrite
		}
		if len(p) > writeCapacity {
			err = io.ErrShortWrite
			// leave err set and
			// keep going, write what we can.
		}

		// first free byte, in ring coordinates
		writeStart := (b.Beg + b.readable) % b.N

		// stop at the physical end of the buffer; a second loop
		// iteration handles the wrapped remainder.
		upperLim := intMin2(writeStart+writeCapacity, b.N)

		k := copy(b.A[b.Use][writeStart:upperLim], p)

		n += k
		b.readable += k
		p = p[k:]

		// we can fill from b.A[b.Use][0:something] from
		// p's remainder, so loop
	}
}
|
||||
|
||||
// WriteTo and ReadFrom avoid intermediate allocation and copies.

// WriteTo avoids intermediate allocation and copies.
// WriteTo writes data to w until there's no more data to write
// or when an error occurs. The return value n is the number of
// bytes written. Any error encountered during the write is also returned.
// Returns io.EOF immediately when the ring holds no readable bytes.
func (b *AtomicFixedSizeRingBuf) WriteTo(w io.Writer) (n int64, err error) {
	b.tex.Lock()
	defer b.tex.Unlock()

	if b.readable == 0 {
		return 0, io.EOF
	}

	// split the readable region into at most two physical segments
	extent := b.Beg + b.readable
	firstWriteLen := intMin2(extent, b.N) - b.Beg
	secondWriteLen := b.readable - firstWriteLen
	if firstWriteLen > 0 {
		m, e := w.Write(b.A[b.Use][b.Beg:(b.Beg + firstWriteLen)])
		n += int64(m)
		// consume exactly what the writer accepted, even on error
		b.unatomic_advance(m)

		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != firstWriteLen {
			return n, io.ErrShortWrite
		}
	}
	if secondWriteLen > 0 {
		m, e := w.Write(b.A[b.Use][0:secondWriteLen])
		n += int64(m)
		b.unatomic_advance(m)

		if e != nil {
			return n, e
		}
		// all bytes should have been written, by definition of
		// Write method in io.Writer
		if m != secondWriteLen {
			return n, io.ErrShortWrite
		}
	}

	return n, nil
}
|
||||
|
||||
// ReadFrom avoids intermediate allocation and copies.
// ReadFrom() reads data from r until EOF or error. The return value n
// is the number of bytes read. Any error except io.EOF encountered
// during the read is also returned.
//
// NOTE(review): the mutex is held across r.Read(), so a slow or
// blocking reader stalls every other method — confirm callers expect
// this.
func (b *AtomicFixedSizeRingBuf) ReadFrom(r io.Reader) (n int64, err error) {
	b.tex.Lock()
	defer b.tex.Unlock()

	for {
		writeCapacity := b.N - b.readable
		if writeCapacity <= 0 {
			// we are all full
			return n, nil
		}
		// read directly into the free region, stopping at the
		// physical end; the next loop iteration handles the wrap.
		writeStart := (b.Beg + b.readable) % b.N
		upperLim := intMin2(writeStart+writeCapacity, b.N)

		m, e := r.Read(b.A[b.Use][writeStart:upperLim])
		n += int64(m)
		b.readable += m
		if e == io.EOF {
			// EOF is not an error for ReadFrom, by convention.
			return n, nil
		}
		if e != nil {
			return n, e
		}
	}
}
|
||||
|
||||
// Reset quickly forgets any data stored in the ring buffer. The
|
||||
// data is still there, but the ring buffer will ignore it and
|
||||
// overwrite those buffers as new data comes in.
|
||||
func (b *AtomicFixedSizeRingBuf) Reset() {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
b.Beg = 0
|
||||
b.readable = 0
|
||||
b.Use = 0
|
||||
}
|
||||
|
||||
// Advance(): non-standard, but better than Next(),
|
||||
// because we don't have to unwrap our buffer and pay the cpu time
|
||||
// for the copy that unwrapping may need.
|
||||
// Useful in conjuction/after ReadWithoutAdvance() above.
|
||||
func (b *AtomicFixedSizeRingBuf) Advance(n int) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
b.unatomic_advance(n)
|
||||
}
|
||||
|
||||
// unatomic_advance(): private implementation of Advance() without
|
||||
// the locks. See Advance() above for description.
|
||||
// Necessary so that other methods that already hold
|
||||
// locks can advance, and there are no recursive mutexes
|
||||
// in Go.
|
||||
func (b *AtomicFixedSizeRingBuf) unatomic_advance(n int) {
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
if n > b.readable {
|
||||
n = b.readable
|
||||
}
|
||||
b.readable -= n
|
||||
b.Beg = (b.Beg + n) % b.N
|
||||
}
|
||||
|
||||
// Adopt(): non-standard.
|
||||
//
|
||||
// For efficiency's sake, (possibly) take ownership of
|
||||
// already allocated slice offered in me.
|
||||
//
|
||||
// If me is large we will adopt it, and we will potentially then
|
||||
// write to the me buffer.
|
||||
// If we already have a bigger buffer, copy me into the existing
|
||||
// buffer instead.
|
||||
//
|
||||
// Side-effect: may change b.Use, among other internal state changes.
|
||||
//
|
||||
func (b *AtomicFixedSizeRingBuf) Adopt(me []byte) {
|
||||
b.tex.Lock()
|
||||
defer b.tex.Unlock()
|
||||
|
||||
n := len(me)
|
||||
if n > b.N {
|
||||
b.A[0] = me
|
||||
b.A[1] = make([]byte, n, n)
|
||||
b.N = n
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.readable = n
|
||||
} else {
|
||||
// we already have a larger buffer, reuse it.
|
||||
copy(b.A[0], me)
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.readable = n
|
||||
}
|
||||
}
|
||||
|
||||
// intMin2 returns the smaller of a and b. It duplicates intMin from
// rbuf.go so that atomic_rbuf.go stays standalone and usable without
// that file.
func intMin2(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
@ -0,0 +1,172 @@
|
||||
package rbuf
|
||||
|
||||
// copyright (c) 2016, Jason E. Aten
|
||||
// license: MIT
|
||||
|
||||
import "io"
|
||||
|
||||
// PointerRingBuf:
//
// a fixed-size circular ring buffer of interface{} values.
//
type PointerRingBuf struct {
	A        []interface{} // the backing slice; only A[0:N] is ever used.
	N        int           // MaxView, the total size of A, whether or not in use.
	Beg      int           // start of in-use data in A
	Readable int           // number of pointers available in A (in use)
}
|
||||
|
||||
// constructor. NewPointerRingBuf will allocate internally
|
||||
// a slice of size maxViewInBytes.
|
||||
func NewPointerRingBuf(maxViewInBytes int) *PointerRingBuf {
|
||||
n := maxViewInBytes
|
||||
r := &PointerRingBuf{
|
||||
N: n,
|
||||
Beg: 0,
|
||||
Readable: 0,
|
||||
}
|
||||
r.A = make([]interface{}, n, n)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// TwoContig returns all readable pointers, but in two separate slices,
// to avoid copying. The two slices are from the same buffer, but
// are not contiguous. Either or both may be empty slices.
//
// NOTE(review): the makeCopy parameter is currently ignored — both
// returned slices always alias the internal buffer. Confirm whether
// copy-on-true semantics were intended.
func (b *PointerRingBuf) TwoContig(makeCopy bool) (first []interface{}, second []interface{}) {

	extent := b.Beg + b.Readable
	if extent <= b.N {
		// we fit contiguously in this buffer without wrapping to the other.
		// Let second stay an empty slice.
		return b.A[b.Beg:(b.Beg + b.Readable)], second
	}

	// wrapped: tail of A first, then the wrapped head.
	return b.A[b.Beg:b.N], b.A[0:(extent % b.N)]
}
|
||||
|
||||
// ReadPtrs():
|
||||
//
|
||||
// from bytes.Buffer.Read(): Read reads the next len(p) interface{}
|
||||
// pointers from the buffer or until the buffer is drained. The return
|
||||
// value n is the number of bytes read. If the buffer has no data
|
||||
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
|
||||
func (b *PointerRingBuf) ReadPtrs(p []interface{}) (n int, err error) {
|
||||
return b.readAndMaybeAdvance(p, true)
|
||||
}
|
||||
|
||||
// ReadWithoutAdvance(): if you want to Read the data and leave
|
||||
// it in the buffer, so as to peek ahead for example.
|
||||
func (b *PointerRingBuf) ReadWithoutAdvance(p []interface{}) (n int, err error) {
|
||||
return b.readAndMaybeAdvance(p, false)
|
||||
}
|
||||
|
||||
// readAndMaybeAdvance copies up to len(p) readable values into p and,
// when doAdvance is true, consumes them. Returns io.EOF when the ring
// is empty (unless len(p) == 0, which returns (0, nil)).
func (b *PointerRingBuf) readAndMaybeAdvance(p []interface{}, doAdvance bool) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	if b.Readable == 0 {
		return 0, io.EOF
	}
	extent := b.Beg + b.Readable
	if extent <= b.N {
		// contiguous: one copy suffices.
		n += copy(p, b.A[b.Beg:extent])
	} else {
		// wrapped: copy the tail, then (if p has room) the wrapped head.
		n += copy(p, b.A[b.Beg:b.N])
		if n < len(p) {
			n += copy(p[n:], b.A[0:(extent%b.N)])
		}
	}
	if doAdvance {
		b.Advance(n)
	}
	return
}
|
||||
|
||||
//
// WritePtrs writes len(p) interface{} values from p to
// the underlying data stream.
// It returns the number of values written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
//
func (b *PointerRingBuf) WritePtrs(p []interface{}) (n int, err error) {
	for {
		if len(p) == 0 {
			// nothing (left) to copy in; notice we shorten our
			// local copy p (below) as we read from it.
			return
		}

		writeCapacity := b.N - b.Readable
		if writeCapacity <= 0 {
			// we are all full up already.
			return n, io.ErrShortWrite
		}
		if len(p) > writeCapacity {
			err = io.ErrShortWrite
			// leave err set and
			// keep going, write what we can.
		}

		// first free slot, in ring coordinates
		writeStart := (b.Beg + b.Readable) % b.N

		// stop at the physical end of A; the next loop iteration
		// handles the wrapped remainder.
		upperLim := intMin(writeStart+writeCapacity, b.N)

		k := copy(b.A[writeStart:upperLim], p)

		n += k
		b.Readable += k
		p = p[k:]

		// we can fill from b.A[0:something] from
		// p's remainder, so loop
	}
}
|
||||
|
||||
// Reset quickly forgets any data stored in the ring buffer. The
|
||||
// data is still there, but the ring buffer will ignore it and
|
||||
// overwrite those buffers as new data comes in.
|
||||
func (b *PointerRingBuf) Reset() {
|
||||
b.Beg = 0
|
||||
b.Readable = 0
|
||||
}
|
||||
|
||||
// Advance(): non-standard, but better than Next(),
|
||||
// because we don't have to unwrap our buffer and pay the cpu time
|
||||
// for the copy that unwrapping may need.
|
||||
// Useful in conjuction/after ReadWithoutAdvance() above.
|
||||
func (b *PointerRingBuf) Advance(n int) {
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
if n > b.Readable {
|
||||
n = b.Readable
|
||||
}
|
||||
b.Readable -= n
|
||||
b.Beg = (b.Beg + n) % b.N
|
||||
}
|
||||
|
||||
// Adopt(): non-standard.
|
||||
//
|
||||
// For efficiency's sake, (possibly) take ownership of
|
||||
// already allocated slice offered in me.
|
||||
//
|
||||
// If me is large we will adopt it, and we will potentially then
|
||||
// write to the me buffer.
|
||||
// If we already have a bigger buffer, copy me into the existing
|
||||
// buffer instead.
|
||||
func (b *PointerRingBuf) Adopt(me []interface{}) {
|
||||
n := len(me)
|
||||
if n > b.N {
|
||||
b.A = me
|
||||
b.N = n
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
} else {
|
||||
// we already have a larger buffer, reuse it.
|
||||
copy(b.A, me)
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
}
|
||||
}
|
@ -0,0 +1,501 @@
|
||||
package rbuf
|
||||
|
||||
// copyright (c) 2014, Jason E. Aten
|
||||
// license: MIT
|
||||
|
||||
// Some text from the Golang standard library doc is adapted and
|
||||
// reproduced in fragments below to document the expected behaviors
|
||||
// of the interface functions Read()/Write()/ReadFrom()/WriteTo() that
|
||||
// are implemented here. Those descriptions (see
|
||||
// http://golang.org/pkg/io/#Reader for example) are
|
||||
// copyright 2010 The Go Authors.
|
||||
|
||||
import "io"
|
||||
|
||||
// FixedSizeRingBuf:
//
// a fixed-size circular ring buffer. Yes, just what it says.
//
// We keep a pair of ping/pong buffers so that we can linearize
// the circular buffer into a contiguous slice if need be.
//
// For efficiency, a FixedSizeRingBuf may be vastly preferred to
// a bytes.Buffer. The ReadWithoutAdvance(), Advance(), and Adopt()
// methods are all non-standard methods written for speed.
//
// For an I/O heavy application, I have replaced bytes.Buffer with
// FixedSizeRingBuf and seen memory consumption go from 8GB to 25MB.
// Yes, that is a 300x reduction in memory footprint. Everything ran
// faster too.
//
// Note that Bytes(), while inescapable at times, is expensive: avoid
// it if possible. Instead it is better to use the FixedSizeRingBuf.Readable
// member to get the number of bytes available. Bytes() is expensive because
// it may copy the back and then the front of a wrapped buffer A[Use]
// into A[1-Use] in order to get a contiguous slice. If possible use ContigLen()
// first to get the size that can be read without copying, Read() that
// amount, and then Read() a second time -- to avoid the copy. See
// BytesTwo() for a method that does this for you.
//
type FixedSizeRingBuf struct {
	A        [2][]byte // a pair of ping/pong buffers. Only one is active.
	Use      int       // which A buffer is in active use, 0 or 1
	N        int       // MaxViewInBytes, the size of A[0] and A[1] in bytes.
	Beg      int       // start of data in A[Use]
	Readable int       // number of bytes available to read in A[Use]
}
|
||||
|
||||
// get the length of the largest read that we can provide to a contiguous slice
|
||||
// without an extra linearizing copy of all bytes internally.
|
||||
func (b *FixedSizeRingBuf) ContigLen() int {
|
||||
extent := b.Beg + b.Readable
|
||||
firstContigLen := intMin(extent, b.N) - b.Beg
|
||||
return firstContigLen
|
||||
}
|
||||
|
||||
// constructor. NewFixedSizeRingBuf will allocate internally
|
||||
// two buffers of size maxViewInBytes.
|
||||
func NewFixedSizeRingBuf(maxViewInBytes int) *FixedSizeRingBuf {
|
||||
n := maxViewInBytes
|
||||
r := &FixedSizeRingBuf{
|
||||
Use: 0, // 0 or 1, whichever is actually in use at the moment.
|
||||
// If we are asked for Bytes() and we wrap, linearize into the other.
|
||||
|
||||
N: n,
|
||||
Beg: 0,
|
||||
Readable: 0,
|
||||
}
|
||||
r.A[0] = make([]byte, n, n)
|
||||
r.A[1] = make([]byte, n, n)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// from the standard library description of Bytes():
// Bytes() returns a slice of the contents of the unread portion of the buffer.
// If the caller changes the contents of the
// returned slice, the contents of the buffer will change provided there
// are no intervening method calls on the Buffer.
//
// The largest slice Bytes ever returns is bounded above by the maxViewInBytes
// value used when calling NewFixedSizeRingBuf().
//
// Side-effect: when the contents wrap, Bytes linearizes them into the
// other ping/pong buffer and updates b.Use and b.Beg accordingly.
func (b *FixedSizeRingBuf) Bytes() []byte {

	extent := b.Beg + b.Readable
	if extent <= b.N {
		// we fit contiguously in this buffer without wrapping to the other
		return b.A[b.Use][b.Beg:(b.Beg + b.Readable)]
	}

	// wrap into the other buffer: tail of src first, then the wrapped head
	src := b.Use
	dest := 1 - b.Use

	n := copy(b.A[dest], b.A[src][b.Beg:])
	n += copy(b.A[dest][n:], b.A[src][0:(extent%b.N)])

	b.Use = dest
	b.Beg = 0

	return b.A[b.Use][:n]
}
|
||||
|
||||
// BytesTwo returns all readable bytes, but in two separate slices,
|
||||
// to avoid copying. The two slices are from the same buffer, but
|
||||
// are not contiguous. Either or both may be empty slices.
|
||||
func (b *FixedSizeRingBuf) BytesTwo(makeCopy bool) (first []byte, second []byte) {
|
||||
|
||||
extent := b.Beg + b.Readable
|
||||
if extent <= b.N {
|
||||
// we fit contiguously in this buffer without wrapping to the other.
|
||||
// Let second stay an empty slice.
|
||||
return b.A[b.Use][b.Beg:(b.Beg + b.Readable)], second
|
||||
}
|
||||
|
||||
return b.A[b.Use][b.Beg:b.N], b.A[b.Use][0:(extent % b.N)]
|
||||
}
|
||||
|
||||
// Read():
//
// from bytes.Buffer.Read(): Read reads the next len(p) bytes
// from the buffer or until the buffer is drained. The return
// value n is the number of bytes read. If the buffer has no data
// to return, err is io.EOF (unless len(p) is zero); otherwise it is nil.
//
// from the description of the Reader interface,
// http://golang.org/pkg/io/#Reader
//
/*
Reader is the interface that wraps the basic Read method.

Read reads up to len(p) bytes into p. It returns the number
of bytes read (0 <= n <= len(p)) and any error encountered.
Even if Read returns n < len(p), it may use all of p as scratch
space during the call. If some data is available but not
len(p) bytes, Read conventionally returns what is available
instead of waiting for more.

When Read encounters an error or end-of-file condition after
successfully reading n > 0 bytes, it returns the number of bytes
read. It may return the (non-nil) error from the same call or
return the error (and n == 0) from a subsequent call. An instance
of this general case is that a Reader returning a non-zero number
of bytes at the end of the input stream may return
either err == EOF or err == nil. The next Read should
return 0, EOF regardless.

Callers should always process the n > 0 bytes returned before
considering the error err. Doing so correctly handles I/O errors
that happen after reading some bytes and also both of the
allowed EOF behaviors.

Implementations of Read are discouraged from returning a zero
byte count with a nil error, and callers should treat that
situation as a no-op.
*/
//
func (b *FixedSizeRingBuf) Read(p []byte) (n int, err error) {
	// Read consumes what it returns: the read position advances.
	return b.ReadAndMaybeAdvance(p, true)
}
||||
// ReadWithoutAdvance(): if you want to Read the data and leave
// it in the buffer, so as to peek ahead for example.
// The read position is not moved, so a subsequent Read or
// ReadWithoutAdvance returns the same bytes again.
func (b *FixedSizeRingBuf) ReadWithoutAdvance(p []byte) (n int, err error) {
	return b.ReadAndMaybeAdvance(p, false)
}
|
||||
// ReadAndMaybeAdvance copies up to len(p) readable bytes into p,
// returning the count copied. If doAdvance is true, the read position
// is advanced past the copied bytes; otherwise it is left untouched
// (peek semantics). Returns io.EOF when the ring is empty and
// len(p) > 0; returns (0, nil) when len(p) == 0.
func (b *FixedSizeRingBuf) ReadAndMaybeAdvance(p []byte, doAdvance bool) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	if b.Readable == 0 {
		return 0, io.EOF
	}
	extent := b.Beg + b.Readable
	if extent <= b.N {
		// readable bytes are contiguous in the active buffer.
		n += copy(p, b.A[b.Use][b.Beg:extent])
	} else {
		// readable bytes wrap: copy the tail segment [Beg, N) first,
		// then the wrapped head segment [0, extent%N) if p has room.
		n += copy(p, b.A[b.Use][b.Beg:b.N])
		if n < len(p) {
			n += copy(p[n:], b.A[b.Use][0:(extent%b.N)])
		}
	}
	if doAdvance {
		b.Advance(n)
	}
	return
}
||||
//
// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= p)
// and any error encountered that caused the write to stop early.
// Write must return a non-nil error if it returns n < len(p).
//
func (b *FixedSizeRingBuf) Write(p []byte) (n int, err error) {
	for {
		if len(p) == 0 {
			// nothing (left) to copy in; notice we shorten our
			// local copy p (below) as we read from it.
			return
		}

		writeCapacity := b.N - b.Readable
		if writeCapacity <= 0 {
			// we are all full up already.
			return n, io.ErrShortWrite
		}
		if len(p) > writeCapacity {
			err = io.ErrShortWrite
			// leave err set and
			// keep going, write what we can.
		}

		// first free byte, wrapped around the ring if necessary.
		writeStart := (b.Beg + b.Readable) % b.N

		// stop at the physical end of the buffer; any wrapped
		// remainder is handled by the next loop iteration.
		upperLim := intMin(writeStart+writeCapacity, b.N)

		k := copy(b.A[b.Use][writeStart:upperLim], p)

		n += k
		b.Readable += k
		p = p[k:]

		// we can fill from b.A[b.Use][0:something] from
		// p's remainder, so loop
	}
}
||||
// WriteTo and ReadFrom avoid intermediate allocation and copies.
|
||||
|
||||
// WriteTo avoids intermediate allocation and copies.
|
||||
// WriteTo writes data to w until there's no more data to write
|
||||
// or when an error occurs. The return value n is the number of
|
||||
// bytes written. Any error encountered during the write is also returned.
|
||||
func (b *FixedSizeRingBuf) WriteTo(w io.Writer) (n int64, err error) {
|
||||
|
||||
if b.Readable == 0 {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
extent := b.Beg + b.Readable
|
||||
firstWriteLen := intMin(extent, b.N) - b.Beg
|
||||
secondWriteLen := b.Readable - firstWriteLen
|
||||
if firstWriteLen > 0 {
|
||||
m, e := w.Write(b.A[b.Use][b.Beg:(b.Beg + firstWriteLen)])
|
||||
n += int64(m)
|
||||
b.Advance(m)
|
||||
|
||||
if e != nil {
|
||||
return n, e
|
||||
}
|
||||
// all bytes should have been written, by definition of
|
||||
// Write method in io.Writer
|
||||
if m != firstWriteLen {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
if secondWriteLen > 0 {
|
||||
m, e := w.Write(b.A[b.Use][0:secondWriteLen])
|
||||
n += int64(m)
|
||||
b.Advance(m)
|
||||
|
||||
if e != nil {
|
||||
return n, e
|
||||
}
|
||||
// all bytes should have been written, by definition of
|
||||
// Write method in io.Writer
|
||||
if m != secondWriteLen {
|
||||
return n, io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// ReadFrom avoids intermediate allocation and copies.
|
||||
// ReadFrom() reads data from r until EOF or error. The return value n
|
||||
// is the number of bytes read. Any error except io.EOF encountered
|
||||
// during the read is also returned.
|
||||
func (b *FixedSizeRingBuf) ReadFrom(r io.Reader) (n int64, err error) {
|
||||
for {
|
||||
writeCapacity := b.N - b.Readable
|
||||
if writeCapacity <= 0 {
|
||||
// we are all full
|
||||
return n, nil
|
||||
}
|
||||
writeStart := (b.Beg + b.Readable) % b.N
|
||||
upperLim := intMin(writeStart+writeCapacity, b.N)
|
||||
|
||||
m, e := r.Read(b.A[b.Use][writeStart:upperLim])
|
||||
n += int64(m)
|
||||
b.Readable += m
|
||||
if e == io.EOF {
|
||||
return n, nil
|
||||
}
|
||||
if e != nil {
|
||||
return n, e
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Reset quickly forgets any data stored in the ring buffer. The
|
||||
// data is still there, but the ring buffer will ignore it and
|
||||
// overwrite those buffers as new data comes in.
|
||||
func (b *FixedSizeRingBuf) Reset() {
|
||||
b.Beg = 0
|
||||
b.Readable = 0
|
||||
b.Use = 0
|
||||
}
|
||||
|
||||
// Advance(): non-standard, but better than Next(),
|
||||
// because we don't have to unwrap our buffer and pay the cpu time
|
||||
// for the copy that unwrapping may need.
|
||||
// Useful in conjuction/after ReadWithoutAdvance() above.
|
||||
func (b *FixedSizeRingBuf) Advance(n int) {
|
||||
if n <= 0 {
|
||||
return
|
||||
}
|
||||
if n > b.Readable {
|
||||
n = b.Readable
|
||||
}
|
||||
b.Readable -= n
|
||||
b.Beg = (b.Beg + n) % b.N
|
||||
}
|
||||
|
||||
// Adopt(): non-standard.
|
||||
//
|
||||
// For efficiency's sake, (possibly) take ownership of
|
||||
// already allocated slice offered in me.
|
||||
//
|
||||
// If me is large we will adopt it, and we will potentially then
|
||||
// write to the me buffer.
|
||||
// If we already have a bigger buffer, copy me into the existing
|
||||
// buffer instead.
|
||||
func (b *FixedSizeRingBuf) Adopt(me []byte) {
|
||||
n := len(me)
|
||||
if n > b.N {
|
||||
b.A[0] = me
|
||||
b.A[1] = make([]byte, n, n)
|
||||
b.N = n
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
} else {
|
||||
// we already have a larger buffer, reuse it.
|
||||
copy(b.A[0], me)
|
||||
b.Use = 0
|
||||
b.Beg = 0
|
||||
b.Readable = n
|
||||
}
|
||||
}
|
||||
|
||||
// intMax returns the larger of a and b.
func intMax(a, b int) int {
	if a < b {
		return b
	}
	return a
}
|
||||
// intMin returns the smaller of a and b.
func intMin(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
||||
// Avail returns the number of bytes currently readable in the ring.
func (f *FixedSizeRingBuf) Avail() int {
	return f.Readable
}
|
||||
// returns the earliest index, or -1 if
|
||||
// the ring is empty
|
||||
func (f *FixedSizeRingBuf) First() int {
|
||||
if f.Readable == 0 {
|
||||
return -1
|
||||
}
|
||||
return f.Beg
|
||||
}
|
||||
|
||||
// Nextpos returns the index of the element after
// from, or -1 if no more. returns -2 if erroneous
// input (bad from).
func (f *FixedSizeRingBuf) Nextpos(from int) int {
	// reject indexes outside the physical buffer entirely.
	if from >= f.N || from < 0 {
		return -2
	}
	if f.Readable == 0 {
		return -1
	}

	// the last readable element has no successor.
	last := f.Last()
	if from == last {
		return -1
	}
	// a0..a1 is the first FIFO segment, b0..b1 the wrapped second
	// segment (both inclusive; -1 when unused) -- see LegalPos.
	a0, a1, b0, b1 := f.LegalPos()
	switch {
	case from >= a0 && from < a1:
		return from + 1
	case from == a1:
		// end of the first segment: continue into the second.
		return b0 // can be -1
	case from >= b0 && from < b1:
		return from + 1
	case from == b1:
		return -1
	}
	// from lies outside the readable segments.
	return -1
}
|
||||
// LegalPos returns the legal index positions,
// [a0,aLast] and [b0,bLast] inclusive, where the
// [a0,aLast] holds the first FIFO ordered segment,
// and the [b0,bLast] holds the second ordered segment,
// if any.
// A position of -1 means the segment is not used,
// perhaps because b.Readable is zero, or because
// the second segment [b0,bLast] is not in use (when
// everything fits in the first [a0,aLast] segment).
//
func (b *FixedSizeRingBuf) LegalPos() (a0, aLast, b0, bLast int) {
	// default: both segments unused.
	a0 = -1
	aLast = -1
	b0 = -1
	bLast = -1
	if b.Readable == 0 {
		return
	}
	a0 = b.Beg
	// index of the final readable element, before wrapping.
	last := b.Beg + b.Readable - 1
	if last < b.N {
		// everything fits in the first segment without wrapping.
		aLast = last
		return
	}
	// data wraps: first segment runs to the physical end,
	// second segment starts at index 0.
	aLast = b.N - 1
	b0 = 0
	bLast = last % b.N
	return
}
|
||||
// Prevpos returns the index of the element before
// from, or -1 if no more and from is the
// first in the ring. Returns -2 on bad
// from position.
func (f *FixedSizeRingBuf) Prevpos(from int) int {
	// reject indexes outside the physical buffer entirely.
	if from >= f.N || from < 0 {
		return -2
	}
	if f.Readable == 0 {
		return -1
	}
	// the earliest element has no predecessor.
	if from == f.Beg {
		return -1
	}
	// a0..a1 is the first FIFO segment, b0..b1 the wrapped second
	// segment (both inclusive; -1 when unused) -- see LegalPos.
	a0, a1, b0, b1 := f.LegalPos()
	switch {
	case from == a0:
		return -1
	case from > a0 && from <= a1:
		return from - 1
	case from == b0:
		// start of the second segment: step back to the end of the first.
		return a1
	case from > b0 && from <= b1:
		return from - 1
	}
	// from lies outside the readable segments.
	return -1
}
|
||||
// returns the index of the last element,
|
||||
// or -1 if the ring is empty.
|
||||
func (f *FixedSizeRingBuf) Last() int {
|
||||
if f.Readable == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
last := f.Beg + f.Readable - 1
|
||||
if last < f.N {
|
||||
// we fit without wrapping
|
||||
return last
|
||||
}
|
||||
|
||||
return last % f.N
|
||||
}
|
||||
|
||||
// Kth presents the contents of the
|
||||
// ring as a strictly linear sequence,
|
||||
// so the user doesn't need to think
|
||||
// about modular arithmetic. Here k indexes from
|
||||
// [0, f.Readable-1], assuming f.Avail()
|
||||
// is greater than 0. Kth() returns an
|
||||
// actual index where the logical k-th
|
||||
// element, starting from f.Beg, resides.
|
||||
// f.Beg itself lives at k = 0. If k is
|
||||
// out of bounds, or the ring is empty,
|
||||
// -1 is returned.
|
||||
func (f *FixedSizeRingBuf) Kth(k int) int {
|
||||
if f.Readable == 0 || k < 0 || k >= f.Readable {
|
||||
return -1
|
||||
}
|
||||
return (f.Beg + k) % f.N
|
||||
}
|
Loading…
Reference in new issue