parent
54373302cc
commit
649fd2b50f
@ -1,21 +0,0 @@
|
|||||||
package tianyancha
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"go.dtapp.net/library/utils/gohttp"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type App struct{}
|
|
||||||
|
|
||||||
func (app *App) request(url string, params map[string]interface{}, method string) (resp []byte, err error) {
|
|
||||||
// 请求
|
|
||||||
if method == http.MethodGet {
|
|
||||||
get, err := gohttp.Get(url, params)
|
|
||||||
return get.Body, err
|
|
||||||
} else {
|
|
||||||
paramsStr, err := json.Marshal(params)
|
|
||||||
postJson, err := gohttp.PostJson(url, paramsStr)
|
|
||||||
return postJson.Body, err
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,35 +0,0 @@
|
|||||||
package tianyancha
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type EquityHumanIndexNodeResponse struct {
|
|
||||||
IsLogin int `json:"isLogin"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
Special string `json:"special"`
|
|
||||||
State string `json:"state"`
|
|
||||||
VipMessage string `json:"vipMessage"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type EquityHumanIndexNodeResult struct {
|
|
||||||
Result EquityHumanIndexNodeResponse // 结果
|
|
||||||
Body []byte // 内容
|
|
||||||
Err error // 错误
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewEquityHumanIndexNodeResult(result EquityHumanIndexNodeResponse, body []byte, err error) *EquityHumanIndexNodeResult {
|
|
||||||
return &EquityHumanIndexNodeResult{Result: result, Body: body, Err: err}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *App) EquityHumanIndexNode(notMustParams ...Params) *EquityHumanIndexNodeResult {
|
|
||||||
// 参数
|
|
||||||
params := app.NewParamsWith(notMustParams...)
|
|
||||||
// 请求
|
|
||||||
body, err := app.request("https://capi.tianyancha.com/cloud-equity-provider/v4/equity/humanIndexnode.json", params, http.MethodGet)
|
|
||||||
// 定义
|
|
||||||
var response EquityHumanIndexNodeResponse
|
|
||||||
err = json.Unmarshal(body, &response)
|
|
||||||
return NewEquityHumanIndexNodeResult(response, body, err)
|
|
||||||
}
|
|
@ -1,27 +0,0 @@
|
|||||||
package tianyancha
|
|
||||||
|
|
||||||
// Params 请求参数
|
|
||||||
type Params map[string]interface{}
|
|
||||||
|
|
||||||
func NewParams() Params {
|
|
||||||
p := make(Params)
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *App) NewParamsWith(params ...Params) Params {
|
|
||||||
p := make(Params)
|
|
||||||
for _, v := range params {
|
|
||||||
p.SetParams(v)
|
|
||||||
}
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Params) Set(key string, value interface{}) {
|
|
||||||
p[key] = value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Params) SetParams(params Params) {
|
|
||||||
for key, value := range params {
|
|
||||||
p[key] = value
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,64 +0,0 @@
|
|||||||
package tianyancha
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
)
|
|
||||||
|
|
||||||
type SearchHumanSuggestResponse struct {
|
|
||||||
Data struct {
|
|
||||||
Id int `json:"id"`
|
|
||||||
HumanName interface{} `json:"humanName"`
|
|
||||||
DistinctNum int `json:"distinctNum"`
|
|
||||||
ViewNum int `json:"viewNum"`
|
|
||||||
ResultCount int `json:"resultCount"`
|
|
||||||
ResultList []struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Hid int64 `json:"hid"`
|
|
||||||
HeadUrl interface{} `json:"headUrl"`
|
|
||||||
Introduction interface{} `json:"introduction"`
|
|
||||||
Event interface{} `json:"event"`
|
|
||||||
BossCertificate int `json:"bossCertificate"`
|
|
||||||
CompanyNum int `json:"companyNum"`
|
|
||||||
Office []interface{} `json:"office"`
|
|
||||||
Companys interface{} `json:"companys"`
|
|
||||||
PartnerNum int `json:"partnerNum"`
|
|
||||||
CoopCount int `json:"coopCount"`
|
|
||||||
Partners interface{} `json:"partners"`
|
|
||||||
Cid int64 `json:"cid"`
|
|
||||||
TypeJoin interface{} `json:"typeJoin"`
|
|
||||||
Alias interface{} `json:"alias"`
|
|
||||||
ServiceType int `json:"serviceType"`
|
|
||||||
ServiceCount int `json:"serviceCount"`
|
|
||||||
OfficeV1 []interface{} `json:"officeV1"`
|
|
||||||
Pid interface{} `json:"pid"`
|
|
||||||
Role interface{} `json:"role"`
|
|
||||||
} `json:"resultList"`
|
|
||||||
TotalPage int `json:"totalPage"`
|
|
||||||
CurrentPage int `json:"currentPage"`
|
|
||||||
RealName interface{} `json:"realName"`
|
|
||||||
AdviceQuery interface{} `json:"adviceQuery"`
|
|
||||||
} `json:"data"`
|
|
||||||
VipMessage string `json:"vipMessage"`
|
|
||||||
Special string `json:"special"`
|
|
||||||
State string `json:"state"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type SearchHumanSuggestResult struct {
|
|
||||||
Result SearchHumanSuggestResponse // 结果
|
|
||||||
Body []byte // 内容
|
|
||||||
Err error // 错误
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewSearchHumanSuggestResult(result SearchHumanSuggestResponse, body []byte, err error) *SearchHumanSuggestResult {
|
|
||||||
return &SearchHumanSuggestResult{Result: result, Body: body, Err: err}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (app *App) SearchHumanSuggest(key string) *SearchHumanSuggestResult {
|
|
||||||
body, err := app.request(fmt.Sprintf("https://www.tianyancha.com/search/humanSuggest.json?key=%s", key), map[string]interface{}{}, http.MethodGet)
|
|
||||||
// 定义
|
|
||||||
var response SearchHumanSuggestResponse
|
|
||||||
err = json.Unmarshal(body, &response)
|
|
||||||
return NewSearchHumanSuggestResult(response, body, err)
|
|
||||||
}
|
|
@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
codecov:
|
||||||
|
require_ci_to_pass: true
|
||||||
|
comment:
|
||||||
|
behavior: default
|
||||||
|
layout: reach, diff, flags, files, footer
|
||||||
|
require_base: false
|
||||||
|
require_changes: false
|
||||||
|
require_head: true
|
||||||
|
coverage:
|
||||||
|
precision: 2
|
||||||
|
range:
|
||||||
|
- 70
|
||||||
|
- 100
|
||||||
|
round: down
|
||||||
|
status:
|
||||||
|
changes: false
|
||||||
|
patch: true
|
||||||
|
project: true
|
||||||
|
parsers:
|
||||||
|
gcov:
|
||||||
|
branch_detection:
|
||||||
|
conditional: true
|
||||||
|
loop: true
|
||||||
|
macro: false
|
||||||
|
method: false
|
||||||
|
javascript:
|
||||||
|
enable_partials: false
|
@ -0,0 +1,11 @@
|
|||||||
|
.idea
|
||||||
|
.DS_Store
|
||||||
|
/server/server.exe
|
||||||
|
/server/server
|
||||||
|
/server/server_dar*
|
||||||
|
/server/server_fre*
|
||||||
|
/server/server_win*
|
||||||
|
/server/server_net*
|
||||||
|
/server/server_ope*
|
||||||
|
/server/server_lin*
|
||||||
|
CHANGELOG.md
|
@ -0,0 +1,201 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
@ -0,0 +1,246 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
minimumEntriesInShard = 10 // Minimum number of entries in single shard
|
||||||
|
)
|
||||||
|
|
||||||
|
// BigCache is fast, concurrent, evicting cache created to keep big number of entries without impact on performance.
|
||||||
|
// It keeps entries on heap but omits GC for them. To achieve that, operations take place on byte arrays,
|
||||||
|
// therefore entries (de)serialization in front of the cache will be needed in most use cases.
|
||||||
|
type BigCache struct {
|
||||||
|
shards []*cacheShard
|
||||||
|
lifeWindow uint64
|
||||||
|
clock clock
|
||||||
|
hash Hasher
|
||||||
|
config Config
|
||||||
|
shardMask uint64
|
||||||
|
close chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Response will contain metadata about the entry for which GetWithInfo(key) was called
|
||||||
|
type Response struct {
|
||||||
|
EntryStatus RemoveReason
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveReason is a value used to signal to the user why a particular key was removed in the OnRemove callback.
|
||||||
|
type RemoveReason uint32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Expired means the key is past its LifeWindow.
|
||||||
|
Expired = RemoveReason(1)
|
||||||
|
// NoSpace means the key is the oldest and the cache size was at its maximum when Set was called, or the
|
||||||
|
// entry exceeded the maximum shard size.
|
||||||
|
NoSpace = RemoveReason(2)
|
||||||
|
// Deleted means Delete was called and this key was removed as a result.
|
||||||
|
Deleted = RemoveReason(3)
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewBigCache initialize new instance of BigCache
|
||||||
|
func NewBigCache(config Config) (*BigCache, error) {
|
||||||
|
return newBigCache(config, &systemClock{})
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBigCache(config Config, clock clock) (*BigCache, error) {
|
||||||
|
if !isPowerOfTwo(config.Shards) {
|
||||||
|
return nil, fmt.Errorf("Shards number must be power of two")
|
||||||
|
}
|
||||||
|
if config.MaxEntrySize < 0 {
|
||||||
|
return nil, fmt.Errorf("MaxEntrySize must be >= 0")
|
||||||
|
}
|
||||||
|
if config.MaxEntriesInWindow < 0 {
|
||||||
|
return nil, fmt.Errorf("MaxEntriesInWindow must be >= 0")
|
||||||
|
}
|
||||||
|
if config.HardMaxCacheSize < 0 {
|
||||||
|
return nil, fmt.Errorf("HardMaxCacheSize must be >= 0")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.Hasher == nil {
|
||||||
|
config.Hasher = newDefaultHasher()
|
||||||
|
}
|
||||||
|
|
||||||
|
cache := &BigCache{
|
||||||
|
shards: make([]*cacheShard, config.Shards),
|
||||||
|
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||||
|
clock: clock,
|
||||||
|
hash: config.Hasher,
|
||||||
|
config: config,
|
||||||
|
shardMask: uint64(config.Shards - 1),
|
||||||
|
close: make(chan struct{}),
|
||||||
|
}
|
||||||
|
|
||||||
|
var onRemove func(wrappedEntry []byte, reason RemoveReason)
|
||||||
|
if config.OnRemoveWithMetadata != nil {
|
||||||
|
onRemove = cache.providedOnRemoveWithMetadata
|
||||||
|
} else if config.OnRemove != nil {
|
||||||
|
onRemove = cache.providedOnRemove
|
||||||
|
} else if config.OnRemoveWithReason != nil {
|
||||||
|
onRemove = cache.providedOnRemoveWithReason
|
||||||
|
} else {
|
||||||
|
onRemove = cache.notProvidedOnRemove
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < config.Shards; i++ {
|
||||||
|
cache.shards[i] = initNewShard(config, onRemove, clock)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.CleanWindow > 0 {
|
||||||
|
go func() {
|
||||||
|
ticker := time.NewTicker(config.CleanWindow)
|
||||||
|
defer ticker.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case t := <-ticker.C:
|
||||||
|
cache.cleanUp(uint64(t.Unix()))
|
||||||
|
case <-cache.close:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
return cache, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close is used to signal a shutdown of the cache when you are done with it.
|
||||||
|
// This allows the cleaning goroutines to exit and ensures references are not
|
||||||
|
// kept to the cache preventing GC of the entire cache.
|
||||||
|
func (c *BigCache) Close() error {
|
||||||
|
close(c.close)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get reads entry for the key.
|
||||||
|
// It returns an ErrEntryNotFound when
|
||||||
|
// no entry exists for the given key.
|
||||||
|
func (c *BigCache) Get(key string) ([]byte, error) {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.get(key, hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetWithInfo reads entry for the key with Response info.
|
||||||
|
// It returns an ErrEntryNotFound when
|
||||||
|
// no entry exists for the given key.
|
||||||
|
func (c *BigCache) GetWithInfo(key string) ([]byte, Response, error) {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.getWithInfo(key, hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set saves entry under the key
|
||||||
|
func (c *BigCache) Set(key string, entry []byte) error {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.set(key, hashedKey, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append appends entry under the key if key exists, otherwise
|
||||||
|
// it will set the key (same behaviour as Set()). With Append() you can
|
||||||
|
// concatenate multiple entries under the same key in an lock optimized way.
|
||||||
|
func (c *BigCache) Append(key string, entry []byte) error {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.append(key, hashedKey, entry)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete removes the key
|
||||||
|
func (c *BigCache) Delete(key string) error {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.del(hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset empties all cache shards
|
||||||
|
func (c *BigCache) Reset() error {
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
shard.reset(c.config)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len computes number of entries in cache
|
||||||
|
func (c *BigCache) Len() int {
|
||||||
|
var len int
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
len += shard.len()
|
||||||
|
}
|
||||||
|
return len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity returns amount of bytes store in the cache.
|
||||||
|
func (c *BigCache) Capacity() int {
|
||||||
|
var len int
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
len += shard.capacity()
|
||||||
|
}
|
||||||
|
return len
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stats returns cache's statistics
|
||||||
|
func (c *BigCache) Stats() Stats {
|
||||||
|
var s Stats
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
tmp := shard.getStats()
|
||||||
|
s.Hits += tmp.Hits
|
||||||
|
s.Misses += tmp.Misses
|
||||||
|
s.DelHits += tmp.DelHits
|
||||||
|
s.DelMisses += tmp.DelMisses
|
||||||
|
s.Collisions += tmp.Collisions
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyMetadata returns number of times a cached resource was requested.
|
||||||
|
func (c *BigCache) KeyMetadata(key string) Metadata {
|
||||||
|
hashedKey := c.hash.Sum64(key)
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
return shard.getKeyMetadataWithLock(hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Iterator returns iterator function to iterate over EntryInfo's from whole cache.
|
||||||
|
func (c *BigCache) Iterator() *EntryInfoIterator {
|
||||||
|
return newIterator(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||||
|
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||||
|
if currentTimestamp-oldestTimestamp > c.lifeWindow {
|
||||||
|
evict(Expired)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) cleanUp(currentTimestamp uint64) {
|
||||||
|
for _, shard := range c.shards {
|
||||||
|
shard.cleanUp(currentTimestamp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) {
|
||||||
|
return c.shards[hashedKey&c.shardMask]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) providedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) providedOnRemoveWithReason(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
if c.config.onRemoveFilter == 0 || (1<<uint(reason))&c.config.onRemoveFilter > 0 {
|
||||||
|
c.config.OnRemoveWithReason(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), reason)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *BigCache) providedOnRemoveWithMetadata(wrappedEntry []byte, reason RemoveReason) {
|
||||||
|
hashedKey := c.hash.Sum64(readKeyFromEntry(wrappedEntry))
|
||||||
|
shard := c.getShard(hashedKey)
|
||||||
|
c.config.OnRemoveWithMetadata(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry), shard.getKeyMetadata(hashedKey))
|
||||||
|
}
|
@ -0,0 +1,11 @@
|
|||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
func bytesToString(b []byte) string {
|
||||||
|
return *(*string)(unsafe.Pointer(&b))
|
||||||
|
}
|
@ -0,0 +1,7 @@
|
|||||||
|
// +build appengine
|
||||||
|
|
||||||
|
package bigcache
|
||||||
|
|
||||||
|
func bytesToString(b []byte) string {
|
||||||
|
return string(b)
|
||||||
|
}
|
@ -0,0 +1,14 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
type clock interface {
|
||||||
|
Epoch() int64
|
||||||
|
}
|
||||||
|
|
||||||
|
type systemClock struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c systemClock) Epoch() int64 {
|
||||||
|
return time.Now().Unix()
|
||||||
|
}
|
@ -0,0 +1,83 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
timestampSizeInBytes = 8 // Number of bytes used for timestamp
|
||||||
|
hashSizeInBytes = 8 // Number of bytes used for hash
|
||||||
|
keySizeInBytes = 2 // Number of bytes used for size of entry key
|
||||||
|
headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers
|
||||||
|
)
|
||||||
|
|
||||||
|
func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte {
|
||||||
|
keyLength := len(key)
|
||||||
|
blobLength := len(entry) + headersSizeInBytes + keyLength
|
||||||
|
|
||||||
|
if blobLength > len(*buffer) {
|
||||||
|
*buffer = make([]byte, blobLength)
|
||||||
|
}
|
||||||
|
blob := *buffer
|
||||||
|
|
||||||
|
binary.LittleEndian.PutUint64(blob, timestamp)
|
||||||
|
binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash)
|
||||||
|
binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength))
|
||||||
|
copy(blob[headersSizeInBytes:], key)
|
||||||
|
copy(blob[headersSizeInBytes+keyLength:], entry)
|
||||||
|
|
||||||
|
return blob[:blobLength]
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendToWrappedEntry(timestamp uint64, wrappedEntry []byte, entry []byte, buffer *[]byte) []byte {
|
||||||
|
blobLength := len(wrappedEntry) + len(entry)
|
||||||
|
if blobLength > len(*buffer) {
|
||||||
|
*buffer = make([]byte, blobLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
blob := *buffer
|
||||||
|
|
||||||
|
binary.LittleEndian.PutUint64(blob, timestamp)
|
||||||
|
copy(blob[timestampSizeInBytes:], wrappedEntry[timestampSizeInBytes:])
|
||||||
|
copy(blob[len(wrappedEntry):], entry)
|
||||||
|
|
||||||
|
return blob[:blobLength]
|
||||||
|
}
|
||||||
|
|
||||||
|
func readEntry(data []byte) []byte {
|
||||||
|
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||||
|
|
||||||
|
// copy on read
|
||||||
|
dst := make([]byte, len(data)-int(headersSizeInBytes+length))
|
||||||
|
copy(dst, data[headersSizeInBytes+length:])
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func readTimestampFromEntry(data []byte) uint64 {
|
||||||
|
return binary.LittleEndian.Uint64(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func readKeyFromEntry(data []byte) string {
|
||||||
|
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||||
|
|
||||||
|
// copy on read
|
||||||
|
dst := make([]byte, length)
|
||||||
|
copy(dst, data[headersSizeInBytes:headersSizeInBytes+length])
|
||||||
|
|
||||||
|
return bytesToString(dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
func compareKeyFromEntry(data []byte, key string) bool {
|
||||||
|
length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:])
|
||||||
|
|
||||||
|
return bytesToString(data[headersSizeInBytes:headersSizeInBytes+length]) == key
|
||||||
|
}
|
||||||
|
|
||||||
|
func readHashFromEntry(data []byte) uint64 {
|
||||||
|
return binary.LittleEndian.Uint64(data[timestampSizeInBytes:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func resetKeyFromEntry(data []byte) {
|
||||||
|
binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0)
|
||||||
|
}
|
@ -0,0 +1,8 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import "errors"
|
||||||
|
|
||||||
|
var (
	// ErrEntryNotFound is an error type struct which is returned when entry was not found for provided key
	ErrEntryNotFound = errors.New("Entry not found")
)
|
@ -0,0 +1,8 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
// Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions
// (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e.
// you can use FarmHash family).
type Hasher interface {
	// Sum64 returns the unsigned 64 bit hash of the given string.
	Sum64(string) uint64
}
|
@ -0,0 +1,146 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// iteratorError is a string-backed error type for iterator failures; it lets
// the error values below be declared as untyped constants.
type iteratorError string

// Error implements the error interface.
func (e iteratorError) Error() string {
	return string(e)
}

// ErrInvalidIteratorState is reported when iterator is in invalid state
const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. Use SetNext() to move to next position")

// ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying
const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache")
|
||||||
|
|
||||||
|
// emptyEntryInfo is the zero value returned when the iterator has no entry.
var emptyEntryInfo = EntryInfo{}

// EntryInfo holds informations about entry in the cache
type EntryInfo struct {
	timestamp uint64 // insertion time as stored in the wrapped entry
	hash      uint64 // hashed key
	key       string // original key
	value     []byte // entry payload
	err       error  // error encountered while reading the entry, if any
}
|
||||||
|
|
||||||
|
// Key returns entry's underlying key
|
||||||
|
func (e EntryInfo) Key() string {
|
||||||
|
return e.key
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hash returns entry's hash value
|
||||||
|
func (e EntryInfo) Hash() uint64 {
|
||||||
|
return e.hash
|
||||||
|
}
|
||||||
|
|
||||||
|
// Timestamp returns entry's timestamp (time of insertion)
|
||||||
|
func (e EntryInfo) Timestamp() uint64 {
|
||||||
|
return e.timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns entry's underlying value
|
||||||
|
func (e EntryInfo) Value() []byte {
|
||||||
|
return e.value
|
||||||
|
}
|
||||||
|
|
||||||
|
// EntryInfoIterator allows to iterate over entries in the cache
|
||||||
|
type EntryInfoIterator struct {
|
||||||
|
mutex sync.Mutex
|
||||||
|
cache *BigCache
|
||||||
|
currentShard int
|
||||||
|
currentIndex int
|
||||||
|
currentEntryInfo EntryInfo
|
||||||
|
elements []uint64
|
||||||
|
elementsCount int
|
||||||
|
valid bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNext moves to next element and returns true if it exists.
|
||||||
|
func (it *EntryInfoIterator) SetNext() bool {
|
||||||
|
it.mutex.Lock()
|
||||||
|
|
||||||
|
it.valid = false
|
||||||
|
it.currentIndex++
|
||||||
|
|
||||||
|
if it.elementsCount > it.currentIndex {
|
||||||
|
it.valid = true
|
||||||
|
|
||||||
|
empty := it.setCurrentEntry()
|
||||||
|
it.mutex.Unlock()
|
||||||
|
|
||||||
|
if empty {
|
||||||
|
return it.SetNext()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := it.currentShard + 1; i < it.cache.config.Shards; i++ {
|
||||||
|
it.elements, it.elementsCount = it.cache.shards[i].copyHashedKeys()
|
||||||
|
|
||||||
|
// Non empty shard - stick with it
|
||||||
|
if it.elementsCount > 0 {
|
||||||
|
it.currentIndex = 0
|
||||||
|
it.currentShard = i
|
||||||
|
it.valid = true
|
||||||
|
|
||||||
|
empty := it.setCurrentEntry()
|
||||||
|
it.mutex.Unlock()
|
||||||
|
|
||||||
|
if empty {
|
||||||
|
return it.SetNext()
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
it.mutex.Unlock()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (it *EntryInfoIterator) setCurrentEntry() bool {
|
||||||
|
var entryNotFound = false
|
||||||
|
entry, err := it.cache.shards[it.currentShard].getEntry(it.elements[it.currentIndex])
|
||||||
|
|
||||||
|
if err == ErrEntryNotFound {
|
||||||
|
it.currentEntryInfo = emptyEntryInfo
|
||||||
|
entryNotFound = true
|
||||||
|
} else if err != nil {
|
||||||
|
it.currentEntryInfo = EntryInfo{
|
||||||
|
err: err,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
it.currentEntryInfo = EntryInfo{
|
||||||
|
timestamp: readTimestampFromEntry(entry),
|
||||||
|
hash: readHashFromEntry(entry),
|
||||||
|
key: readKeyFromEntry(entry),
|
||||||
|
value: readEntry(entry),
|
||||||
|
err: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
func newIterator(cache *BigCache) *EntryInfoIterator {
|
||||||
|
elements, count := cache.shards[0].copyHashedKeys()
|
||||||
|
|
||||||
|
return &EntryInfoIterator{
|
||||||
|
cache: cache,
|
||||||
|
currentShard: 0,
|
||||||
|
currentIndex: -1,
|
||||||
|
elements: elements,
|
||||||
|
elementsCount: count,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns current value from the iterator
|
||||||
|
func (it *EntryInfoIterator) Value() (EntryInfo, error) {
|
||||||
|
if !it.valid {
|
||||||
|
return emptyEntryInfo, ErrInvalidIteratorState
|
||||||
|
}
|
||||||
|
|
||||||
|
return it.currentEntryInfo, it.currentEntryInfo.err
|
||||||
|
}
|
@ -0,0 +1,30 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logger is invoked when `Config.Verbose=true`
type Logger interface {
	Printf(format string, v ...interface{})
}

// Compile-time safeguard: break the build if stdlib's `log.Logger` ever stops
// satisfying our `Logger` interface.
// see https://golang.org/doc/faq#guarantee_satisfies_interface
var _ Logger = &log.Logger{}
|
||||||
|
|
||||||
|
// DefaultLogger returns a `Logger` implementation
// backed by stdlib's log, writing to stdout with the standard flags.
func DefaultLogger() *log.Logger {
	return log.New(os.Stdout, "", log.LstdFlags)
}
|
||||||
|
|
||||||
|
func newLogger(custom Logger) Logger {
|
||||||
|
if custom != nil {
|
||||||
|
return custom
|
||||||
|
}
|
||||||
|
|
||||||
|
return DefaultLogger()
|
||||||
|
}
|
@ -0,0 +1,269 @@
|
|||||||
|
package queue
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Minimal number of bytes a stored blob occupies:
	// 1 byte uvarint blobsize + timestampSizeInBytes + hashSizeInBytes.
	minimumHeaderSize = 17
	// Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index
	leftMarginIndex = 1
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
errEmptyQueue = &queueError{"Empty queue"}
|
||||||
|
errInvalidIndex = &queueError{"Index must be greater than zero. Invalid index."}
|
||||||
|
errIndexOutOfBounds = &queueError{"Index out of range"}
|
||||||
|
)
|
||||||
|
|
||||||
|
// BytesQueue is a non-thread safe queue type of fifo based on bytes array.
// For every push operation index of entry is returned. It can be used to read the entry later
type BytesQueue struct {
	full         bool   // tail caught up with head; no free space left
	array        []byte // backing byte storage
	capacity     int    // current length of array
	maxCapacity  int    // upper bound for growth; 0 means unlimited
	head         int    // index of the oldest entry
	tail         int    // index where the next entry will be written
	count        int    // number of entries currently stored
	rightMargin  int    // end of the used region of array
	headerBuffer []byte // scratch space for uvarint entry headers
	verbose      bool   // log allocations when true
}
|
||||||
|
|
||||||
|
// queueError is a simple error implementation used for queue failure modes.
type queueError struct {
	message string
}
|
||||||
|
|
||||||
|
// getNeededSize returns the number of bytes an entry of length need in the queue
// (payload plus its uvarint size header). The thresholds account for the
// header bytes themselves, which are included in the encoded block size.
func getNeededSize(length int) int {
	var header int
	switch {
	case length < 127: // 1<<7-1
		header = 1
	case length < 16382: // 1<<14-2
		header = 2
	case length < 2097149: // 1<<21 -3
		header = 3
	case length < 268435452: // 1<<28 -4
		header = 4
	default:
		header = 5
	}

	return length + header
}
|
||||||
|
|
||||||
|
// NewBytesQueue initialize new bytes queue.
|
||||||
|
// capacity is used in bytes array allocation
|
||||||
|
// When verbose flag is set then information about memory allocation are printed
|
||||||
|
func NewBytesQueue(capacity int, maxCapacity int, verbose bool) *BytesQueue {
|
||||||
|
return &BytesQueue{
|
||||||
|
array: make([]byte, capacity),
|
||||||
|
capacity: capacity,
|
||||||
|
maxCapacity: maxCapacity,
|
||||||
|
headerBuffer: make([]byte, binary.MaxVarintLen32),
|
||||||
|
tail: leftMarginIndex,
|
||||||
|
head: leftMarginIndex,
|
||||||
|
rightMargin: leftMarginIndex,
|
||||||
|
verbose: verbose,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset removes all entries from queue
|
||||||
|
func (q *BytesQueue) Reset() {
|
||||||
|
// Just reset indexes
|
||||||
|
q.tail = leftMarginIndex
|
||||||
|
q.head = leftMarginIndex
|
||||||
|
q.rightMargin = leftMarginIndex
|
||||||
|
q.count = 0
|
||||||
|
q.full = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed.
|
||||||
|
// Returns index for pushed data or error if maximum size queue limit is reached.
|
||||||
|
func (q *BytesQueue) Push(data []byte) (int, error) {
|
||||||
|
neededSize := getNeededSize(len(data))
|
||||||
|
|
||||||
|
if !q.canInsertAfterTail(neededSize) {
|
||||||
|
if q.canInsertBeforeHead(neededSize) {
|
||||||
|
q.tail = leftMarginIndex
|
||||||
|
} else if q.capacity+neededSize >= q.maxCapacity && q.maxCapacity > 0 {
|
||||||
|
return -1, &queueError{"Full queue. Maximum size limit reached."}
|
||||||
|
} else {
|
||||||
|
q.allocateAdditionalMemory(neededSize)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
index := q.tail
|
||||||
|
|
||||||
|
q.push(data, neededSize)
|
||||||
|
|
||||||
|
return index, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *BytesQueue) allocateAdditionalMemory(minimum int) {
|
||||||
|
start := time.Now()
|
||||||
|
if q.capacity < minimum {
|
||||||
|
q.capacity += minimum
|
||||||
|
}
|
||||||
|
q.capacity = q.capacity * 2
|
||||||
|
if q.capacity > q.maxCapacity && q.maxCapacity > 0 {
|
||||||
|
q.capacity = q.maxCapacity
|
||||||
|
}
|
||||||
|
|
||||||
|
oldArray := q.array
|
||||||
|
q.array = make([]byte, q.capacity)
|
||||||
|
|
||||||
|
if leftMarginIndex != q.rightMargin {
|
||||||
|
copy(q.array, oldArray[:q.rightMargin])
|
||||||
|
|
||||||
|
if q.tail <= q.head {
|
||||||
|
if q.tail != q.head {
|
||||||
|
// created slice is slightly larger then need but this is fine after only the needed bytes are copied
|
||||||
|
q.push(make([]byte, q.head-q.tail), q.head-q.tail)
|
||||||
|
}
|
||||||
|
|
||||||
|
q.head = leftMarginIndex
|
||||||
|
q.tail = q.rightMargin
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
q.full = false
|
||||||
|
|
||||||
|
if q.verbose {
|
||||||
|
log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *BytesQueue) push(data []byte, len int) {
|
||||||
|
headerEntrySize := binary.PutUvarint(q.headerBuffer, uint64(len))
|
||||||
|
q.copy(q.headerBuffer, headerEntrySize)
|
||||||
|
|
||||||
|
q.copy(data, len-headerEntrySize)
|
||||||
|
|
||||||
|
if q.tail > q.head {
|
||||||
|
q.rightMargin = q.tail
|
||||||
|
}
|
||||||
|
if q.tail == q.head {
|
||||||
|
q.full = true
|
||||||
|
}
|
||||||
|
|
||||||
|
q.count++
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q *BytesQueue) copy(data []byte, len int) {
|
||||||
|
q.tail += copy(q.array[q.tail:], data[:len])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pop reads the oldest entry from queue and moves head pointer to the next one
|
||||||
|
func (q *BytesQueue) Pop() ([]byte, error) {
|
||||||
|
data, blockSize, err := q.peek(q.head)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
q.head += blockSize
|
||||||
|
q.count--
|
||||||
|
|
||||||
|
if q.head == q.rightMargin {
|
||||||
|
q.head = leftMarginIndex
|
||||||
|
if q.tail == q.rightMargin {
|
||||||
|
q.tail = leftMarginIndex
|
||||||
|
}
|
||||||
|
q.rightMargin = q.tail
|
||||||
|
}
|
||||||
|
|
||||||
|
q.full = false
|
||||||
|
|
||||||
|
return data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peek reads the oldest entry from list without moving head pointer
|
||||||
|
func (q *BytesQueue) Peek() ([]byte, error) {
|
||||||
|
data, _, err := q.peek(q.head)
|
||||||
|
return data, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get reads entry from index
|
||||||
|
func (q *BytesQueue) Get(index int) ([]byte, error) {
|
||||||
|
data, _, err := q.peek(index)
|
||||||
|
return data, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckGet checks if an entry can be read from index
|
||||||
|
func (q *BytesQueue) CheckGet(index int) error {
|
||||||
|
return q.peekCheckErr(index)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capacity returns number of allocated bytes for queue
|
||||||
|
func (q *BytesQueue) Capacity() int {
|
||||||
|
return q.capacity
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns number of entries kept in queue
|
||||||
|
func (q *BytesQueue) Len() int {
|
||||||
|
return q.count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error returns error message
|
||||||
|
func (e *queueError) Error() string {
|
||||||
|
return e.message
|
||||||
|
}
|
||||||
|
|
||||||
|
// peekCheckErr is identical to peek, but does not actually return any data
|
||||||
|
func (q *BytesQueue) peekCheckErr(index int) error {
|
||||||
|
|
||||||
|
if q.count == 0 {
|
||||||
|
return errEmptyQueue
|
||||||
|
}
|
||||||
|
|
||||||
|
if index <= 0 {
|
||||||
|
return errInvalidIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
if index >= len(q.array) {
|
||||||
|
return errIndexOutOfBounds
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// peek returns the data from index and the number of bytes to encode the length of the data in uvarint format
|
||||||
|
func (q *BytesQueue) peek(index int) ([]byte, int, error) {
|
||||||
|
err := q.peekCheckErr(index)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
blockSize, n := binary.Uvarint(q.array[index:])
|
||||||
|
return q.array[index+n : index+int(blockSize)], int(blockSize), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// canInsertAfterTail returns true if it's possible to insert an entry of size of need after the tail of the queue
|
||||||
|
func (q *BytesQueue) canInsertAfterTail(need int) bool {
|
||||||
|
if q.full {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if q.tail >= q.head {
|
||||||
|
return q.capacity-q.tail >= need
|
||||||
|
}
|
||||||
|
// 1. there is exactly need bytes between head and tail, so we do not need
|
||||||
|
// to reserve extra space for a potential empty entry when realloc this queue
|
||||||
|
// 2. still have unused space between tail and head, then we must reserve
|
||||||
|
// at least headerEntrySize bytes so we can put an empty entry
|
||||||
|
return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// canInsertBeforeHead returns true if it's possible to insert an entry of size of need before the head of the queue
|
||||||
|
func (q *BytesQueue) canInsertBeforeHead(need int) bool {
|
||||||
|
if q.full {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if q.tail >= q.head {
|
||||||
|
return q.head-leftMarginIndex == need || q.head-leftMarginIndex >= need+minimumHeaderSize
|
||||||
|
}
|
||||||
|
return q.head-q.tail == need || q.head-q.tail >= need+minimumHeaderSize
|
||||||
|
}
|
@ -0,0 +1,434 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/allegro/bigcache/v3/queue"
|
||||||
|
)
|
||||||
|
|
||||||
|
type onRemoveCallback func(wrappedEntry []byte, reason RemoveReason)
|
||||||
|
|
||||||
|
// Metadata contains information of a specific entry
|
||||||
|
type Metadata struct {
|
||||||
|
RequestCount uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
type cacheShard struct {
|
||||||
|
hashmap map[uint64]uint32
|
||||||
|
entries queue.BytesQueue
|
||||||
|
lock sync.RWMutex
|
||||||
|
entryBuffer []byte
|
||||||
|
onRemove onRemoveCallback
|
||||||
|
|
||||||
|
isVerbose bool
|
||||||
|
statsEnabled bool
|
||||||
|
logger Logger
|
||||||
|
clock clock
|
||||||
|
lifeWindow uint64
|
||||||
|
|
||||||
|
hashmapStats map[uint64]uint32
|
||||||
|
stats Stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getWithInfo(key string, hashedKey uint64) (entry []byte, resp Response, err error) {
|
||||||
|
currentTime := uint64(s.clock.Epoch())
|
||||||
|
s.lock.RLock()
|
||||||
|
wrappedEntry, err := s.getWrappedEntry(hashedKey)
|
||||||
|
if err != nil {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return nil, resp, err
|
||||||
|
}
|
||||||
|
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.collision()
|
||||||
|
if s.isVerbose {
|
||||||
|
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
|
||||||
|
}
|
||||||
|
return nil, resp, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
entry = readEntry(wrappedEntry)
|
||||||
|
oldestTimeStamp := readTimestampFromEntry(wrappedEntry)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.hit(hashedKey)
|
||||||
|
if currentTime-oldestTimeStamp >= s.lifeWindow {
|
||||||
|
resp.EntryStatus = Expired
|
||||||
|
}
|
||||||
|
return entry, resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
wrappedEntry, err := s.getWrappedEntry(hashedKey)
|
||||||
|
if err != nil {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.collision()
|
||||||
|
if s.isVerbose {
|
||||||
|
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
|
||||||
|
}
|
||||||
|
return nil, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
entry := readEntry(wrappedEntry)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.hit(hashedKey)
|
||||||
|
|
||||||
|
return entry, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getWrappedEntry(hashedKey uint64) ([]byte, error) {
|
||||||
|
itemIndex := s.hashmap[hashedKey]
|
||||||
|
|
||||||
|
if itemIndex == 0 {
|
||||||
|
s.miss()
|
||||||
|
return nil, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||||
|
if err != nil {
|
||||||
|
s.miss()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return wrappedEntry, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getValidWrapEntry(key string, hashedKey uint64) ([]byte, error) {
|
||||||
|
wrappedEntry, err := s.getWrappedEntry(hashedKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !compareKeyFromEntry(wrappedEntry, key) {
|
||||||
|
s.collision()
|
||||||
|
if s.isVerbose {
|
||||||
|
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, readKeyFromEntry(wrappedEntry), hashedKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, ErrEntryNotFound
|
||||||
|
}
|
||||||
|
s.hitWithoutLock(hashedKey)
|
||||||
|
|
||||||
|
return wrappedEntry, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
|
||||||
|
currentTimestamp := uint64(s.clock.Epoch())
|
||||||
|
|
||||||
|
s.lock.Lock()
|
||||||
|
|
||||||
|
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
|
||||||
|
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
|
||||||
|
resetKeyFromEntry(previousEntry)
|
||||||
|
//remove hashkey
|
||||||
|
delete(s.hashmap, hashedKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||||
|
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
|
||||||
|
|
||||||
|
for {
|
||||||
|
if index, err := s.entries.Push(w); err == nil {
|
||||||
|
s.hashmap[hashedKey] = uint32(index)
|
||||||
|
s.lock.Unlock()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.removeOldestEntry(NoSpace) != nil {
|
||||||
|
s.lock.Unlock()
|
||||||
|
return fmt.Errorf("entry is bigger than max shard size")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byte) error {
|
||||||
|
currentTimestamp := uint64(s.clock.Epoch())
|
||||||
|
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||||
|
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||||
|
}
|
||||||
|
|
||||||
|
w := wrapEntry(currentTimestamp, hashedKey, key, entry, &s.entryBuffer)
|
||||||
|
|
||||||
|
for {
|
||||||
|
if index, err := s.entries.Push(w); err == nil {
|
||||||
|
s.hashmap[hashedKey] = uint32(index)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.removeOldestEntry(NoSpace) != nil {
|
||||||
|
return fmt.Errorf("entry is bigger than max shard size")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) setWrappedEntryWithoutLock(currentTimestamp uint64, w []byte, hashedKey uint64) error {
|
||||||
|
if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 {
|
||||||
|
if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil {
|
||||||
|
resetKeyFromEntry(previousEntry)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err == nil {
|
||||||
|
s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
if index, err := s.entries.Push(w); err == nil {
|
||||||
|
s.hashmap[hashedKey] = uint32(index)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if s.removeOldestEntry(NoSpace) != nil {
|
||||||
|
return fmt.Errorf("entry is bigger than max shard size")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) append(key string, hashedKey uint64, entry []byte) error {
|
||||||
|
s.lock.Lock()
|
||||||
|
wrappedEntry, err := s.getValidWrapEntry(key, hashedKey)
|
||||||
|
|
||||||
|
if err == ErrEntryNotFound {
|
||||||
|
err = s.addNewWithoutLock(key, hashedKey, entry)
|
||||||
|
s.lock.Unlock()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
s.lock.Unlock()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
currentTimestamp := uint64(s.clock.Epoch())
|
||||||
|
|
||||||
|
w := appendToWrappedEntry(currentTimestamp, wrappedEntry, entry, &s.entryBuffer)
|
||||||
|
|
||||||
|
err = s.setWrappedEntryWithoutLock(currentTimestamp, w, hashedKey)
|
||||||
|
s.lock.Unlock()
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) del(hashedKey uint64) error {
|
||||||
|
// Optimistic pre-check using only readlock
|
||||||
|
s.lock.RLock()
|
||||||
|
{
|
||||||
|
itemIndex := s.hashmap[hashedKey]
|
||||||
|
|
||||||
|
if itemIndex == 0 {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.delmiss()
|
||||||
|
return ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := s.entries.CheckGet(int(itemIndex)); err != nil {
|
||||||
|
s.lock.RUnlock()
|
||||||
|
s.delmiss()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.lock.RUnlock()
|
||||||
|
|
||||||
|
s.lock.Lock()
|
||||||
|
{
|
||||||
|
// After obtaining the writelock, we need to read the same again,
|
||||||
|
// since the data delivered earlier may be stale now
|
||||||
|
itemIndex := s.hashmap[hashedKey]
|
||||||
|
|
||||||
|
if itemIndex == 0 {
|
||||||
|
s.lock.Unlock()
|
||||||
|
s.delmiss()
|
||||||
|
return ErrEntryNotFound
|
||||||
|
}
|
||||||
|
|
||||||
|
wrappedEntry, err := s.entries.Get(int(itemIndex))
|
||||||
|
if err != nil {
|
||||||
|
s.lock.Unlock()
|
||||||
|
s.delmiss()
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(s.hashmap, hashedKey)
|
||||||
|
s.onRemove(wrappedEntry, Deleted)
|
||||||
|
if s.statsEnabled {
|
||||||
|
delete(s.hashmapStats, hashedKey)
|
||||||
|
}
|
||||||
|
resetKeyFromEntry(wrappedEntry)
|
||||||
|
}
|
||||||
|
s.lock.Unlock()
|
||||||
|
|
||||||
|
s.delhit()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func(reason RemoveReason) error) bool {
|
||||||
|
oldestTimestamp := readTimestampFromEntry(oldestEntry)
|
||||||
|
if currentTimestamp-oldestTimestamp > s.lifeWindow {
|
||||||
|
evict(Expired)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) cleanUp(currentTimestamp uint64) {
|
||||||
|
s.lock.Lock()
|
||||||
|
for {
|
||||||
|
if oldestEntry, err := s.entries.Peek(); err != nil {
|
||||||
|
break
|
||||||
|
} else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getEntry(hashedKey uint64) ([]byte, error) {
|
||||||
|
s.lock.RLock()
|
||||||
|
|
||||||
|
entry, err := s.getWrappedEntry(hashedKey)
|
||||||
|
// copy entry
|
||||||
|
newEntry := make([]byte, len(entry))
|
||||||
|
copy(newEntry, entry)
|
||||||
|
|
||||||
|
s.lock.RUnlock()
|
||||||
|
|
||||||
|
return newEntry, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) copyHashedKeys() (keys []uint64, next int) {
|
||||||
|
s.lock.RLock()
|
||||||
|
keys = make([]uint64, len(s.hashmap))
|
||||||
|
|
||||||
|
for key := range s.hashmap {
|
||||||
|
keys[next] = key
|
||||||
|
next++
|
||||||
|
}
|
||||||
|
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return keys, next
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) removeOldestEntry(reason RemoveReason) error {
|
||||||
|
oldest, err := s.entries.Pop()
|
||||||
|
if err == nil {
|
||||||
|
hash := readHashFromEntry(oldest)
|
||||||
|
if hash == 0 {
|
||||||
|
// entry has been explicitly deleted with resetKeyFromEntry, ignore
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
delete(s.hashmap, hash)
|
||||||
|
s.onRemove(oldest, reason)
|
||||||
|
if s.statsEnabled {
|
||||||
|
delete(s.hashmapStats, hash)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) reset(config Config) {
|
||||||
|
s.lock.Lock()
|
||||||
|
s.hashmap = make(map[uint64]uint32, config.initialShardSize())
|
||||||
|
s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes)
|
||||||
|
s.entries.Reset()
|
||||||
|
s.lock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) len() int {
|
||||||
|
s.lock.RLock()
|
||||||
|
res := len(s.hashmap)
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) capacity() int {
|
||||||
|
s.lock.RLock()
|
||||||
|
res := s.entries.Capacity()
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getStats() Stats {
|
||||||
|
var stats = Stats{
|
||||||
|
Hits: atomic.LoadInt64(&s.stats.Hits),
|
||||||
|
Misses: atomic.LoadInt64(&s.stats.Misses),
|
||||||
|
DelHits: atomic.LoadInt64(&s.stats.DelHits),
|
||||||
|
DelMisses: atomic.LoadInt64(&s.stats.DelMisses),
|
||||||
|
Collisions: atomic.LoadInt64(&s.stats.Collisions),
|
||||||
|
}
|
||||||
|
return stats
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getKeyMetadataWithLock(key uint64) Metadata {
|
||||||
|
s.lock.RLock()
|
||||||
|
c := s.hashmapStats[key]
|
||||||
|
s.lock.RUnlock()
|
||||||
|
return Metadata{
|
||||||
|
RequestCount: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) getKeyMetadata(key uint64) Metadata {
|
||||||
|
return Metadata{
|
||||||
|
RequestCount: s.hashmapStats[key],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) hit(key uint64) {
|
||||||
|
atomic.AddInt64(&s.stats.Hits, 1)
|
||||||
|
if s.statsEnabled {
|
||||||
|
s.lock.Lock()
|
||||||
|
s.hashmapStats[key]++
|
||||||
|
s.lock.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) hitWithoutLock(key uint64) {
|
||||||
|
atomic.AddInt64(&s.stats.Hits, 1)
|
||||||
|
if s.statsEnabled {
|
||||||
|
s.hashmapStats[key]++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) miss() {
|
||||||
|
atomic.AddInt64(&s.stats.Misses, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) delhit() {
|
||||||
|
atomic.AddInt64(&s.stats.DelHits, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) delmiss() {
|
||||||
|
atomic.AddInt64(&s.stats.DelMisses, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *cacheShard) collision() {
|
||||||
|
atomic.AddInt64(&s.stats.Collisions, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func initNewShard(config Config, callback onRemoveCallback, clock clock) *cacheShard {
|
||||||
|
bytesQueueInitialCapacity := config.initialShardSize() * config.MaxEntrySize
|
||||||
|
maximumShardSizeInBytes := config.maximumShardSizeInBytes()
|
||||||
|
if maximumShardSizeInBytes > 0 && bytesQueueInitialCapacity > maximumShardSizeInBytes {
|
||||||
|
bytesQueueInitialCapacity = maximumShardSizeInBytes
|
||||||
|
}
|
||||||
|
return &cacheShard{
|
||||||
|
hashmap: make(map[uint64]uint32, config.initialShardSize()),
|
||||||
|
hashmapStats: make(map[uint64]uint32, config.initialShardSize()),
|
||||||
|
entries: *queue.NewBytesQueue(bytesQueueInitialCapacity, maximumShardSizeInBytes, config.Verbose),
|
||||||
|
entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes),
|
||||||
|
onRemove: callback,
|
||||||
|
|
||||||
|
isVerbose: config.Verbose,
|
||||||
|
logger: newLogger(config.Logger),
|
||||||
|
clock: clock,
|
||||||
|
lifeWindow: uint64(config.LifeWindow.Seconds()),
|
||||||
|
statsEnabled: config.StatsEnabled,
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,15 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
// Stats stores cache statistics
type Stats struct {
	// Hits is a number of successfully found keys
	Hits int64 `json:"hits"`
	// Misses is a number of not found keys
	Misses int64 `json:"misses"`
	// DelHits is a number of successfully deleted keys
	DelHits int64 `json:"delete_hits"`
	// DelMisses is a number of not deleted keys
	DelMisses int64 `json:"delete_misses"`
	// Collisions is a number of happened key-collisions
	Collisions int64 `json:"collisions"`
}
|
@ -0,0 +1,23 @@
|
|||||||
|
package bigcache
|
||||||
|
|
||||||
|
// max returns the larger of two ints.
func max(a, b int) int {
	if a < b {
		return b
	}
	return a
}

// min returns the smaller of two ints.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
|
||||||
|
|
||||||
|
// convertMBToBytes converts a size expressed in megabytes to bytes.
func convertMBToBytes(value int) int {
	const bytesPerMB = 1024 * 1024
	return value * bytesPerMB
}
|
||||||
|
|
||||||
|
// isPowerOfTwo reports whether number is a positive power of two.
// A power of two has exactly one bit set, so number&(number-1) clears it.
func isPowerOfTwo(number int) bool {
	if number == 0 {
		return false
	}
	return number&(number-1) == 0
}
|
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
@ -0,0 +1,713 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2011 Google Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package memcache provides a client for the memcached cache server.
|
||||||
|
package memcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Similar to:
|
||||||
|
// https://godoc.org/google.golang.org/appengine/memcache
|
||||||
|
|
||||||
|
var (
	// ErrCacheMiss means that a Get failed because the item wasn't present.
	ErrCacheMiss = errors.New("memcache: cache miss")

	// ErrCASConflict means that a CompareAndSwap call failed due to the
	// cached value being modified between the Get and the CompareAndSwap.
	// If the cached value was simply evicted rather than replaced,
	// ErrNotStored will be returned instead.
	ErrCASConflict = errors.New("memcache: compare-and-swap conflict")

	// ErrNotStored means that a conditional write operation (i.e. Add or
	// CompareAndSwap) failed because the condition was not satisfied.
	ErrNotStored = errors.New("memcache: item not stored")

	// ErrServerError means that a server error occurred.
	ErrServerError = errors.New("memcache: server error")

	// ErrNoStats means that no statistics were available.
	ErrNoStats = errors.New("memcache: no statistics available")

	// ErrMalformedKey is returned when an invalid key is used.
	// Keys must be at maximum 250 bytes long and not
	// contain whitespace or control characters.
	ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")

	// ErrNoServers is returned when no servers are configured or available.
	ErrNoServers = errors.New("memcache: no servers configured or available")
)
|
||||||
|
|
||||||
|
const (
	// DefaultTimeout is the default socket read/write timeout, used when
	// Client.Timeout is zero.
	DefaultTimeout = 100 * time.Millisecond

	// DefaultMaxIdleConns is the default maximum number of idle connections
	// kept for any single address, used when Client.MaxIdleConns is less
	// than one.
	DefaultMaxIdleConns = 2
)
|
||||||
|
|
||||||
|
const buffered = 8 // arbitrary buffered channel size, for readability (used for GetMulti's result channel)
|
||||||
|
|
||||||
|
// resumableError returns true if err is only a protocol-level cache error.
// This is used to determine whether or not a server connection should
// be re-used or not. If an error occurs, by default we don't reuse the
// connection, unless it was just a cache error.
//
// Note: these are deliberate identity comparisons against the package's
// sentinel errors; the errors are never wrapped before reaching here.
func resumableError(err error) bool {
	switch err {
	case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey:
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// legalKey reports whether key is valid for the memcache text protocol:
// at most 250 bytes, and no byte may be whitespace, a control character,
// or DEL (0x7f). The check is byte-wise on purpose — the protocol is
// byte-oriented, not rune-oriented.
func legalKey(key string) bool {
	if len(key) > 250 {
		return false
	}
	for _, c := range []byte(key) {
		if c <= ' ' || c == 0x7f {
			return false
		}
	}
	return true
}
|
||||||
|
|
||||||
|
// Protocol tokens and canonical server response lines; these are compared
// verbatim against data read off the wire (memcached's text protocol is
// line-oriented).
var (
	crlf            = []byte("\r\n")
	space           = []byte(" ")
	resultOK        = []byte("OK\r\n")
	resultStored    = []byte("STORED\r\n")
	resultNotStored = []byte("NOT_STORED\r\n")
	resultExists    = []byte("EXISTS\r\n")
	resultNotFound  = []byte("NOT_FOUND\r\n")
	resultDeleted   = []byte("DELETED\r\n")
	resultEnd       = []byte("END\r\n")
	resultOk        = []byte("OK\r\n") // NOTE(review): duplicate of resultOK above — consider consolidating
	resultTouched   = []byte("TOUCHED\r\n")

	resultClientErrorPrefix = []byte("CLIENT_ERROR ")
	versionPrefix           = []byte("VERSION")
)
|
||||||
|
|
||||||
|
// New returns a memcache client using the provided server(s)
// with equal weight. If a server is listed multiple times,
// it gets a proportional amount of weight.
func New(server ...string) *Client {
	ss := new(ServerList)
	// NOTE(review): the result of SetServers is discarded here; an invalid
	// address will only surface later as ErrNoServers — confirm intended.
	ss.SetServers(server...)
	return NewFromSelector(ss)
}
|
||||||
|
|
||||||
|
// NewFromSelector returns a new Client using the provided ServerSelector.
// All other Client fields keep their zero values (see DefaultTimeout and
// DefaultMaxIdleConns for the effective defaults).
func NewFromSelector(ss ServerSelector) *Client {
	return &Client{selector: ss}
}
|
||||||
|
|
||||||
|
// Client is a memcache client.
// It is safe for unlocked use by multiple concurrent goroutines.
type Client struct {
	// Timeout specifies the socket read/write timeout.
	// If zero, DefaultTimeout is used.
	Timeout time.Duration

	// MaxIdleConns specifies the maximum number of idle connections that will
	// be maintained per address. If less than one, DefaultMaxIdleConns will be
	// used.
	//
	// Consider your expected traffic rates and latency carefully. This should
	// be set to a number higher than your peak parallel requests.
	MaxIdleConns int

	// selector maps a key to the server responsible for it.
	selector ServerSelector

	// lk guards freeconn.
	lk sync.Mutex
	// freeconn holds idle connections, keyed by addr.String();
	// lazily initialized by putFreeConn.
	freeconn map[string][]*conn
}
|
||||||
|
|
||||||
|
// Item is an item to be got or stored in a memcached server.
type Item struct {
	// Key is the Item's key (250 bytes maximum).
	Key string

	// Value is the Item's value.
	Value []byte

	// Flags are server-opaque flags whose semantics are entirely
	// up to the app.
	Flags uint32

	// Expiration is the cache expiration time, in seconds: either a relative
	// time from now (up to 1 month), or an absolute Unix epoch time.
	// Zero means the Item has no expiration time.
	Expiration int32

	// Compare and swap ID; populated from the server's "gets" response and
	// consumed by CompareAndSwap.
	casid uint64
}
|
||||||
|
|
||||||
|
// conn is a connection to a server.
type conn struct {
	nc   net.Conn          // underlying network connection
	rw   *bufio.ReadWriter // buffered reader/writer over nc
	addr net.Addr          // server address; key into the client's free pool
	c    *Client           // owning client, for pool and timeout settings
}
|
||||||
|
|
||||||
|
// release returns this connection back to the client's free pool.
func (cn *conn) release() {
	cn.c.putFreeConn(cn.addr, cn)
}
|
||||||
|
|
||||||
|
// extendDeadline pushes the connection's combined read/write deadline out
// by the client's configured network timeout.
func (cn *conn) extendDeadline() {
	cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout()))
}
|
||||||
|
|
||||||
|
// condRelease releases this connection if the error pointed to by err
// is nil (not an error) or is only a protocol level error (e.g. a
// cache miss). The purpose is to not recycle TCP connections that
// are bad.
//
// It takes *error (not error) because it is invoked via defer, which
// must observe the caller's final error value.
func (cn *conn) condRelease(err *error) {
	if *err == nil || resumableError(*err) {
		cn.release()
	} else {
		cn.nc.Close()
	}
}
|
||||||
|
|
||||||
|
func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
|
||||||
|
c.lk.Lock()
|
||||||
|
defer c.lk.Unlock()
|
||||||
|
if c.freeconn == nil {
|
||||||
|
c.freeconn = make(map[string][]*conn)
|
||||||
|
}
|
||||||
|
freelist := c.freeconn[addr.String()]
|
||||||
|
if len(freelist) >= c.maxIdleConns() {
|
||||||
|
cn.nc.Close()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.freeconn[addr.String()] = append(freelist, cn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
|
||||||
|
c.lk.Lock()
|
||||||
|
defer c.lk.Unlock()
|
||||||
|
if c.freeconn == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
freelist, ok := c.freeconn[addr.String()]
|
||||||
|
if !ok || len(freelist) == 0 {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
cn = freelist[len(freelist)-1]
|
||||||
|
c.freeconn[addr.String()] = freelist[:len(freelist)-1]
|
||||||
|
return cn, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) netTimeout() time.Duration {
|
||||||
|
if c.Timeout != 0 {
|
||||||
|
return c.Timeout
|
||||||
|
}
|
||||||
|
return DefaultTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) maxIdleConns() int {
|
||||||
|
if c.MaxIdleConns > 0 {
|
||||||
|
return c.MaxIdleConns
|
||||||
|
}
|
||||||
|
return DefaultMaxIdleConns
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConnectTimeoutError is the error type used when it takes
// too long to connect to the desired host. This level of
// detail can generally be ignored.
type ConnectTimeoutError struct {
	Addr net.Addr // the address the dial attempt timed out against
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
func (cte *ConnectTimeoutError) Error() string {
	return "memcache: connect timeout to " + cte.Addr.String()
}
|
||||||
|
|
||||||
|
// dial opens a new connection to addr within the client's network timeout.
// A dial timeout is reported as *ConnectTimeoutError so callers can
// distinguish it from other failures.
func (c *Client) dial(addr net.Addr) (net.Conn, error) {
	nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
	if err == nil {
		return nc, nil
	}

	if ne, ok := err.(net.Error); ok && ne.Timeout() {
		return nil, &ConnectTimeoutError{addr}
	}

	return nil, err
}
|
||||||
|
|
||||||
|
// getConn returns a ready-to-use connection to addr, reusing an idle pooled
// connection when one exists and dialing a fresh one otherwise. The socket
// deadline is refreshed in either case.
func (c *Client) getConn(addr net.Addr) (*conn, error) {
	cn, ok := c.getFreeConn(addr)
	if ok {
		cn.extendDeadline()
		return cn, nil
	}
	nc, err := c.dial(addr)
	if err != nil {
		return nil, err
	}
	cn = &conn{
		nc:   nc,
		addr: addr,
		rw:   bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)),
		c:    c,
	}
	cn.extendDeadline()
	return cn, nil
}
|
||||||
|
|
||||||
|
// onItem picks the server for item.Key, obtains a connection to it, and
// runs fn with the connection's buffered reader/writer. The connection is
// recycled afterwards unless a non-protocol error occurred (condRelease).
func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error {
	addr, err := c.selector.PickServer(item.Key)
	if err != nil {
		return err
	}
	cn, err := c.getConn(addr)
	if err != nil {
		return err
	}
	// condRelease inspects the final value of err, hence the pointer.
	defer cn.condRelease(&err)
	if err = fn(c, cn.rw, item); err != nil {
		return err
	}
	return nil
}
|
||||||
|
|
||||||
|
// FlushAll sends the flush_all command to every server known to the
// selector, returning the first error encountered by Each.
func (c *Client) FlushAll() error {
	return c.selector.Each(c.flushAllFromAddr)
}
|
||||||
|
|
||||||
|
// Get gets the item for the given key. ErrCacheMiss is returned for a
// memcache cache miss. The key must be at most 250 bytes in length.
func (c *Client) Get(key string) (item *Item, err error) {
	err = c.withKeyAddr(key, func(addr net.Addr) error {
		return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it })
	})
	// No transport error but no item either means the server replied with
	// no VALUE line for this key: a cache miss.
	if err == nil && item == nil {
		err = ErrCacheMiss
	}
	return
}
|
||||||
|
|
||||||
|
// Touch updates the expiry for the given key. The seconds parameter is either
// a Unix timestamp or, if seconds is less than 1 month, the number of seconds
// into the future at which time the item will expire. Zero means the item has
// no expiration time. ErrCacheMiss is returned if the key is not in the cache.
// The key must be at most 250 bytes in length.
func (c *Client) Touch(key string, seconds int32) (err error) {
	return c.withKeyAddr(key, func(addr net.Addr) error {
		return c.touchFromAddr(addr, []string{key}, seconds)
	})
}
|
||||||
|
|
||||||
|
func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
|
||||||
|
if !legalKey(key) {
|
||||||
|
return ErrMalformedKey
|
||||||
|
}
|
||||||
|
addr, err := c.selector.PickServer(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return fn(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
|
||||||
|
cn, err := c.getConn(addr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer cn.condRelease(&err)
|
||||||
|
return fn(cn.rw)
|
||||||
|
}
|
||||||
|
|
||||||
|
// withKeyRw validates key, resolves its server, and runs fn with a buffered
// reader/writer over a connection to that server.
func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
	return c.withKeyAddr(key, func(addr net.Addr) error {
		return c.withAddrRw(addr, fn)
	})
}
|
||||||
|
|
||||||
|
// getFromAddr issues a single "gets" command for keys against addr and
// invokes cb once per item in the response. "gets" (rather than "get") is
// used so the server includes the CAS id, which scanGetResponseLine stores
// in the item.
func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
			return err
		}
		if err := rw.Flush(); err != nil {
			return err
		}
		if err := parseGetResponse(rw.Reader, cb); err != nil {
			return err
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
// flushAllFromAddr send the flush_all command to the given addr
|
||||||
|
func (c *Client) flushAllFromAddr(addr net.Addr) error {
|
||||||
|
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
|
||||||
|
if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := rw.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
line, err := rw.ReadSlice('\n')
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case bytes.Equal(line, resultOk):
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ping sends the version command to the given addr
|
||||||
|
func (c *Client) ping(addr net.Addr) error {
|
||||||
|
return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
|
||||||
|
if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := rw.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
line, err := rw.ReadSlice('\n')
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case bytes.HasPrefix(line, versionPrefix):
|
||||||
|
break
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// touchFromAddr sends one "touch" command per key to addr, updating each
// item's expiration. It returns ErrCacheMiss for the first key the server
// reports as NOT_FOUND, and stops at the first error of any kind.
func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
		for _, key := range keys {
			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
				return err
			}
			if err := rw.Flush(); err != nil {
				return err
			}
			line, err := rw.ReadSlice('\n')
			if err != nil {
				return err
			}
			switch {
			case bytes.Equal(line, resultTouched):
				break
			case bytes.Equal(line, resultNotFound):
				return ErrCacheMiss
			default:
				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
			}
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
// GetMulti is a batch version of Get. The returned map from keys to
|
||||||
|
// items may have fewer elements than the input slice, due to memcache
|
||||||
|
// cache misses. Each key must be at most 250 bytes in length.
|
||||||
|
// If no error is returned, the returned map will also be non-nil.
|
||||||
|
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
|
||||||
|
var lk sync.Mutex
|
||||||
|
m := make(map[string]*Item)
|
||||||
|
addItemToMap := func(it *Item) {
|
||||||
|
lk.Lock()
|
||||||
|
defer lk.Unlock()
|
||||||
|
m[it.Key] = it
|
||||||
|
}
|
||||||
|
|
||||||
|
keyMap := make(map[net.Addr][]string)
|
||||||
|
for _, key := range keys {
|
||||||
|
if !legalKey(key) {
|
||||||
|
return nil, ErrMalformedKey
|
||||||
|
}
|
||||||
|
addr, err := c.selector.PickServer(key)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
keyMap[addr] = append(keyMap[addr], key)
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := make(chan error, buffered)
|
||||||
|
for addr, keys := range keyMap {
|
||||||
|
go func(addr net.Addr, keys []string) {
|
||||||
|
ch <- c.getFromAddr(addr, keys, addItemToMap)
|
||||||
|
}(addr, keys)
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
for _ = range keyMap {
|
||||||
|
if ge := <-ch; ge != nil {
|
||||||
|
err = ge
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item. The response is a sequence of VALUE blocks
// terminated by an "END\r\n" line.
func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
	for {
		line, err := r.ReadSlice('\n')
		if err != nil {
			return err
		}
		if bytes.Equal(line, resultEnd) {
			return nil
		}
		it := new(Item)
		size, err := scanGetResponseLine(line, it)
		if err != nil {
			return err
		}
		// The value is followed on the wire by \r\n, so read size+2 bytes,
		// then verify and strip the trailing CRLF.
		it.Value = make([]byte, size+2)
		_, err = io.ReadFull(r, it.Value)
		if err != nil {
			it.Value = nil
			return err
		}
		if !bytes.HasSuffix(it.Value, crlf) {
			it.Value = nil
			return fmt.Errorf("memcache: corrupt get result read")
		}
		it.Value = it.Value[:size]
		cb(it)
	}
}
|
||||||
|
|
||||||
|
// scanGetResponseLine populates it and returns the declared size of the item.
// It does not read the bytes of the item. The line has the form
// "VALUE <key> <flags> <bytes> [<cas unique>]\r\n"; the CAS id is optional.
func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
	pattern := "VALUE %s %d %d %d\r\n"
	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
	// Only three spaces means the server sent no CAS id; drop the final verb.
	if bytes.Count(line, space) == 3 {
		pattern = "VALUE %s %d %d\r\n"
		dest = dest[:3]
	}
	n, err := fmt.Sscanf(string(line), pattern, dest...)
	if err != nil || n != len(dest) {
		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
	}
	return size, nil
}
|
||||||
|
|
||||||
|
// Set writes the given item, unconditionally.
|
||||||
|
func (c *Client) Set(item *Item) error {
|
||||||
|
return c.onItem(item, (*Client).set)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
|
||||||
|
return c.populateOne(rw, "set", item)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add writes the given item, if no value already exists for its
|
||||||
|
// key. ErrNotStored is returned if that condition is not met.
|
||||||
|
func (c *Client) Add(item *Item) error {
|
||||||
|
return c.onItem(item, (*Client).add)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
|
||||||
|
return c.populateOne(rw, "add", item)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Replace writes the given item, but only if the server *does*
|
||||||
|
// already hold data for this key
|
||||||
|
func (c *Client) Replace(item *Item) error {
|
||||||
|
return c.onItem(item, (*Client).replace)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
|
||||||
|
return c.populateOne(rw, "replace", item)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CompareAndSwap writes the given item that was previously returned
|
||||||
|
// by Get, if the value was neither modified or evicted between the
|
||||||
|
// Get and the CompareAndSwap calls. The item's Key should not change
|
||||||
|
// between calls but all other item fields may differ. ErrCASConflict
|
||||||
|
// is returned if the value was modified in between the
|
||||||
|
// calls. ErrNotStored is returned if the value was evicted in between
|
||||||
|
// the calls.
|
||||||
|
func (c *Client) CompareAndSwap(item *Item) error {
|
||||||
|
return c.onItem(item, (*Client).cas)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
|
||||||
|
return c.populateOne(rw, "cas", item)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
|
||||||
|
if !legalKey(item.Key) {
|
||||||
|
return ErrMalformedKey
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
if verb == "cas" {
|
||||||
|
_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
|
||||||
|
verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
|
||||||
|
} else {
|
||||||
|
_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
|
||||||
|
verb, item.Key, item.Flags, item.Expiration, len(item.Value))
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err = rw.Write(item.Value); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if _, err := rw.Write(crlf); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := rw.Flush(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
line, err := rw.ReadSlice('\n')
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case bytes.Equal(line, resultStored):
|
||||||
|
return nil
|
||||||
|
case bytes.Equal(line, resultNotStored):
|
||||||
|
return ErrNotStored
|
||||||
|
case bytes.Equal(line, resultExists):
|
||||||
|
return ErrCASConflict
|
||||||
|
case bytes.Equal(line, resultNotFound):
|
||||||
|
return ErrCacheMiss
|
||||||
|
}
|
||||||
|
return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
|
||||||
|
_, err := fmt.Fprintf(rw, format, args...)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := rw.Flush(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
line, err := rw.ReadSlice('\n')
|
||||||
|
return line, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
|
||||||
|
line, err := writeReadLine(rw, format, args...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case bytes.Equal(line, resultOK):
|
||||||
|
return nil
|
||||||
|
case bytes.Equal(line, expect):
|
||||||
|
return nil
|
||||||
|
case bytes.Equal(line, resultNotStored):
|
||||||
|
return ErrNotStored
|
||||||
|
case bytes.Equal(line, resultExists):
|
||||||
|
return ErrCASConflict
|
||||||
|
case bytes.Equal(line, resultNotFound):
|
||||||
|
return ErrCacheMiss
|
||||||
|
}
|
||||||
|
return fmt.Errorf("memcache: unexpected response line: %q", string(line))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Delete deletes the item with the provided key. The error ErrCacheMiss is
|
||||||
|
// returned if the item didn't already exist in the cache.
|
||||||
|
func (c *Client) Delete(key string) error {
|
||||||
|
return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
|
||||||
|
return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteAll deletes all items in the cache.
|
||||||
|
func (c *Client) DeleteAll() error {
|
||||||
|
return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
|
||||||
|
return writeExpectf(rw, resultDeleted, "flush_all\r\n")
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ping checks all instances if they are alive. Returns error if any
|
||||||
|
// of them is down.
|
||||||
|
func (c *Client) Ping() error {
|
||||||
|
return c.selector.Each(c.ping)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increment atomically increments key by delta. The return value is
|
||||||
|
// the new value after being incremented or an error. If the value
|
||||||
|
// didn't exist in memcached the error is ErrCacheMiss. The value in
|
||||||
|
// memcached must be an decimal number, or an error will be returned.
|
||||||
|
// On 64-bit overflow, the new value wraps around.
|
||||||
|
func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
|
||||||
|
return c.incrDecr("incr", key, delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrement atomically decrements key by delta. The return value is
|
||||||
|
// the new value after being decremented or an error. If the value
|
||||||
|
// didn't exist in memcached the error is ErrCacheMiss. The value in
|
||||||
|
// memcached must be an decimal number, or an error will be returned.
|
||||||
|
// On underflow, the new value is capped at zero and does not wrap
|
||||||
|
// around.
|
||||||
|
func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
|
||||||
|
return c.incrDecr("decr", key, delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
|
||||||
|
var val uint64
|
||||||
|
err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
|
||||||
|
line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch {
|
||||||
|
case bytes.Equal(line, resultNotFound):
|
||||||
|
return ErrCacheMiss
|
||||||
|
case bytes.HasPrefix(line, resultClientErrorPrefix):
|
||||||
|
errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
|
||||||
|
return errors.New("memcache: client error: " + string(errMsg))
|
||||||
|
}
|
||||||
|
val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return val, err
|
||||||
|
}
|
@ -0,0 +1,129 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2011 Google Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package memcache
|
||||||
|
|
||||||
|
import (
|
||||||
|
"hash/crc32"
|
||||||
|
"net"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ServerSelector is the interface that selects a memcache server
|
||||||
|
// as a function of the item's key.
|
||||||
|
//
|
||||||
|
// All ServerSelector implementations must be safe for concurrent use
|
||||||
|
// by multiple goroutines.
|
||||||
|
type ServerSelector interface {
|
||||||
|
// PickServer returns the server address that a given item
|
||||||
|
// should be shared onto.
|
||||||
|
PickServer(key string) (net.Addr, error)
|
||||||
|
Each(func(net.Addr) error) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerList is a simple ServerSelector. Its zero value is usable.
|
||||||
|
type ServerList struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
addrs []net.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
// staticAddr caches the Network() and String() values from any net.Addr.
|
||||||
|
type staticAddr struct {
|
||||||
|
ntw, str string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStaticAddr(a net.Addr) net.Addr {
|
||||||
|
return &staticAddr{
|
||||||
|
ntw: a.Network(),
|
||||||
|
str: a.String(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *staticAddr) Network() string { return s.ntw }
|
||||||
|
func (s *staticAddr) String() string { return s.str }
|
||||||
|
|
||||||
|
// SetServers changes a ServerList's set of servers at runtime and is
|
||||||
|
// safe for concurrent use by multiple goroutines.
|
||||||
|
//
|
||||||
|
// Each server is given equal weight. A server is given more weight
|
||||||
|
// if it's listed multiple times.
|
||||||
|
//
|
||||||
|
// SetServers returns an error if any of the server names fail to
|
||||||
|
// resolve. No attempt is made to connect to the server. If any error
|
||||||
|
// is returned, no changes are made to the ServerList.
|
||||||
|
func (ss *ServerList) SetServers(servers ...string) error {
|
||||||
|
naddr := make([]net.Addr, len(servers))
|
||||||
|
for i, server := range servers {
|
||||||
|
if strings.Contains(server, "/") {
|
||||||
|
addr, err := net.ResolveUnixAddr("unix", server)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
naddr[i] = newStaticAddr(addr)
|
||||||
|
} else {
|
||||||
|
tcpaddr, err := net.ResolveTCPAddr("tcp", server)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
naddr[i] = newStaticAddr(tcpaddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ss.mu.Lock()
|
||||||
|
defer ss.mu.Unlock()
|
||||||
|
ss.addrs = naddr
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Each iterates over each server calling the given function
|
||||||
|
func (ss *ServerList) Each(f func(net.Addr) error) error {
|
||||||
|
ss.mu.RLock()
|
||||||
|
defer ss.mu.RUnlock()
|
||||||
|
for _, a := range ss.addrs {
|
||||||
|
if err := f(a); nil != err {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// keyBufPool returns []byte buffers for use by PickServer's call to
|
||||||
|
// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the
|
||||||
|
// copies, which at least are bounded in size and small)
|
||||||
|
var keyBufPool = sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
b := make([]byte, 256)
|
||||||
|
return &b
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ServerList) PickServer(key string) (net.Addr, error) {
|
||||||
|
ss.mu.RLock()
|
||||||
|
defer ss.mu.RUnlock()
|
||||||
|
if len(ss.addrs) == 0 {
|
||||||
|
return nil, ErrNoServers
|
||||||
|
}
|
||||||
|
if len(ss.addrs) == 1 {
|
||||||
|
return ss.addrs[0], nil
|
||||||
|
}
|
||||||
|
bufp := keyBufPool.Get().(*[]byte)
|
||||||
|
n := copy(*bufp, key)
|
||||||
|
cs := crc32.ChecksumIEEE((*bufp)[:n])
|
||||||
|
keyBufPool.Put(bufp)
|
||||||
|
|
||||||
|
return ss.addrs[cs%uint32(len(ss.addrs))], nil
|
||||||
|
}
|
@ -0,0 +1,202 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
@ -0,0 +1,5 @@
|
|||||||
|
CoreOS Project
|
||||||
|
Copyright 2018 CoreOS, Inc
|
||||||
|
|
||||||
|
This product includes software developed at CoreOS, Inc.
|
||||||
|
(http://www.coreos.com/).
|
@ -0,0 +1,296 @@
|
|||||||
|
// Copyright 2013-2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Semantic Versions http://semver.org
|
||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Version struct {
|
||||||
|
Major int64
|
||||||
|
Minor int64
|
||||||
|
Patch int64
|
||||||
|
PreRelease PreRelease
|
||||||
|
Metadata string
|
||||||
|
}
|
||||||
|
|
||||||
|
type PreRelease string
|
||||||
|
|
||||||
|
func splitOff(input *string, delim string) (val string) {
|
||||||
|
parts := strings.SplitN(*input, delim, 2)
|
||||||
|
|
||||||
|
if len(parts) == 2 {
|
||||||
|
*input = parts[0]
|
||||||
|
val = parts[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(version string) *Version {
|
||||||
|
return Must(NewVersion(version))
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewVersion(version string) (*Version, error) {
|
||||||
|
v := Version{}
|
||||||
|
|
||||||
|
if err := v.Set(version); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return &v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Must is a helper for wrapping NewVersion and will panic if err is not nil.
|
||||||
|
func Must(v *Version, err error) *Version {
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set parses and updates v from the given version string. Implements flag.Value
|
||||||
|
func (v *Version) Set(version string) error {
|
||||||
|
metadata := splitOff(&version, "+")
|
||||||
|
preRelease := PreRelease(splitOff(&version, "-"))
|
||||||
|
dotParts := strings.SplitN(version, ".", 3)
|
||||||
|
|
||||||
|
if len(dotParts) != 3 {
|
||||||
|
return fmt.Errorf("%s is not in dotted-tri format", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := validateIdentifier(string(preRelease)); err != nil {
|
||||||
|
return fmt.Errorf("failed to validate pre-release: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := validateIdentifier(metadata); err != nil {
|
||||||
|
return fmt.Errorf("failed to validate metadata: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed := make([]int64, 3, 3)
|
||||||
|
|
||||||
|
for i, v := range dotParts[:3] {
|
||||||
|
val, err := strconv.ParseInt(v, 10, 64)
|
||||||
|
parsed[i] = val
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
v.Metadata = metadata
|
||||||
|
v.PreRelease = preRelease
|
||||||
|
v.Major = parsed[0]
|
||||||
|
v.Minor = parsed[1]
|
||||||
|
v.Patch = parsed[2]
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v Version) String() string {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
|
||||||
|
fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
|
||||||
|
|
||||||
|
if v.PreRelease != "" {
|
||||||
|
fmt.Fprintf(&buffer, "-%s", v.PreRelease)
|
||||||
|
}
|
||||||
|
|
||||||
|
if v.Metadata != "" {
|
||||||
|
fmt.Fprintf(&buffer, "+%s", v.Metadata)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buffer.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
||||||
|
var data string
|
||||||
|
if err := unmarshal(&data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return v.Set(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v Version) MarshalJSON() ([]byte, error) {
|
||||||
|
return []byte(`"` + v.String() + `"`), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v *Version) UnmarshalJSON(data []byte) error {
|
||||||
|
l := len(data)
|
||||||
|
if l == 0 || string(data) == `""` {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if l < 2 || data[0] != '"' || data[l-1] != '"' {
|
||||||
|
return errors.New("invalid semver string")
|
||||||
|
}
|
||||||
|
return v.Set(string(data[1 : l-1]))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compare tests if v is less than, equal to, or greater than versionB,
|
||||||
|
// returning -1, 0, or +1 respectively.
|
||||||
|
func (v Version) Compare(versionB Version) int {
|
||||||
|
if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
|
||||||
|
return cmp
|
||||||
|
}
|
||||||
|
return preReleaseCompare(v, versionB)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal tests if v is equal to versionB.
|
||||||
|
func (v Version) Equal(versionB Version) bool {
|
||||||
|
return v.Compare(versionB) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// LessThan tests if v is less than versionB.
|
||||||
|
func (v Version) LessThan(versionB Version) bool {
|
||||||
|
return v.Compare(versionB) < 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slice converts the comparable parts of the semver into a slice of integers.
|
||||||
|
func (v Version) Slice() []int64 {
|
||||||
|
return []int64{v.Major, v.Minor, v.Patch}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p PreRelease) Slice() []string {
|
||||||
|
preRelease := string(p)
|
||||||
|
return strings.Split(preRelease, ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
func preReleaseCompare(versionA Version, versionB Version) int {
|
||||||
|
a := versionA.PreRelease
|
||||||
|
b := versionB.PreRelease
|
||||||
|
|
||||||
|
/* Handle the case where if two versions are otherwise equal it is the
|
||||||
|
* one without a PreRelease that is greater */
|
||||||
|
if len(a) == 0 && (len(b) > 0) {
|
||||||
|
return 1
|
||||||
|
} else if len(b) == 0 && (len(a) > 0) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there is a prerelease, check and compare each part.
|
||||||
|
return recursivePreReleaseCompare(a.Slice(), b.Slice())
|
||||||
|
}
|
||||||
|
|
||||||
|
func recursiveCompare(versionA []int64, versionB []int64) int {
|
||||||
|
if len(versionA) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
a := versionA[0]
|
||||||
|
b := versionB[0]
|
||||||
|
|
||||||
|
if a > b {
|
||||||
|
return 1
|
||||||
|
} else if a < b {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
|
||||||
|
return recursiveCompare(versionA[1:], versionB[1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// recursivePreReleaseCompare compares two lists of pre-release identifiers
// using semver precedence: numeric identifiers sort below non-numeric ones,
// numerics compare as integers, everything else compares lexically, and a
// longer list outranks a shorter one when all shared identifiers are equal.
func recursivePreReleaseCompare(versionA []string, versionB []string) int {
	for {
		// A larger set of pre-release fields has a higher precedence than a
		// smaller set, if all of the preceding identifiers are equal.
		if len(versionA) == 0 {
			if len(versionB) > 0 {
				return -1
			}
			return 0
		}
		if len(versionB) == 0 {
			return 1
		}

		a, b := versionA[0], versionB[0]

		aI, aErr := strconv.Atoi(a)
		bI, bErr := strconv.Atoi(b)
		aInt := aErr == nil
		bInt := bErr == nil

		// Numeric identifiers always have lower precedence than non-numeric ones.
		switch {
		case aInt && !bInt:
			return -1
		case !aInt && bInt:
			return 1
		}

		// Integer comparison when both identifiers are numeric.
		if aInt && bInt {
			if aI > bI {
				return 1
			}
			if aI < bI {
				return -1
			}
		}

		// Lexical comparison. This also breaks ties between numerically equal
		// identifiers with different spellings (e.g. "01" vs "1"), matching
		// the original fall-through behavior.
		if a > b {
			return 1
		}
		if a < b {
			return -1
		}

		versionA, versionB = versionA[1:], versionB[1:]
	}
}
|
||||||
|
|
||||||
|
// BumpMajor increments the Major field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpMajor() {
|
||||||
|
v.Major += 1
|
||||||
|
v.Minor = 0
|
||||||
|
v.Patch = 0
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpMinor() {
|
||||||
|
v.Minor += 1
|
||||||
|
v.Patch = 0
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
|
||||||
|
func (v *Version) BumpPatch() {
|
||||||
|
v.Patch += 1
|
||||||
|
v.PreRelease = PreRelease("")
|
||||||
|
v.Metadata = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// validateIdentifier makes sure the provided identifier satisfies semver spec
|
||||||
|
func validateIdentifier(id string) error {
|
||||||
|
if id != "" && !reIdentifier.MatchString(id) {
|
||||||
|
return fmt.Errorf("%s is not a valid semver identifier", id)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements: one or more dot-separated groups of
// ASCII letters, digits, and hyphens.
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
|
@ -0,0 +1,38 @@
|
|||||||
|
// Copyright 2013-2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package semver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Versions []*Version
|
||||||
|
|
||||||
|
func (s Versions) Len() int {
|
||||||
|
return len(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s Versions) Swap(i, j int) {
|
||||||
|
s[i], s[j] = s[j], s[i]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s Versions) Less(i, j int) bool {
|
||||||
|
return s[i].LessThan(*s[j])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort sorts the given slice of Version
|
||||||
|
func Sort(versions []*Version) {
|
||||||
|
sort.Sort(Versions(versions))
|
||||||
|
}
|
@ -0,0 +1,191 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
@ -0,0 +1,5 @@
|
|||||||
|
CoreOS Project
|
||||||
|
Copyright 2018 CoreOS, Inc
|
||||||
|
|
||||||
|
This product includes software developed at CoreOS, Inc.
|
||||||
|
(http://www.coreos.com/).
|
@ -0,0 +1,46 @@
|
|||||||
|
// Copyright 2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package journal provides write bindings to the local systemd journal.
|
||||||
|
// It is implemented in pure Go and connects to the journal directly over its
|
||||||
|
// unix socket.
|
||||||
|
//
|
||||||
|
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||||
|
// sd-journal a C API.
|
||||||
|
//
|
||||||
|
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||||
|
package journal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Priority of a journal message
type Priority int

// Journal message priorities, from most to least severe.
const (
	PriEmerg   Priority = 0
	PriAlert   Priority = 1
	PriCrit    Priority = 2
	PriErr     Priority = 3
	PriWarning Priority = 4
	PriNotice  Priority = 5
	PriInfo    Priority = 6
	PriDebug   Priority = 7
)
|
||||||
|
|
||||||
|
// Print prints a message to the local systemd journal using Send().
|
||||||
|
func Print(priority Priority, format string, a ...interface{}) error {
|
||||||
|
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||||
|
}
|
@ -0,0 +1,210 @@
|
|||||||
|
// Copyright 2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
// Package journal provides write bindings to the local systemd journal.
|
||||||
|
// It is implemented in pure Go and connects to the journal directly over its
|
||||||
|
// unix socket.
|
||||||
|
//
|
||||||
|
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||||
|
// sd-journal a C API.
|
||||||
|
//
|
||||||
|
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||||
|
package journal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// journalSocket is the datagram socket this package writes entries to.
	// This can be overridden at build-time:
	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
	journalSocket = "/run/systemd/journal/socket"

	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
	// Concrete safe pointer type: *net.UnixConn
	unixConnPtr unsafe.Pointer
	// onceConn ensures that unixConnPtr is initialized exactly once.
	onceConn sync.Once
)

// init performs the one-time local-socket setup at package load time.
func init() {
	onceConn.Do(initConn)
}
|
||||||
|
|
||||||
|
// Enabled checks whether the local systemd journal is available for logging.
|
||||||
|
func Enabled() bool {
|
||||||
|
onceConn.Do(initConn)
|
||||||
|
|
||||||
|
if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, err := net.Dial("unixgram", journalSocket)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
// restrictions, any arbitrary field name may be used. Some names have special
// significance: see the journalctl documentation
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
	if conn == nil {
		return errors.New("could not initialize socket to journald")
	}

	socketAddr := &net.UnixAddr{
		Name: journalSocket,
		Net:  "unixgram",
	}

	// Serialize the entry: PRIORITY and MESSAGE first, then caller fields.
	data := new(bytes.Buffer)
	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
	appendVariable(data, "MESSAGE", message)
	for k, v := range vars {
		appendVariable(data, k, v)
	}

	// Fast path: ship the whole entry as a single datagram.
	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
	if err == nil {
		return nil
	}
	// Only a "message too large" condition is recoverable below; any other
	// send failure is reported to the caller as-is.
	if !isSocketSpaceError(err) {
		return err
	}

	// Large log entry, send it via tempfile and ancillary-fd.
	file, err := tempFd()
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, data)
	if err != nil {
		return err
	}
	// Pass the file descriptor to journald as SCM_RIGHTS ancillary data with
	// an empty payload.
	rights := syscall.UnixRights(int(file.Fd()))
	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
func appendVariable(w io.Writer, name, value string) {
|
||||||
|
if err := validVarName(name); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
|
||||||
|
}
|
||||||
|
if strings.ContainsRune(value, '\n') {
|
||||||
|
/* When the value contains a newline, we write:
|
||||||
|
* - the variable name, followed by a newline
|
||||||
|
* - the size (in 64bit little endian format)
|
||||||
|
* - the data, followed by a newline
|
||||||
|
*/
|
||||||
|
fmt.Fprintln(w, name)
|
||||||
|
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||||
|
fmt.Fprintln(w, value)
|
||||||
|
} else {
|
||||||
|
/* just write the variable and value all on one line */
|
||||||
|
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// validVarName validates a variable name to make sure journald will accept it.
// The variable name must be in uppercase and consist only of characters,
// numbers and underscores, and may not begin with an underscore:
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
func validVarName(name string) error {
	if name == "" {
		return errors.New("Empty variable name")
	}
	if name[0] == '_' {
		return errors.New("Variable name begins with an underscore")
	}

	for _, c := range name {
		ok := ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_'
		if !ok {
			return errors.New("Variable name contains invalid characters")
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// isSocketSpaceError checks whether the error is signaling
|
||||||
|
// an "overlarge message" condition.
|
||||||
|
func isSocketSpaceError(err error) bool {
|
||||||
|
opErr, ok := err.(*net.OpError)
|
||||||
|
if !ok || opErr == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
sysErr, ok := opErr.Err.(*os.SyscallError)
|
||||||
|
if !ok || sysErr == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// tempFd creates a temporary, unlinked file under `/dev/shm`.
// The file is unlinked immediately so only the open descriptor keeps it
// alive; callers are responsible for closing the returned file.
func tempFd() (*os.File, error) {
	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
	if err != nil {
		return nil, err
	}
	if err := syscall.Unlink(file.Name()); err != nil {
		// Close the descriptor before bailing out; the original code
		// returned here with the file still open, leaking the fd.
		file.Close()
		return nil, err
	}
	return file, nil
}
|
||||||
|
|
||||||
|
// initConn initializes the global `unixConnPtr` socket.
|
||||||
|
// It is meant to be called exactly once, at program startup.
|
||||||
|
func initConn() {
|
||||||
|
autobind, err := net.ResolveUnixAddr("unixgram", "")
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sock, err := net.ListenUnixgram("unixgram", autobind)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
|
||||||
|
}
|
@ -0,0 +1,35 @@
|
|||||||
|
// Copyright 2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package journal provides write bindings to the local systemd journal.
|
||||||
|
// It is implemented in pure Go and connects to the journal directly over its
|
||||||
|
// unix socket.
|
||||||
|
//
|
||||||
|
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||||
|
// sd-journal a C API.
|
||||||
|
//
|
||||||
|
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||||
|
package journal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Enabled reports false: this stub implementation never connects to journald.
func Enabled() bool {
	return false
}
|
||||||
|
|
||||||
|
// Send always fails in this stub implementation; no journald socket is
// available, so every message is rejected with the same error.
func Send(message string, priority Priority, vars map[string]string) error {
	return errors.New("could not initialize socket to journald")
}
|
@ -0,0 +1,17 @@
|
|||||||
|
version = 1
|
||||||
|
|
||||||
|
test_patterns = [
|
||||||
|
'**/*_test.go'
|
||||||
|
]
|
||||||
|
|
||||||
|
exclude_patterns = [
|
||||||
|
|
||||||
|
]
|
||||||
|
|
||||||
|
[[analyzers]]
|
||||||
|
name = 'go'
|
||||||
|
enabled = true
|
||||||
|
|
||||||
|
|
||||||
|
[analyzers.meta]
|
||||||
|
import_path = 'github.com/dgraph-io/ristretto'
|
@ -0,0 +1,172 @@
|
|||||||
|
# Changelog
|
||||||
|
All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
|
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
||||||
|
and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.0.0.html) starting v1.0.0.
|
||||||
|
|
||||||
|
## Unreleased
|
||||||
|
|
||||||
|
## [0.1.0] - 2021-06-03
|
||||||
|
|
||||||
|
[0.1.0]: https://github.com/dgraph-io/ristretto/compare/v0.1.0..v0.0.3
|
||||||
|
This release contains bug fixes and improvements to Ristretto. It also contains
|
||||||
|
major updates to the z package. The z package contains types such as Tree (B+
|
||||||
|
tree), Buffer, Mmap file, etc. All these types are used in Badger and Dgraph to
|
||||||
|
improve performance and reduce memory requirements.
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
- Make item public. Add a new onReject call for rejected items. (#180)
|
||||||
|
|
||||||
|
### Added
|
||||||
|
- Use z.Buffer backing for B+ tree (#268)
|
||||||
|
- expose GetTTL function (#270)
|
||||||
|
- docs(README): Ristretto is production-ready. (#267)
|
||||||
|
- Add IterateKV (#265)
|
||||||
|
- feat(super-flags): Add GetPath method in superflags (#258)
|
||||||
|
- add GetDuration to SuperFlag (#248)
|
||||||
|
- add Has, GetFloat64, and GetInt64 to SuperFlag (#247)
|
||||||
|
- move SuperFlag to Ristretto (#246)
|
||||||
|
- add SuperFlagHelp tool to generate flag help text (#251)
|
||||||
|
- allow empty defaults in SuperFlag (#254)
|
||||||
|
- add mmaped b+ tree (#207)
|
||||||
|
- Add API to allow the MaxCost of an existing cache to be updated. (#200)
|
||||||
|
- Add OnExit handler which can be used for manual memory management (#183)
|
||||||
|
- Add life expectancy histogram (#182)
|
||||||
|
- Add mechanism to wait for items to be processed. (#184)
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
- change expiration type from int64 to time.Time (#277)
|
||||||
|
- fix(buffer): make buffer capacity atleast defaultCapacity (#273)
|
||||||
|
- Fixes for z.PersistentTree (#272)
|
||||||
|
- Initialize persistent tree correctly (#271)
|
||||||
|
- use xxhash v2 (#266)
|
||||||
|
- update comments to correctly reflect counter space usage (#189)
|
||||||
|
- enable riscv64 builds (#264)
|
||||||
|
- Switch from log to glog (#263)
|
||||||
|
- Use Fibonacci for latency numbers
|
||||||
|
- cache: fix race when clearning a cache (#261)
|
||||||
|
- Check for keys without values in superflags (#259)
|
||||||
|
- chore(perf): using tags instead of runtime callers to improve the performance of leak detection (#255)
|
||||||
|
- fix(Flags): panic on user errors (#256)
|
||||||
|
- fix SuperFlagHelp newline (#252)
|
||||||
|
- fix(arm): Fix crashing under ARMv6 due to memory mis-alignment (#239)
|
||||||
|
- Fix incorrect unit test coverage depiction (#245)
|
||||||
|
- chore(histogram): adding percentile in histogram (#241)
|
||||||
|
- fix(windows): use filepath instead of path (#244)
|
||||||
|
- fix(MmapFile): Close the fd before deleting the file (#242)
|
||||||
|
- Fixes CGO_ENABLED=0 compilation error (#240)
|
||||||
|
- fix(build): fix build on non-amd64 architectures (#238)
|
||||||
|
- fix(b+tree): Do not double the size of btree (#237)
|
||||||
|
- fix(jemalloc): Fix the stats of jemalloc (#236)
|
||||||
|
- Don't print stuff, only return strings.
|
||||||
|
- Bring memclrNoHeapPointers to z (#235)
|
||||||
|
- increase number of buffers from 32 to 64 in allocator (#234)
|
||||||
|
- Set minSize to 1MB.
|
||||||
|
- Opt(btree): Use Go memory instead of mmap files
|
||||||
|
- Opt(btree): Lightweight stats calculation
|
||||||
|
- Put padding internally to z.Buffer
|
||||||
|
- Chore(z): Add SetTmpDir API to set the temp directory (#233)
|
||||||
|
- Add a BufferFrom
|
||||||
|
- Bring z.Allocator and z.AllocatorPool back
|
||||||
|
- Fix(z.Allocator): Make Allocator use Go memory
|
||||||
|
- Updated ZeroOut to use a simple for loop. (#231)
|
||||||
|
- Add concurrency back
|
||||||
|
- Add a test to check concurrency of Allocator.
|
||||||
|
- Fix(buffer): Expose padding by z.Buffer's APIs and fix test (#222)
|
||||||
|
- AllocateSlice should Truncate if the file is not big enough (#226)
|
||||||
|
- Zero out allocations for structs now that we're reusing Allocators.
|
||||||
|
- Fix the ristretto substring
|
||||||
|
- Deal with nil z.AllocatorPool
|
||||||
|
- Create an AllocatorPool class.
|
||||||
|
- chore(btree): clean NewTree API (#225)
|
||||||
|
- fix(MmapFile): Don't error out if fileSize > sz (#224)
|
||||||
|
- feat(btree): allow option to reset btree and mmaping it to specified file. (#223)
|
||||||
|
- Use mremap on Linux instead of munmap+mmap (#221)
|
||||||
|
- Reuse pages in B+ tree (#220)
|
||||||
|
- fix(allocator): make nil allocator return go byte slice (#217)
|
||||||
|
- fix(buffer): Make padding internal to z.buffer (#216)
|
||||||
|
- chore(buffer): add a parent directory field in z.Buffer (#215)
|
||||||
|
- Make Allocator concurrent
|
||||||
|
- Fix infinite loop in allocator (#214)
|
||||||
|
- Add trim func
|
||||||
|
- Use allocator pool. Turn off freelist.
|
||||||
|
- Add freelists to Allocator to reuse.
|
||||||
|
- make DeleteBelow delete values that are less than lo (#211)
|
||||||
|
- Avoid an unnecessary Load procedure in IncrementOffset.
|
||||||
|
- Add Stats method in Btree.
|
||||||
|
- chore(script): fix local test script (#210)
|
||||||
|
- fix(btree): Increase buffer size if needed. (#209)
|
||||||
|
- chore(btree): add occupancy ratio, search benchmark and compact bug fix (#208)
|
||||||
|
- Add licenses, remove prints, and fix a bug in compact
|
||||||
|
- Add IncrementOffset API for z.buffers (#206)
|
||||||
|
- Show count when printing histogram (#201)
|
||||||
|
- Zbuffer: Add LenNoPadding and make padding 8 bytes (#204)
|
||||||
|
- Allocate Go memory in case allocator is nil.
|
||||||
|
- Add leak detection via leak build flag and fix a leak during cache.Close.
|
||||||
|
- Add some APIs for allocator and buffer
|
||||||
|
- Sync before truncation or close.
|
||||||
|
- Handle nil MmapFile for Sync.
|
||||||
|
- Public methods must not panic after Close() (#202)
|
||||||
|
- Check for RD_ONLY correctly.
|
||||||
|
- Modify MmapFile APIs
|
||||||
|
- Add a bunch of APIs around MmapFile
|
||||||
|
- Move APIs for mmapfile creation over to z package.
|
||||||
|
- Add ZeroOut func
|
||||||
|
- Add SliceOffsets
|
||||||
|
- z: Add TotalSize method on bloom filter (#197)
|
||||||
|
- Add Msync func
|
||||||
|
- Buffer: Use 256 GB mmap size instead of MaxInt64 (#198)
|
||||||
|
- Add a simple test to check next2Pow
|
||||||
|
- Improve memory performance (#195)
|
||||||
|
- Have a way to automatically mmap a growing buffer (#196)
|
||||||
|
- Introduce Mmapped buffers and Merge Sort (#194)
|
||||||
|
- Add a way to access an allocator via reference.
|
||||||
|
- Use jemalloc.a to ensure compilation with the Go binary
|
||||||
|
- Fix up a build issue with ReadMemStats
|
||||||
|
- Add ReadMemStats function (#193)
|
||||||
|
- Allocator helps allocate memory to be used by unsafe structs (#192)
|
||||||
|
- Improve histogram output
|
||||||
|
- Move Closer from y to z (#191)
|
||||||
|
- Add histogram.Mean() method (#188)
|
||||||
|
- Introduce Calloc: Manual Memory Management via jemalloc (#186)
|
||||||
|
|
||||||
|
## [0.0.3] - 2020-07-06
|
||||||
|
|
||||||
|
[0.0.3]: https://github.com/dgraph-io/ristretto/compare/v0.0.2..v0.0.3
|
||||||
|
|
||||||
|
### Changed
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- z: use MemHashString and xxhash.Sum64String ([#153][])
|
||||||
|
- Check conflict key before updating expiration map. ([#154][])
|
||||||
|
- Fix race condition in Cache.Clear ([#133][])
|
||||||
|
- Improve handling of updated items ([#168][])
|
||||||
|
- Fix droppedSets count while updating the item ([#171][])
|
||||||
|
|
||||||
|
## [0.0.2] - 2020-02-24
|
||||||
|
|
||||||
|
[0.0.2]: https://github.com/dgraph-io/ristretto/compare/v0.0.1..v0.0.2
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- Sets with TTL. ([#122][])
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Fix the way metrics are handled for deletions. ([#111][])
|
||||||
|
- Support nil `*Cache` values in `Clear` and `Close`. ([#119][])
|
||||||
|
- Delete item immediately. ([#113][])
|
||||||
|
- Remove key from policy after TTL eviction. ([#130][])
|
||||||
|
|
||||||
|
[#111]: https://github.com/dgraph-io/ristretto/issues/111
|
||||||
|
[#113]: https://github.com/dgraph-io/ristretto/issues/113
|
||||||
|
[#119]: https://github.com/dgraph-io/ristretto/issues/119
|
||||||
|
[#122]: https://github.com/dgraph-io/ristretto/issues/122
|
||||||
|
[#130]: https://github.com/dgraph-io/ristretto/issues/130
|
||||||
|
|
||||||
|
## 0.0.1
|
||||||
|
|
||||||
|
First release. Basic cache functionality based on a LFU policy.
|
@ -0,0 +1,176 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
@ -0,0 +1,220 @@
|
|||||||
|
# Ristretto
|
||||||
|
[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto)
|
||||||
|
[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto)
|
||||||
|
[![Coverage](https://gocover.io/_badge/github.com/dgraph-io/ristretto)](https://gocover.io/github.com/dgraph-io/ristretto)
|
||||||
|
![Tests](https://github.com/dgraph-io/ristretto/workflows/tests/badge.svg)
|
||||||
|
|
||||||
|
Ristretto is a fast, concurrent cache library built with a focus on performance and correctness.
|
||||||
|
|
||||||
|
The motivation to build Ristretto comes from the need for a contention-free
|
||||||
|
cache in [Dgraph][].
|
||||||
|
|
||||||
|
**Use [Discuss Issues](https://discuss.dgraph.io/tags/c/issues/35/ristretto/40) for reporting issues about this repository.**
|
||||||
|
|
||||||
|
[Dgraph]: https://github.com/dgraph-io/dgraph
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class.
|
||||||
|
* **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces.
|
||||||
|
* **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter).
|
||||||
|
* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput.
|
||||||
|
* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything).
|
||||||
|
* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation.
|
||||||
|
* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats.
|
||||||
|
* **Simple API** - just figure out your ideal `Config` values and you're off and running.
|
||||||
|
|
||||||
|
## Status
|
||||||
|
|
||||||
|
Ristretto is production-ready. See [Projects using Ristretto](#projects-using-ristretto).
|
||||||
|
|
||||||
|
## Table of Contents
|
||||||
|
|
||||||
|
* [Usage](#Usage)
|
||||||
|
* [Example](#Example)
|
||||||
|
* [Config](#Config)
|
||||||
|
* [NumCounters](#Config)
|
||||||
|
* [MaxCost](#Config)
|
||||||
|
* [BufferItems](#Config)
|
||||||
|
* [Metrics](#Config)
|
||||||
|
* [OnEvict](#Config)
|
||||||
|
* [KeyToHash](#Config)
|
||||||
|
* [Cost](#Config)
|
||||||
|
* [Benchmarks](#Benchmarks)
|
||||||
|
* [Hit Ratios](#Hit-Ratios)
|
||||||
|
* [Search](#Search)
|
||||||
|
* [Database](#Database)
|
||||||
|
* [Looping](#Looping)
|
||||||
|
* [CODASYL](#CODASYL)
|
||||||
|
* [Throughput](#Throughput)
|
||||||
|
* [Mixed](#Mixed)
|
||||||
|
* [Read](#Read)
|
||||||
|
* [Write](#Write)
|
||||||
|
* [Projects using Ristretto](#projects-using-ristretto)
|
||||||
|
* [FAQ](#FAQ)
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
### Example
|
||||||
|
|
||||||
|
```go
|
||||||
|
func main() {
|
||||||
|
cache, err := ristretto.NewCache(&ristretto.Config{
|
||||||
|
NumCounters: 1e7, // number of keys to track frequency of (10M).
|
||||||
|
MaxCost: 1 << 30, // maximum cost of cache (1GB).
|
||||||
|
BufferItems: 64, // number of keys per Get buffer.
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// set a value with a cost of 1
|
||||||
|
cache.Set("key", "value", 1)
|
||||||
|
|
||||||
|
// wait for value to pass through buffers
|
||||||
|
time.Sleep(10 * time.Millisecond)
|
||||||
|
|
||||||
|
value, found := cache.Get("key")
|
||||||
|
if !found {
|
||||||
|
panic("missing value")
|
||||||
|
}
|
||||||
|
fmt.Println(value)
|
||||||
|
cache.Del("key")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Config
|
||||||
|
|
||||||
|
The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above).
|
||||||
|
|
||||||
|
**NumCounters** `int64`
|
||||||
|
|
||||||
|
NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full.
|
||||||
|
|
||||||
|
For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value.
|
||||||
|
|
||||||
|
**MaxCost** `int64`
|
||||||
|
|
||||||
|
MaxCost is how eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted.
|
||||||
|
|
||||||
|
MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new item (that's accepted) would cause 5 1KB items to be evicted.
|
||||||
|
|
||||||
|
MaxCost could be anything as long as it matches how you're using the cost values when calling Set.
|
||||||
|
|
||||||
|
**BufferItems** `int64`
|
||||||
|
|
||||||
|
BufferItems is the size of the Get buffers. The best value we've found for this is 64.
|
||||||
|
|
||||||
|
If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this.
|
||||||
|
|
||||||
|
**Metrics** `bool`
|
||||||
|
|
||||||
|
Metrics is true when you want real-time logging of a variety of stats. The reason this is a Config flag is because there's a 10% throughput performance overhead.
|
||||||
|
|
||||||
|
**OnEvict** `func(hashes [2]uint64, value interface{}, cost int64)`
|
||||||
|
|
||||||
|
OnEvict is called for every eviction.
|
||||||
|
|
||||||
|
**KeyToHash** `func(key interface{}) [2]uint64`
|
||||||
|
|
||||||
|
KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
|
||||||
|
|
||||||
|
Note that if you want 128bit hashes you should use the full `[2]uint64`,
|
||||||
|
otherwise just fill the `uint64` at the `0` position and it will behave like
|
||||||
|
any 64bit hash.
|
||||||
|
|
||||||
|
**Cost** `func(value interface{}) int64`
|
||||||
|
|
||||||
|
Cost is an optional function you can pass to the Config in order to evaluate
|
||||||
|
item cost at runtime, and only for the Set calls that aren't dropped (this is
|
||||||
|
useful if calculating item cost is particularly expensive and you don't want to
|
||||||
|
waste time on items that will be dropped anyways).
|
||||||
|
|
||||||
|
To signal to Ristretto that you'd like to use this Cost function:
|
||||||
|
|
||||||
|
1. Set the Cost field to a non-nil function.
|
||||||
|
2. When calling Set for new items or item updates, use a `cost` of 0.
|
||||||
|
|
||||||
|
## Benchmarks
|
||||||
|
|
||||||
|
The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto.
|
||||||
|
|
||||||
|
### Hit Ratios
|
||||||
|
|
||||||
|
#### Search
|
||||||
|
|
||||||
|
This trace is described as "disk read accesses initiated by a large commercial
|
||||||
|
search engine in response to various web search requests."
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Search%20(ARC-S3).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
#### Database
|
||||||
|
|
||||||
|
This trace is described as "a database server running at a commercial site
|
||||||
|
running an ERP application on top of a commercial database."
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Database%20(ARC-DS1).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
#### Looping
|
||||||
|
|
||||||
|
This trace demonstrates a looping access pattern.
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20Glimpse%20(LIRS-GLI).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
#### CODASYL
|
||||||
|
|
||||||
|
This trace is described as "references to a CODASYL database for a one hour
|
||||||
|
period."
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Hit%20Ratios%20-%20CODASYL%20(ARC-OLTP).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
### Throughput
|
||||||
|
|
||||||
|
All throughput benchmarks were ran on an Intel Core i7-8700K (3.7GHz) with 16gb
|
||||||
|
of RAM.
|
||||||
|
|
||||||
|
#### Mixed
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Mixed.svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
#### Read
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Read%20(Zipfian).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
#### Write
|
||||||
|
|
||||||
|
<p align="center">
|
||||||
|
<img src="https://raw.githubusercontent.com/dgraph-io/ristretto/master/benchmarks/Throughput%20-%20Write%20(Zipfian).svg">
|
||||||
|
</p>
|
||||||
|
|
||||||
|
## Projects Using Ristretto
|
||||||
|
|
||||||
|
Below is a list of known projects that use Ristretto:
|
||||||
|
|
||||||
|
- [Badger](https://github.com/dgraph-io/badger) - Embeddable key-value DB in Go
|
||||||
|
- [Dgraph](https://github.com/dgraph-io/dgraph) - Horizontally scalable and distributed GraphQL database with a graph backend
|
||||||
|
- [Vitess](https://github.com/vitessio/vitess) - database clustering system for horizontal scaling of MySQL
|
||||||
|
|
||||||
|
## FAQ
|
||||||
|
|
||||||
|
### How are you achieving this performance? What shortcuts are you taking?
|
||||||
|
|
||||||
|
We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy.
|
||||||
|
|
||||||
|
As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache.
|
||||||
|
|
||||||
|
### Is Ristretto distributed?
|
||||||
|
|
||||||
|
No, it's just like any other Go library that you can import into your project and use in a single process.
|
@ -0,0 +1,719 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Ristretto is a fast, fixed size, in-memory cache with a dual focus on
|
||||||
|
// throughput and hit ratio performance. You can easily add Ristretto to an
|
||||||
|
// existing system and keep the most valuable data where you need it.
|
||||||
|
package ristretto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/dgraph-io/ristretto/z"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// setBufSize is the capacity of the Set buffer channel. Sets that arrive
	// while the channel is full are dropped (counted under dropSets).
	// TODO: find the optimal value for this or make it configurable
	setBufSize = 32 * 1024
)
|
||||||
|
|
||||||
|
// itemCallback is the signature shared by the eviction and rejection hooks.
type itemCallback func(*Item)

// itemSize is the in-memory size of one storeItem; it is added to an item's
// cost unless Config.IgnoreInternalCost is set.
const itemSize = int64(unsafe.Sizeof(storeItem{}))
|
||||||
|
|
||||||
|
// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
// policy and a Sampled LFU eviction policy. You can use the same Cache instance
// from as many goroutines as you want.
type Cache struct {
	// store is the central concurrent hashmap where key-value items are stored.
	store store
	// policy determines what gets let in to the cache and what gets kicked out.
	policy policy
	// getBuf is a custom ring buffer implementation that gets pushed to when
	// keys are read.
	getBuf *ringBuffer
	// setBuf is a buffer allowing us to batch/drop Sets during times of high
	// contention.
	setBuf chan *Item
	// onEvict is called for item evictions.
	onEvict itemCallback
	// onReject is called when an item is rejected via admission policy.
	onReject itemCallback
	// onExit is called whenever a value goes out of scope from the cache.
	onExit (func(interface{}))
	// KeyToHash function is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	keyToHash func(interface{}) (uint64, uint64)
	// stop is used to stop the processItems goroutine.
	stop chan struct{}
	// indicates whether cache is closed.
	// NOTE(review): written by Close and read by Get/Set/Del without
	// synchronization — looks racy; confirm the intended guarantees.
	isClosed bool
	// cost calculates cost from a value.
	cost func(value interface{}) int64
	// ignoreInternalCost dictates whether to ignore the cost of internally storing
	// the item in the cost calculation.
	ignoreInternalCost bool
	// cleanupTicker is used to periodically check for entries whose TTL has passed.
	cleanupTicker *time.Ticker
	// Metrics contains a running log of important statistics like hits, misses,
	// and dropped items.
	Metrics *Metrics
}
|
||||||
|
|
||||||
|
// Config is passed to NewCache for creating new Cache instances.
type Config struct {
	// NumCounters determines the number of counters (keys) to keep that hold
	// access frequency information. It's generally a good idea to have more
	// counters than the max cache capacity, as this will improve eviction
	// accuracy and subsequent hit ratios.
	//
	// For example, if you expect your cache to hold 1,000,000 items when full,
	// NumCounters should be 10,000,000 (10x). Each counter takes up roughly
	// 3 bytes (4 bits for each counter * 4 copies plus about a byte per
	// counter for the bloom filter). Note that the number of counters is
	// internally rounded up to the nearest power of 2, so the space usage
	// may be a little larger than 3 bytes * NumCounters.
	NumCounters int64
	// MaxCost can be considered as the cache capacity, in whatever units you
	// choose to use.
	//
	// For example, if you want the cache to have a max capacity of 100MB, you
	// would set MaxCost to 100,000,000 and pass an item's number of bytes as
	// the `cost` parameter for calls to Set. If new items are accepted, the
	// eviction process will take care of making room for the new item and not
	// overflowing the MaxCost value.
	MaxCost int64
	// BufferItems determines the size of Get buffers.
	//
	// Unless you have a rare use case, using `64` as the BufferItems value
	// results in good performance.
	BufferItems int64
	// Metrics determines whether cache statistics are kept during the cache's
	// lifetime. There *is* some overhead to keeping statistics, so you should
	// only set this flag to true when testing or throughput performance isn't a
	// major factor.
	Metrics bool
	// OnEvict is called for every eviction and passes the hashed key, value,
	// and cost to the function.
	OnEvict func(item *Item)
	// OnReject is called for every rejection done via the policy.
	OnReject func(item *Item)
	// OnExit is called whenever a value is removed from cache. This can be
	// used to do manual memory deallocation. Would also be called on eviction
	// and rejection of the value.
	OnExit func(val interface{})
	// KeyToHash function is used to customize the key hashing algorithm.
	// Each key will be hashed using the provided function. If keyToHash value
	// is not set, the default keyToHash function is used.
	KeyToHash func(key interface{}) (uint64, uint64)
	// Cost evaluates a value and outputs a corresponding cost. This function
	// is ran after Set is called for a new item or an item update with a cost
	// param of 0.
	Cost func(value interface{}) int64
	// IgnoreInternalCost set to true indicates to the cache that the cost of
	// internally storing the value should be ignored. This is useful when the
	// cost passed to set is not using bytes as units. Keep in mind that setting
	// this to true will increase the memory usage.
	IgnoreInternalCost bool
}
|
||||||
|
|
||||||
|
// itemFlag tells processItems how to apply a queued Item.
type itemFlag byte

const (
	// itemNew marks a key not yet in the store; it must pass admission.
	itemNew itemFlag = iota
	// itemDelete removes the key from both the policy and the store.
	itemDelete
	// itemUpdate marks a key whose value is already in the store; only the
	// policy's cost bookkeeping needs updating.
	itemUpdate
)
|
||||||
|
|
||||||
|
// Item is passed to setBuf so items can eventually be added to the cache.
type Item struct {
	flag       itemFlag
	Key        uint64
	Conflict   uint64 // secondary hash used to detect key collisions
	Value      interface{}
	Cost       int64
	Expiration time.Time       // zero value means the item never expires
	wg         *sync.WaitGroup // non-nil only for Wait() sentinel items
}
|
||||||
|
|
||||||
|
// NewCache returns a new Cache instance and any configuration errors, if any.
func NewCache(config *Config) (*Cache, error) {
	// Reject configurations that would make the policy or buffers degenerate.
	switch {
	case config.NumCounters == 0:
		return nil, errors.New("NumCounters can't be zero")
	case config.MaxCost == 0:
		return nil, errors.New("MaxCost can't be zero")
	case config.BufferItems == 0:
		return nil, errors.New("BufferItems can't be zero")
	}
	policy := newPolicy(config.NumCounters, config.MaxCost)
	cache := &Cache{
		store:              newStore(),
		policy:             policy,
		getBuf:             newRingBuffer(policy, config.BufferItems),
		setBuf:             make(chan *Item, setBufSize),
		keyToHash:          config.KeyToHash,
		stop:               make(chan struct{}),
		cost:               config.Cost,
		ignoreInternalCost: config.IgnoreInternalCost,
		// Tick at half the TTL bucket duration so expired buckets are
		// noticed promptly.
		cleanupTicker: time.NewTicker(time.Duration(bucketDurationSecs) * time.Second / 2),
	}
	// Wrap the user callbacks so the rest of the code can invoke them
	// unconditionally (nil-safe wrappers).
	cache.onExit = func(val interface{}) {
		if config.OnExit != nil && val != nil {
			config.OnExit(val)
		}
	}
	cache.onEvict = func(item *Item) {
		if config.OnEvict != nil {
			config.OnEvict(item)
		}
		cache.onExit(item.Value)
	}
	cache.onReject = func(item *Item) {
		if config.OnReject != nil {
			config.OnReject(item)
		}
		cache.onExit(item.Value)
	}
	if cache.keyToHash == nil {
		cache.keyToHash = z.KeyToHash
	}
	if config.Metrics {
		cache.collectMetrics()
	}
	// NOTE: benchmarks seem to show that performance decreases the more
	// goroutines we have running cache.processItems(), so 1 should
	// usually be sufficient
	go cache.processItems()
	return cache, nil
}
|
||||||
|
|
||||||
|
func (c *Cache) Wait() {
|
||||||
|
if c == nil || c.isClosed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
wg := &sync.WaitGroup{}
|
||||||
|
wg.Add(1)
|
||||||
|
c.setBuf <- &Item{wg: wg}
|
||||||
|
wg.Wait()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the value (if any) and a boolean representing whether the
|
||||||
|
// value was found or not. The value can be nil and the boolean can be true at
|
||||||
|
// the same time.
|
||||||
|
func (c *Cache) Get(key interface{}) (interface{}, bool) {
|
||||||
|
if c == nil || c.isClosed || key == nil {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
keyHash, conflictHash := c.keyToHash(key)
|
||||||
|
c.getBuf.Push(keyHash)
|
||||||
|
value, ok := c.store.Get(keyHash, conflictHash)
|
||||||
|
if ok {
|
||||||
|
c.Metrics.add(hit, keyHash, 1)
|
||||||
|
} else {
|
||||||
|
c.Metrics.add(miss, keyHash, 1)
|
||||||
|
}
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set attempts to add the key-value item to the cache. If it returns false,
// then the Set was dropped and the key-value item isn't added to the cache. If
// it returns true, there's still a chance it could be dropped by the policy if
// its determined that the key-value item isn't worth keeping, but otherwise the
// item will be added and other items will be evicted in order to make room.
//
// To dynamically evaluate the item's cost using the Config.Cost function, set
// the cost parameter to 0 and Cost will be ran when needed in order to find
// the item's true cost.
func (c *Cache) Set(key, value interface{}, cost int64) bool {
	// A zero TTL means the entry never expires; see SetWithTTL.
	return c.SetWithTTL(key, value, cost, 0*time.Second)
}
|
||||||
|
|
||||||
|
// SetWithTTL works like Set but adds a key-value pair to the cache that will expire
|
||||||
|
// after the specified TTL (time to live) has passed. A zero value means the value never
|
||||||
|
// expires, which is identical to calling Set. A negative value is a no-op and the value
|
||||||
|
// is discarded.
|
||||||
|
func (c *Cache) SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool {
|
||||||
|
if c == nil || c.isClosed || key == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var expiration time.Time
|
||||||
|
switch {
|
||||||
|
case ttl == 0:
|
||||||
|
// No expiration.
|
||||||
|
break
|
||||||
|
case ttl < 0:
|
||||||
|
// Treat this a a no-op.
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
expiration = time.Now().Add(ttl)
|
||||||
|
}
|
||||||
|
|
||||||
|
keyHash, conflictHash := c.keyToHash(key)
|
||||||
|
i := &Item{
|
||||||
|
flag: itemNew,
|
||||||
|
Key: keyHash,
|
||||||
|
Conflict: conflictHash,
|
||||||
|
Value: value,
|
||||||
|
Cost: cost,
|
||||||
|
Expiration: expiration,
|
||||||
|
}
|
||||||
|
// cost is eventually updated. The expiration must also be immediately updated
|
||||||
|
// to prevent items from being prematurely removed from the map.
|
||||||
|
if prev, ok := c.store.Update(i); ok {
|
||||||
|
c.onExit(prev)
|
||||||
|
i.flag = itemUpdate
|
||||||
|
}
|
||||||
|
// Attempt to send item to policy.
|
||||||
|
select {
|
||||||
|
case c.setBuf <- i:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
if i.flag == itemUpdate {
|
||||||
|
// Return true if this was an update operation since we've already
|
||||||
|
// updated the store. For all the other operations (set/delete), we
|
||||||
|
// return false which means the item was not inserted.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
c.Metrics.add(dropSets, keyHash, 1)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Del deletes the key-value item from the cache if it exists.
|
||||||
|
func (c *Cache) Del(key interface{}) {
|
||||||
|
if c == nil || c.isClosed || key == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
keyHash, conflictHash := c.keyToHash(key)
|
||||||
|
// Delete immediately.
|
||||||
|
_, prev := c.store.Del(keyHash, conflictHash)
|
||||||
|
c.onExit(prev)
|
||||||
|
// If we've set an item, it would be applied slightly later.
|
||||||
|
// So we must push the same item to `setBuf` with the deletion flag.
|
||||||
|
// This ensures that if a set is followed by a delete, it will be
|
||||||
|
// applied in the correct order.
|
||||||
|
c.setBuf <- &Item{
|
||||||
|
flag: itemDelete,
|
||||||
|
Key: keyHash,
|
||||||
|
Conflict: conflictHash,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetTTL returns the TTL for the specified key and a bool that is true if the
|
||||||
|
// item was found and is not expired.
|
||||||
|
func (c *Cache) GetTTL(key interface{}) (time.Duration, bool) {
|
||||||
|
if c == nil || key == nil {
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
keyHash, conflictHash := c.keyToHash(key)
|
||||||
|
if _, ok := c.store.Get(keyHash, conflictHash); !ok {
|
||||||
|
// not found
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
expiration := c.store.Expiration(keyHash)
|
||||||
|
if expiration.IsZero() {
|
||||||
|
// found but no expiration
|
||||||
|
return 0, true
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().After(expiration) {
|
||||||
|
// found but expired
|
||||||
|
return 0, false
|
||||||
|
}
|
||||||
|
|
||||||
|
return time.Until(expiration), true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close stops all goroutines and closes all channels. After Close the cache
// is unusable; isClosed turns subsequent calls into no-ops.
func (c *Cache) Close() {
	if c == nil || c.isClosed {
		return
	}
	// Drain buffers and reset state; Clear also stops and restarts the
	// processItems goroutine, so stop it again below.
	c.Clear()

	// Block until processItems goroutine is returned.
	c.stop <- struct{}{}
	close(c.stop)
	close(c.setBuf)
	c.policy.Close()
	c.isClosed = true
}
|
||||||
|
|
||||||
|
// Clear empties the hashmap and zeroes all policy counters. Note that this is
// not an atomic operation (but that shouldn't be a problem as it's assumed that
// Set/Get calls won't be occurring until after this).
func (c *Cache) Clear() {
	if c == nil || c.isClosed {
		return
	}
	// Block until processItems goroutine is returned.
	c.stop <- struct{}{}

	// Clear out the setBuf channel.
loop:
	for {
		select {
		case i := <-c.setBuf:
			if i.wg != nil {
				// Wait() sentinel: release the waiter and keep draining.
				i.wg.Done()
				continue
			}
			if i.flag != itemUpdate {
				// In itemUpdate, the value is already set in the store. So, no need to call
				// onEvict here.
				c.onEvict(i)
			}
		default:
			break loop
		}
	}

	// Clear value hashmap and policy data.
	c.policy.Clear()
	c.store.Clear(c.onEvict)
	// Only reset metrics if they're enabled.
	if c.Metrics != nil {
		c.Metrics.Clear()
	}
	// Restart processItems goroutine.
	go c.processItems()
}
|
||||||
|
|
||||||
|
// MaxCost returns the max cost of the cache. A nil cache reports zero.
func (c *Cache) MaxCost() int64 {
	if c == nil {
		return 0
	}
	return c.policy.MaxCost()
}
|
||||||
|
|
||||||
|
// UpdateMaxCost updates the maxCost of an existing cache. No-op on nil.
func (c *Cache) UpdateMaxCost(maxCost int64) {
	if c == nil {
		return
	}
	c.policy.UpdateMaxCost(maxCost)
}
|
||||||
|
|
||||||
|
// processItems is ran by goroutines processing the Set buffer.
func (c *Cache) processItems() {
	// startTs remembers when each admitted key entered the cache so its
	// lifetime can be recorded on eviction (metrics only).
	startTs := make(map[uint64]time.Time)
	numToKeep := 100000 // TODO: Make this configurable via options.

	trackAdmission := func(key uint64) {
		if c.Metrics == nil {
			return
		}
		startTs[key] = time.Now()
		// Bound the map: once it exceeds numToKeep, drop entries until it
		// fits again (map iteration order is random, so the dropped set is
		// pseudo-random).
		if len(startTs) > numToKeep {
			for k := range startTs {
				if len(startTs) <= numToKeep {
					break
				}
				delete(startTs, k)
			}
		}
	}
	onEvict := func(i *Item) {
		if ts, has := startTs[i.Key]; has {
			c.Metrics.trackEviction(int64(time.Since(ts) / time.Second))
			delete(startTs, i.Key)
		}
		if c.onEvict != nil {
			c.onEvict(i)
		}
	}

	for {
		select {
		case i := <-c.setBuf:
			if i.wg != nil {
				// Wait() sentinel: signal the waiter; nothing to store.
				i.wg.Done()
				continue
			}
			// Calculate item cost value if new or update.
			if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
				i.Cost = c.cost(i.Value)
			}
			if !c.ignoreInternalCost {
				// Add the cost of internally storing the object.
				i.Cost += itemSize
			}

			switch i.flag {
			case itemNew:
				victims, added := c.policy.Add(i.Key, i.Cost)
				if added {
					c.store.Set(i)
					c.Metrics.add(keyAdd, i.Key, 1)
					trackAdmission(i.Key)
				} else {
					c.onReject(i)
				}
				for _, victim := range victims {
					victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
					onEvict(victim)
				}

			case itemUpdate:
				c.policy.Update(i.Key, i.Cost)

			case itemDelete:
				c.policy.Del(i.Key) // Deals with metrics updates.
				_, val := c.store.Del(i.Key, i.Conflict)
				c.onExit(val)
			}
		case <-c.cleanupTicker.C:
			// Periodic sweep of entries whose TTL has passed.
			c.store.Cleanup(c.policy, onEvict)
		case <-c.stop:
			return
		}
	}
}
|
||||||
|
|
||||||
|
// collectMetrics just creates a new *Metrics instance and adds the pointers
|
||||||
|
// to the cache and policy instances.
|
||||||
|
func (c *Cache) collectMetrics() {
|
||||||
|
c.Metrics = newMetrics()
|
||||||
|
c.policy.CollectMetrics(c.Metrics)
|
||||||
|
}
|
||||||
|
|
||||||
|
// metricType indexes the per-statistic counter slots inside Metrics.all.
type metricType int

const (
	// The following 2 keep track of hits and misses.
	hit = iota
	miss
	// The following 3 keep track of number of keys added, updated and evicted.
	keyAdd
	keyUpdate
	keyEvict
	// The following 2 keep track of cost of keys added and evicted.
	costAdd
	costEvict
	// The following keep track of how many sets were dropped or rejected later.
	dropSets
	rejectSets
	// The following 2 keep track of how many gets were kept and dropped on the
	// floor.
	dropGets
	keepGets
	// This should be the final enum. Other enums should be set before this.
	doNotUse
)
|
||||||
|
|
||||||
|
func stringFor(t metricType) string {
|
||||||
|
switch t {
|
||||||
|
case hit:
|
||||||
|
return "hit"
|
||||||
|
case miss:
|
||||||
|
return "miss"
|
||||||
|
case keyAdd:
|
||||||
|
return "keys-added"
|
||||||
|
case keyUpdate:
|
||||||
|
return "keys-updated"
|
||||||
|
case keyEvict:
|
||||||
|
return "keys-evicted"
|
||||||
|
case costAdd:
|
||||||
|
return "cost-added"
|
||||||
|
case costEvict:
|
||||||
|
return "cost-evicted"
|
||||||
|
case dropSets:
|
||||||
|
return "sets-dropped"
|
||||||
|
case rejectSets:
|
||||||
|
return "sets-rejected" // by policy.
|
||||||
|
case dropGets:
|
||||||
|
return "gets-dropped"
|
||||||
|
case keepGets:
|
||||||
|
return "gets-kept"
|
||||||
|
default:
|
||||||
|
return "unidentified"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
type Metrics struct {
	// all holds, per metric, 256 pointer slots to spread atomic increments
	// across cache lines; see add/get.
	all [doNotUse][]*uint64

	mu   sync.RWMutex     // guards life
	life *z.HistogramData // Tracks the life expectancy of a key.
}
|
||||||
|
|
||||||
|
func newMetrics() *Metrics {
|
||||||
|
s := &Metrics{
|
||||||
|
life: z.NewHistogramData(z.HistogramBounds(1, 16)),
|
||||||
|
}
|
||||||
|
for i := 0; i < doNotUse; i++ {
|
||||||
|
s.all[i] = make([]*uint64, 256)
|
||||||
|
slice := s.all[i]
|
||||||
|
for j := range slice {
|
||||||
|
slice[j] = new(uint64)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Metrics) add(t metricType, hash, delta uint64) {
|
||||||
|
if p == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
valp := p.all[t]
|
||||||
|
// Avoid false sharing by padding at least 64 bytes of space between two
|
||||||
|
// atomic counters which would be incremented.
|
||||||
|
idx := (hash % 25) * 10
|
||||||
|
atomic.AddUint64(valp[idx], delta)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *Metrics) get(t metricType) uint64 {
|
||||||
|
if p == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
valp := p.all[t]
|
||||||
|
var total uint64
|
||||||
|
for i := range valp {
|
||||||
|
total += atomic.LoadUint64(valp[i])
|
||||||
|
}
|
||||||
|
return total
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hits is the number of Get calls where a value was found for the corresponding key.
func (p *Metrics) Hits() uint64 {
	return p.get(hit)
}

// Misses is the number of Get calls where a value was not found for the corresponding key.
func (p *Metrics) Misses() uint64 {
	return p.get(miss)
}

// KeysAdded is the total number of Set calls where a new key-value item was added.
func (p *Metrics) KeysAdded() uint64 {
	return p.get(keyAdd)
}

// KeysUpdated is the total number of Set calls where the value was updated.
func (p *Metrics) KeysUpdated() uint64 {
	return p.get(keyUpdate)
}

// KeysEvicted is the total number of keys evicted.
func (p *Metrics) KeysEvicted() uint64 {
	return p.get(keyEvict)
}

// CostAdded is the sum of costs that have been added (successful Set calls).
func (p *Metrics) CostAdded() uint64 {
	return p.get(costAdd)
}

// CostEvicted is the sum of all costs that have been evicted.
func (p *Metrics) CostEvicted() uint64 {
	return p.get(costEvict)
}

// SetsDropped is the number of Set calls that don't make it into internal
// buffers (due to contention or some other reason).
func (p *Metrics) SetsDropped() uint64 {
	return p.get(dropSets)
}

// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
func (p *Metrics) SetsRejected() uint64 {
	return p.get(rejectSets)
}

// GetsDropped is the number of Get counter increments that are dropped
// internally.
func (p *Metrics) GetsDropped() uint64 {
	return p.get(dropGets)
}

// GetsKept is the number of Get counter increments that are kept.
func (p *Metrics) GetsKept() uint64 {
	return p.get(keepGets)
}
|
||||||
|
|
||||||
|
// Ratio is the number of Hits over all accesses (Hits + Misses). This is the
|
||||||
|
// percentage of successful Get calls.
|
||||||
|
func (p *Metrics) Ratio() float64 {
|
||||||
|
if p == nil {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
hits, misses := p.get(hit), p.get(miss)
|
||||||
|
if hits == 0 && misses == 0 {
|
||||||
|
return 0.0
|
||||||
|
}
|
||||||
|
return float64(hits) / float64(hits+misses)
|
||||||
|
}
|
||||||
|
|
||||||
|
// trackEviction records, in whole seconds, how long a just-evicted key lived.
func (p *Metrics) trackEviction(numSeconds int64) {
	if p == nil {
		return
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	p.life.Update(numSeconds)
}
|
||||||
|
|
||||||
|
// LifeExpectancySeconds returns a copy of the histogram of key lifetimes in
// seconds, or nil on a nil receiver.
func (p *Metrics) LifeExpectancySeconds() *z.HistogramData {
	if p == nil {
		return nil
	}
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.life.Copy()
}
|
||||||
|
|
||||||
|
// Clear resets all the metrics.
|
||||||
|
func (p *Metrics) Clear() {
|
||||||
|
if p == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for i := 0; i < doNotUse; i++ {
|
||||||
|
for j := range p.all[i] {
|
||||||
|
atomic.StoreUint64(p.all[i][j], 0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
p.mu.Lock()
|
||||||
|
p.life = z.NewHistogramData(z.HistogramBounds(1, 16))
|
||||||
|
p.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a string representation of the metrics.
|
||||||
|
func (p *Metrics) String() string {
|
||||||
|
if p == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for i := 0; i < doNotUse; i++ {
|
||||||
|
t := metricType(i)
|
||||||
|
fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
|
||||||
|
}
|
||||||
|
fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
|
||||||
|
fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
|
||||||
|
return buf.String()
|
||||||
|
}
|
@ -0,0 +1,91 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ristretto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ringConsumer is the user-defined object responsible for receiving and
// processing items in batches when buffers are drained.
type ringConsumer interface {
	// Push receives a drained batch; the return value reports whether the
	// consumer took ownership of the slice.
	Push([]uint64) bool
}
|
||||||
|
|
||||||
|
// ringStripe is a singular ring buffer that is not concurrent safe.
type ringStripe struct {
	cons ringConsumer // receives the batch when the stripe fills
	data []uint64     // pending items, drained once len reaches capa
	capa int          // number of items buffered before draining
}
|
||||||
|
|
||||||
|
func newRingStripe(cons ringConsumer, capa int64) *ringStripe {
|
||||||
|
return &ringStripe{
|
||||||
|
cons: cons,
|
||||||
|
data: make([]uint64, 0, capa),
|
||||||
|
capa: int(capa),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Push appends an item in the ring buffer and drains (copies items and
|
||||||
|
// sends to Consumer) if full.
|
||||||
|
func (s *ringStripe) Push(item uint64) {
|
||||||
|
s.data = append(s.data, item)
|
||||||
|
// Decide if the ring buffer should be drained.
|
||||||
|
if len(s.data) >= s.capa {
|
||||||
|
// Send elements to consumer and create a new ring stripe.
|
||||||
|
if s.cons.Push(s.data) {
|
||||||
|
s.data = make([]uint64, 0, s.capa)
|
||||||
|
} else {
|
||||||
|
s.data = s.data[:0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ringBuffer stores multiple buffers (stripes) and distributes Pushed items
// between them to lower contention.
//
// This implements the "batching" process described in the BP-Wrapper paper
// (section III part A).
type ringBuffer struct {
	// pool hands out *ringStripe values; a goroutine that Gets a stripe has
	// exclusive use of it until it is Put back.
	pool *sync.Pool
}
|
||||||
|
|
||||||
|
// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will
// be called when individual stripes are full and need to drain their elements.
func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer {
	// LOSSY buffers use a very simple sync.Pool for concurrently reusing
	// stripes. We do lose some stripes due to GC (unheld items in sync.Pool
	// are cleared), but the performance gains generally outweigh the small
	// percentage of elements lost. The performance primarily comes from
	// low-level runtime functions used in the standard library that aren't
	// available to us (such as runtime_procPin()).
	return &ringBuffer{
		pool: &sync.Pool{
			New: func() interface{} { return newRingStripe(cons, capa) },
		},
	}
}
|
||||||
|
|
||||||
|
// Push adds an element to one of the internal stripes and possibly drains if
|
||||||
|
// the stripe becomes full.
|
||||||
|
func (b *ringBuffer) Push(item uint64) {
|
||||||
|
// Reuse or create a new stripe.
|
||||||
|
stripe := b.pool.Get().(*ringStripe)
|
||||||
|
stripe.Push(item)
|
||||||
|
b.pool.Put(stripe)
|
||||||
|
}
|
@ -0,0 +1,155 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// This package includes multiple probabilistic data structures needed for
|
||||||
|
// admission/eviction metadata. Most are Counting Bloom Filter variations, but
|
||||||
|
// a caching-specific feature that is also required is a "freshness" mechanism,
|
||||||
|
// which basically serves as a "lifetime" process. This freshness mechanism
|
||||||
|
// was described in the original TinyLFU paper [1], but other mechanisms may
|
||||||
|
// be better suited for certain data distributions.
|
||||||
|
//
|
||||||
|
// [1]: https://arxiv.org/abs/1512.00727
|
||||||
|
package ristretto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/rand"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
// based on Damian Gryski's CM4 [1].
//
// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
type cmSketch struct {
	rows [cmDepth]cmRow  // independent rows of 4-bit counters
	seed [cmDepth]uint64 // per-row hash seed, XORed into the key hash
	mask uint64          // rows are power-of-2 sized, so & mask replaces %
}
|
||||||
|
|
||||||
|
const (
	// cmDepth is the number of counter copies to store (think of it as rows).
	cmDepth = 4
)
|
||||||
|
|
||||||
|
func newCmSketch(numCounters int64) *cmSketch {
|
||||||
|
if numCounters == 0 {
|
||||||
|
panic("cmSketch: bad numCounters")
|
||||||
|
}
|
||||||
|
// Get the next power of 2 for better cache performance.
|
||||||
|
numCounters = next2Power(numCounters)
|
||||||
|
sketch := &cmSketch{mask: uint64(numCounters - 1)}
|
||||||
|
// Initialize rows of counters and seeds.
|
||||||
|
source := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||||
|
for i := 0; i < cmDepth; i++ {
|
||||||
|
sketch.seed[i] = source.Uint64()
|
||||||
|
sketch.rows[i] = newCmRow(numCounters)
|
||||||
|
}
|
||||||
|
return sketch
|
||||||
|
}
|
||||||
|
|
||||||
|
// Increment increments the count(ers) for the specified key.
|
||||||
|
func (s *cmSketch) Increment(hashed uint64) {
|
||||||
|
for i := range s.rows {
|
||||||
|
s.rows[i].increment((hashed ^ s.seed[i]) & s.mask)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Estimate returns the value of the specified key.
|
||||||
|
func (s *cmSketch) Estimate(hashed uint64) int64 {
|
||||||
|
min := byte(255)
|
||||||
|
for i := range s.rows {
|
||||||
|
val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask)
|
||||||
|
if val < min {
|
||||||
|
min = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return int64(min)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset halves all counter values.
|
||||||
|
func (s *cmSketch) Reset() {
|
||||||
|
for _, r := range s.rows {
|
||||||
|
r.reset()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clear zeroes all counters.
|
||||||
|
func (s *cmSketch) Clear() {
|
||||||
|
for _, r := range s.rows {
|
||||||
|
r.clear()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// cmRow is a row of bytes, with each byte holding two 4-bit counters.
type cmRow []byte

// newCmRow returns a row able to hold numCounters 4-bit counters.
func newCmRow(numCounters int64) cmRow {
	return make(cmRow, numCounters/2)
}

// get returns the value of the n-th counter (0-15).
func (r cmRow) get(n uint64) byte {
	return byte(r[n/2]>>((n&1)*4)) & 0x0f
}

// increment adds one to the n-th counter, saturating at 15.
func (r cmRow) increment(n uint64) {
	// Index of the byte holding the counter.
	i := n / 2
	// Shift distance within the byte (even counters 0, odd counters 4).
	s := (n & 1) * 4
	// Counter value.
	v := (r[i] >> s) & 0x0f
	// Only increment if not max value (overflow wrap is bad for LFU).
	if v < 15 {
		r[i] += 1 << s
	}
}

// reset halves every counter in the row.
func (r cmRow) reset() {
	// Halve each counter. 0x77 masks out the bit that would otherwise leak
	// from each high nibble into its neighboring low nibble.
	for i := range r {
		r[i] = (r[i] >> 1) & 0x77
	}
}

// clear zeroes every counter in the row.
func (r cmRow) clear() {
	// Zero each counter.
	for i := range r {
		r[i] = 0
	}
}

// string renders the row as space-separated two-digit counter values.
// Intended for debugging only.
func (r cmRow) string() string {
	s := ""
	for i := uint64(0); i < uint64(len(r)*2); i++ {
		s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f)
	}
	// Trim the trailing space. Guard against an empty row so we don't
	// slice out of range (the original code panicked here for len(r)==0).
	if len(s) > 0 {
		s = s[:len(s)-1]
	}
	return s
}
|
||||||
|
|
||||||
|
// next2Power rounds x up to the next power of 2, if it's not already one.
func next2Power(x int64) int64 {
	// Smear the highest set bit of x-1 into every lower position, then add
	// one. Doubling the shift each pass covers all 64 bits in six steps.
	x--
	for shift := uint(1); shift < 64; shift <<= 1 {
		x |= x >> shift
	}
	x++
	return x
}
|
@ -0,0 +1,242 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ristretto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: Do we need this to be a separate struct from Item?
//
// storeItem is the internal representation of a cached entry as kept by the
// hash-map implementations in this file.
type storeItem struct {
	key        uint64      // primary hash used as the map key
	conflict   uint64      // secondary hash compared on reads/writes; 0 disables the check
	value      interface{} // the cached value itself
	expiration time.Time   // zero value means the entry never expires
}
|
||||||
|
|
||||||
|
// store is the interface fulfilled by all hash map implementations in this
// file. Some hash map implementations are better suited for certain data
// distributions than others, so this allows us to abstract that out for use
// in Ristretto.
//
// Every store is safe for concurrent usage.
type store interface {
	// Get returns the value associated with the key parameter.
	// The two uint64s are the key hash and the conflict hash.
	Get(uint64, uint64) (interface{}, bool)
	// Expiration returns the expiration time for this key.
	Expiration(uint64) time.Time
	// Set adds the key-value pair to the Map or updates the value if it's
	// already present. The key-value pair is passed as a pointer to an
	// item object.
	Set(*Item)
	// Del deletes the key-value pair from the Map.
	Del(uint64, uint64) (uint64, interface{})
	// Update attempts to update the key with a new value and returns true if
	// successful.
	Update(*Item) (interface{}, bool)
	// Cleanup removes items that have an expired TTL.
	Cleanup(policy policy, onEvict itemCallback)
	// Clear clears all contents of the store.
	Clear(onEvict itemCallback)
}
|
||||||
|
|
||||||
|
// newStore returns the default store implementation, which is currently the
// 256-way sharded map defined below.
func newStore() store {
	return newShardedMap()
}
|
||||||
|
|
||||||
|
const numShards uint64 = 256
|
||||||
|
|
||||||
|
type shardedMap struct {
|
||||||
|
shards []*lockedMap
|
||||||
|
expiryMap *expirationMap
|
||||||
|
}
|
||||||
|
|
||||||
|
func newShardedMap() *shardedMap {
|
||||||
|
sm := &shardedMap{
|
||||||
|
shards: make([]*lockedMap, int(numShards)),
|
||||||
|
expiryMap: newExpirationMap(),
|
||||||
|
}
|
||||||
|
for i := range sm.shards {
|
||||||
|
sm.shards[i] = newLockedMap(sm.expiryMap)
|
||||||
|
}
|
||||||
|
return sm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) {
|
||||||
|
return sm.shards[key%numShards].get(key, conflict)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Expiration(key uint64) time.Time {
|
||||||
|
return sm.shards[key%numShards].Expiration(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Set(i *Item) {
|
||||||
|
if i == nil {
|
||||||
|
// If item is nil make this Set a no-op.
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sm.shards[i.Key%numShards].Set(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) {
|
||||||
|
return sm.shards[key%numShards].Del(key, conflict)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Update(newItem *Item) (interface{}, bool) {
|
||||||
|
return sm.shards[newItem.Key%numShards].Update(newItem)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Cleanup(policy policy, onEvict itemCallback) {
|
||||||
|
sm.expiryMap.cleanup(sm, policy, onEvict)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sm *shardedMap) Clear(onEvict itemCallback) {
|
||||||
|
for i := uint64(0); i < numShards; i++ {
|
||||||
|
sm.shards[i].Clear(onEvict)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// lockedMap is a simple map protected by a RWMutex. It also keeps the shared
// expiration map in sync as entries are added, updated, and removed.
type lockedMap struct {
	sync.RWMutex
	data map[uint64]storeItem // entries keyed by their primary hash
	em   *expirationMap       // shared with every other shard
}

// newLockedMap returns an empty map wired to the given expiration map.
func newLockedMap(em *expirationMap) *lockedMap {
	return &lockedMap{
		data: make(map[uint64]storeItem),
		em:   em,
	}
}

// get returns the value for key, or (nil, false) when the key is absent,
// the conflict hash does not match, or the entry has expired.
func (m *lockedMap) get(key, conflict uint64) (interface{}, bool) {
	m.RLock()
	item, ok := m.data[key]
	m.RUnlock()
	if !ok {
		return nil, false
	}
	// A conflict value of 0 disables the conflict check.
	if conflict != 0 && (conflict != item.conflict) {
		return nil, false
	}

	// Handle expired items.
	if !item.expiration.IsZero() && time.Now().After(item.expiration) {
		return nil, false
	}
	return item.value, true
}

// Expiration returns the stored expiration time for key; it is the zero
// time when the key is absent or the entry never expires.
func (m *lockedMap) Expiration(key uint64) time.Time {
	m.RLock()
	defer m.RUnlock()
	return m.data[key].expiration
}

// Set inserts or overwrites the entry for i.Key, keeping the expiration map
// in sync. Nil items and conflicting overwrites are silently ignored.
func (m *lockedMap) Set(i *Item) {
	if i == nil {
		// If the item is nil make this Set a no-op.
		return
	}

	m.Lock()
	defer m.Unlock()
	item, ok := m.data[i.Key]

	if ok {
		// The item existed already. We need to check the conflict key and reject the
		// update if they do not match. Only after that the expiration map is updated.
		if i.Conflict != 0 && (i.Conflict != item.conflict) {
			return
		}
		m.em.update(i.Key, i.Conflict, item.expiration, i.Expiration)
	} else {
		// The value is not in the map already. There's no need to return anything.
		// Simply add the expiration map.
		m.em.add(i.Key, i.Conflict, i.Expiration)
	}

	m.data[i.Key] = storeItem{
		key:        i.Key,
		conflict:   i.Conflict,
		value:      i.Value,
		expiration: i.Expiration,
	}
}

// Del removes the entry for key (subject to the conflict check) and returns
// its conflict hash and value; (0, nil) when nothing was removed.
func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) {
	m.Lock()
	item, ok := m.data[key]
	if !ok {
		m.Unlock()
		return 0, nil
	}
	if conflict != 0 && (conflict != item.conflict) {
		m.Unlock()
		return 0, nil
	}

	// Only entries with an expiration were ever added to the expiration
	// map (see Set / expirationMap.add), so skip the removal otherwise.
	if !item.expiration.IsZero() {
		m.em.del(key, item.expiration)
	}

	delete(m.data, key)
	m.Unlock()
	return item.conflict, item.value
}

// Update replaces the value of an existing key and returns the previous
// value; (nil, false) when the key is absent or the conflict check fails.
func (m *lockedMap) Update(newItem *Item) (interface{}, bool) {
	m.Lock()
	item, ok := m.data[newItem.Key]
	if !ok {
		m.Unlock()
		return nil, false
	}
	if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) {
		m.Unlock()
		return nil, false
	}

	m.em.update(newItem.Key, newItem.Conflict, item.expiration, newItem.Expiration)
	m.data[newItem.Key] = storeItem{
		key:        newItem.Key,
		conflict:   newItem.Conflict,
		value:      newItem.Value,
		expiration: newItem.Expiration,
	}

	m.Unlock()
	return item.value, true
}

// Clear drops every entry, invoking onEvict (when non-nil) for each one.
// NOTE: the same Item struct is reused for every callback invocation, so
// callbacks must not retain the pointer past the call.
func (m *lockedMap) Clear(onEvict itemCallback) {
	m.Lock()
	i := &Item{}
	if onEvict != nil {
		for _, si := range m.data {
			i.Key = si.key
			i.Conflict = si.conflict
			i.Value = si.value
			onEvict(i)
		}
	}
	m.data = make(map[uint64]storeItem)
	m.Unlock()
}
|
@ -0,0 +1,20 @@
|
|||||||
|
#! /bin/sh

# starttest runs this module's tests with the race detector enabled.
starttest() {
    set -e
    GO111MODULE=on go test -race ./...
}

if [ -z "${TEAMCITY_VERSION}" ]; then
    # running locally, so start test in a container
    # TEAMCITY_VERSION=local will avoid recursive calls, when it would be running in container
    docker run --rm --name ristretto-test -ti \
        -v `pwd`:/go/src/github.com/dgraph-io/ristretto \
        --workdir /go/src/github.com/dgraph-io/ristretto \
        --env TEAMCITY_VERSION=local \
        golang:1.13 \
        sh test.sh
else
    # running in teamcity, since teamcity itself run this in container, let's simply run this
    starttest
fi
|
@ -0,0 +1,147 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package ristretto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// TODO: find the optimal value or make it configurable.
|
||||||
|
bucketDurationSecs = int64(5)
|
||||||
|
)
|
||||||
|
|
||||||
|
func storageBucket(t time.Time) int64 {
|
||||||
|
return (t.Unix() / bucketDurationSecs) + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanupBucket(t time.Time) int64 {
|
||||||
|
// The bucket to cleanup is always behind the storage bucket by one so that
|
||||||
|
// no elements in that bucket (which might not have expired yet) are deleted.
|
||||||
|
return storageBucket(t) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// bucket type is a map of key to conflict.
|
||||||
|
type bucket map[uint64]uint64
|
||||||
|
|
||||||
|
// expirationMap is a map of bucket number to the corresponding bucket.
|
||||||
|
type expirationMap struct {
|
||||||
|
sync.RWMutex
|
||||||
|
buckets map[int64]bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
func newExpirationMap() *expirationMap {
|
||||||
|
return &expirationMap{
|
||||||
|
buckets: make(map[int64]bucket),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *expirationMap) add(key, conflict uint64, expiration time.Time) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Items that don't expire don't need to be in the expiration map.
|
||||||
|
if expiration.IsZero() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketNum := storageBucket(expiration)
|
||||||
|
m.Lock()
|
||||||
|
defer m.Unlock()
|
||||||
|
|
||||||
|
b, ok := m.buckets[bucketNum]
|
||||||
|
if !ok {
|
||||||
|
b = make(bucket)
|
||||||
|
m.buckets[bucketNum] = b
|
||||||
|
}
|
||||||
|
b[key] = conflict
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *expirationMap) update(key, conflict uint64, oldExpTime, newExpTime time.Time) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Lock()
|
||||||
|
defer m.Unlock()
|
||||||
|
|
||||||
|
oldBucketNum := storageBucket(oldExpTime)
|
||||||
|
oldBucket, ok := m.buckets[oldBucketNum]
|
||||||
|
if ok {
|
||||||
|
delete(oldBucket, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
newBucketNum := storageBucket(newExpTime)
|
||||||
|
newBucket, ok := m.buckets[newBucketNum]
|
||||||
|
if !ok {
|
||||||
|
newBucket = make(bucket)
|
||||||
|
m.buckets[newBucketNum] = newBucket
|
||||||
|
}
|
||||||
|
newBucket[key] = conflict
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *expirationMap) del(key uint64, expiration time.Time) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
bucketNum := storageBucket(expiration)
|
||||||
|
m.Lock()
|
||||||
|
defer m.Unlock()
|
||||||
|
_, ok := m.buckets[bucketNum]
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delete(m.buckets[bucketNum], key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanup removes all the items in the bucket that was just completed. It deletes
|
||||||
|
// those items from the store, and calls the onEvict function on those items.
|
||||||
|
// This function is meant to be called periodically.
|
||||||
|
func (m *expirationMap) cleanup(store store, policy policy, onEvict itemCallback) {
|
||||||
|
if m == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
m.Lock()
|
||||||
|
now := time.Now()
|
||||||
|
bucketNum := cleanupBucket(now)
|
||||||
|
keys := m.buckets[bucketNum]
|
||||||
|
delete(m.buckets, bucketNum)
|
||||||
|
m.Unlock()
|
||||||
|
|
||||||
|
for key, conflict := range keys {
|
||||||
|
// Sanity check. Verify that the store agrees that this key is expired.
|
||||||
|
if store.Expiration(key).After(now) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
cost := policy.Cost(key)
|
||||||
|
policy.Del(key)
|
||||||
|
_, value := store.Del(key, conflict)
|
||||||
|
|
||||||
|
if onEvict != nil {
|
||||||
|
onEvict(&Item{Key: key,
|
||||||
|
Conflict: conflict,
|
||||||
|
Value: value,
|
||||||
|
Cost: cost,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,64 @@
|
|||||||
|
bbloom.go
|
||||||
|
|
||||||
|
// The MIT License (MIT)
|
||||||
|
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
|
||||||
|
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
// this software and associated documentation files (the "Software"), to deal in
|
||||||
|
// the Software without restriction, including without limitation the rights to
|
||||||
|
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
// subject to the following conditions:
|
||||||
|
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
rtutil.go
|
||||||
|
|
||||||
|
// MIT License
|
||||||
|
|
||||||
|
// Copyright (c) 2019 Ewan Chou
|
||||||
|
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
|
||||||
|
Modifications:
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
@ -0,0 +1,129 @@
|
|||||||
|
## bbloom: a bitset Bloom filter for go/golang
|
||||||
|
===
|
||||||
|
|
||||||
|
package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
|
||||||
|
|
||||||
|
NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
|
||||||
|
|
||||||
|
===
|
||||||
|
|
||||||
|
changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
|
||||||
|
|
||||||
|
This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
|
||||||
|
Nonetheless bbloom should work with any other form of entries.
|
||||||
|
|
||||||
|
~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
|
||||||
|
|
||||||
|
Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash had been ported to Go by Dmitry Chestnykh (github.com/dchest/siphash).
|
||||||
|
|
||||||
|
Minimum hashset size is: 512 ([4]uint64; will be set automatically).
|
||||||
|
|
||||||
|
###install
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get github.com/AndreasBriese/bbloom
|
||||||
|
```
|
||||||
|
|
||||||
|
###test
|
||||||
|
+ change to folder ../bbloom
|
||||||
|
+ create wordlist in file "words.txt" (you might use `python permut.py`)
|
||||||
|
+ run 'go test -bench=.' within the folder
|
||||||
|
|
||||||
|
```go
|
||||||
|
go test -bench=.
|
||||||
|
```
|
||||||
|
|
||||||
|
~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
|
||||||
|
|
||||||
|
using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively)
|
||||||
|
|
||||||
|
### usage
|
||||||
|
|
||||||
|
after installation add
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
...
|
||||||
|
"github.com/AndreasBriese/bbloom"
|
||||||
|
...
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
at your header. In the program use
|
||||||
|
|
||||||
|
```go
|
||||||
|
// create a bloom filter for 65536 items and 1 % wrong-positive ratio
|
||||||
|
bf := bbloom.New(float64(1<<16), float64(0.01))
|
||||||
|
|
||||||
|
// or
|
||||||
|
// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
|
||||||
|
// bf = bbloom.New(float64(650000), float64(7))
|
||||||
|
// or
|
||||||
|
bf = bbloom.New(650000.0, 7.0)
|
||||||
|
|
||||||
|
// add one item
|
||||||
|
bf.Add([]byte("butter"))
|
||||||
|
|
||||||
|
// Number of elements added is exposed now
|
||||||
|
// Note: ElemNum will not be included in JSON export (for compatability to older version)
|
||||||
|
nOfElementsInFilter := bf.ElemNum
|
||||||
|
|
||||||
|
// check if item is in the filter
|
||||||
|
isIn := bf.Has([]byte("butter")) // should be true
|
||||||
|
isNotIn := bf.Has([]byte("Butter")) // should be false
|
||||||
|
|
||||||
|
// 'add only if item is new' to the bloomfilter
|
||||||
|
added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
|
||||||
|
added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
|
||||||
|
|
||||||
|
// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
|
||||||
|
// add one item
|
||||||
|
bf.AddTS([]byte("peanutbutter"))
|
||||||
|
// check if item is in the filter
|
||||||
|
isIn = bf.HasTS([]byte("peanutbutter")) // should be true
|
||||||
|
isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
|
||||||
|
// 'add only if item is new' to the bloomfilter
|
||||||
|
added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
|
||||||
|
added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
|
||||||
|
|
||||||
|
// convert to JSON ([]byte)
|
||||||
|
Json := bf.JSONMarshal()
|
||||||
|
|
||||||
|
// bloomfilters Mutex is exposed for external un-/locking
|
||||||
|
// i.e. mutex lock while doing JSON conversion
|
||||||
|
bf.Mtx.Lock()
|
||||||
|
Json = bf.JSONMarshal()
|
||||||
|
bf.Mtx.Unlock()
|
||||||
|
|
||||||
|
// restore a bloom filter from storage
|
||||||
|
bfNew := bbloom.JSONUnmarshal(Json)
|
||||||
|
|
||||||
|
isInNew := bfNew.Has([]byte("butter")) // should be true
|
||||||
|
isNotInNew := bfNew.Has([]byte("Butter")) // should be false
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
to work with the bloom filter.
|
||||||
|
|
||||||
|
### why 'fast'?
|
||||||
|
|
||||||
|
It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint:
|
||||||
|
|
||||||
|
|
||||||
|
Bloom filter (filter size 524288, 7 hashlocs)
|
||||||
|
github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
|
||||||
|
github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
|
||||||
|
github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
|
||||||
|
github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
|
||||||
|
|
||||||
|
github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
|
||||||
|
github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
|
||||||
|
github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
|
||||||
|
github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
|
||||||
|
github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
|
||||||
|
github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
|
||||||
|
|
||||||
|
(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
|
||||||
|
|
||||||
|
|
||||||
|
With 32-bit bloom filters (bloom32) using the modified sdbm hash, bloom32 does its hashing with only two bit shifts, one xor, and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16), float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
|
@ -0,0 +1,403 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/bits"
|
||||||
|
"math/rand"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/dustin/go-humanize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Allocator amortizes the cost of small allocations by allocating memory in
// bigger chunks. Internally it uses z.Calloc to allocate memory. Once
// allocated, the memory is not moved, so it is safe to use the allocated bytes
// to unsafe cast them to Go struct pointers. Maintaining a freelist is slow.
// Instead, Allocator only allocates memory, with the idea that finally we
// would just release the entire Allocator.
type Allocator struct {
	sync.Mutex
	compIdx uint64   // Stores bufIdx in 32 MSBs and posIdx in 32 LSBs.
	buffers [][]byte // chunks obtained from Calloc; buffers[0] is created by NewAllocator
	Ref     uint64   // unique id; also the key into the global allocs registry
	Tag     string   // free-form label used for accounting in Allocators()
}
|
||||||
|
|
||||||
|
// allocs keeps references to all Allocators, so we can safely discard them later.
var allocsMu *sync.Mutex
var allocRef uint64              // next Ref to hand out; high bits randomized per process in init
var allocs map[uint64]*Allocator // registry of live allocators, keyed by Ref
var calculatedLog2 []int         // precomputed floor(log2(i)) for i in [0, 1024]; see log2()

func init() {
	allocsMu = new(sync.Mutex)
	allocs = make(map[uint64]*Allocator)

	// Set up a unique Ref per process.
	rand.Seed(time.Now().UnixNano())
	allocRef = uint64(rand.Int63n(1<<16)) << 48

	// Fill the log2 lookup table used by the fast path of log2().
	calculatedLog2 = make([]int, 1025)
	for i := 1; i <= 1024; i++ {
		calculatedLog2[i] = int(math.Log2(float64(i)))
	}
}
|
||||||
|
|
||||||
|
// NewAllocator creates an allocator starting with the given size.
|
||||||
|
func NewAllocator(sz int, tag string) *Allocator {
|
||||||
|
ref := atomic.AddUint64(&allocRef, 1)
|
||||||
|
// We should not allow a zero sized page because addBufferWithMinSize
|
||||||
|
// will run into an infinite loop trying to double the pagesize.
|
||||||
|
if sz < 512 {
|
||||||
|
sz = 512
|
||||||
|
}
|
||||||
|
a := &Allocator{
|
||||||
|
Ref: ref,
|
||||||
|
buffers: make([][]byte, 64),
|
||||||
|
Tag: tag,
|
||||||
|
}
|
||||||
|
l2 := uint64(log2(sz))
|
||||||
|
if bits.OnesCount64(uint64(sz)) > 1 {
|
||||||
|
l2 += 1
|
||||||
|
}
|
||||||
|
a.buffers[0] = Calloc(1<<l2, a.Tag)
|
||||||
|
|
||||||
|
allocsMu.Lock()
|
||||||
|
allocs[ref] = a
|
||||||
|
allocsMu.Unlock()
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset rewinds the allocator's bump position (compIdx) to zero. The
// underlying buffers are kept, so their memory can be reused.
func (a *Allocator) Reset() {
	atomic.StoreUint64(&a.compIdx, 0)
}
|
||||||
|
|
||||||
|
func Allocators() string {
|
||||||
|
allocsMu.Lock()
|
||||||
|
tags := make(map[string]uint64)
|
||||||
|
num := make(map[string]int)
|
||||||
|
for _, ac := range allocs {
|
||||||
|
tags[ac.Tag] += ac.Allocated()
|
||||||
|
num[ac.Tag] += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
for tag, sz := range tags {
|
||||||
|
fmt.Fprintf(&buf, "Tag: %s Num: %d Size: %s . ", tag, num[tag], humanize.IBytes(sz))
|
||||||
|
}
|
||||||
|
allocsMu.Unlock()
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// String renders a debug dump of the allocator: the length and cumulative
// size of each populated buffer, the current composite index split into
// buffer/position, and the total Size().
func (a *Allocator) String() string {
	var s strings.Builder
	s.WriteString(fmt.Sprintf("Allocator: %x\n", a.Ref))
	var cum int
	for i, b := range a.buffers {
		cum += len(b)
		// Buffers are populated as a contiguous prefix; the first empty
		// one marks the end.
		if len(b) == 0 {
			break
		}
		s.WriteString(fmt.Sprintf("idx: %d len: %d cum: %d\n", i, len(b), cum))
	}
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	s.WriteString(fmt.Sprintf("bi: %d pi: %d\n", bi, pi))
	s.WriteString(fmt.Sprintf("Size: %d\n", a.Size()))
	return s.String()
}
|
||||||
|
|
||||||
|
// AllocatorFrom would return the allocator corresponding to the ref.
|
||||||
|
func AllocatorFrom(ref uint64) *Allocator {
|
||||||
|
allocsMu.Lock()
|
||||||
|
a := allocs[ref]
|
||||||
|
allocsMu.Unlock()
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
|
||||||
|
// parse splits a composite index into its buffer index (upper 32 bits)
// and the position within that buffer (lower 32 bits).
func parse(pos uint64) (bufIdx, posIdx int) {
	bufIdx = int(pos >> 32)
	posIdx = int(pos & 0xFFFFFFFF)
	return bufIdx, posIdx
}
|
||||||
|
|
||||||
|
// Size returns the size of the allocations so far.
func (a *Allocator) Size() int {
	pos := atomic.LoadUint64(&a.compIdx)
	bi, pi := parse(pos)
	var sz int
	for i, b := range a.buffers {
		// Buffers before the current one are fully consumed.
		if i < bi {
			sz += len(b)
			continue
		}
		// Current buffer: only pi bytes of it are used.
		sz += pi
		return sz
	}
	// Unreachable while bi < len(a.buffers), which addBufferAt guarantees
	// by panicking before the buffer array could be exceeded.
	panic("Size should not reach here")
}
|
||||||
|
|
||||||
|
func log2(sz int) int {
|
||||||
|
if sz < len(calculatedLog2) {
|
||||||
|
return calculatedLog2[sz]
|
||||||
|
}
|
||||||
|
pow := 10
|
||||||
|
sz >>= 10
|
||||||
|
for sz > 1 {
|
||||||
|
sz >>= 1
|
||||||
|
pow++
|
||||||
|
}
|
||||||
|
return pow
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Allocator) Allocated() uint64 {
|
||||||
|
var alloc int
|
||||||
|
for _, b := range a.buffers {
|
||||||
|
alloc += cap(b)
|
||||||
|
}
|
||||||
|
return uint64(alloc)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TrimTo frees trailing buffers so that roughly max bytes of buffer space
// remain. Once the cumulative length reaches max, that buffer and all the
// following ones are released and their slots set to nil.
func (a *Allocator) TrimTo(max int) {
	var alloc int
	for i, b := range a.buffers {
		// Populated buffers form a contiguous prefix.
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		if alloc < max {
			continue
		}
		Free(b)
		a.buffers[i] = nil
	}
}
|
||||||
|
|
||||||
|
// Release would release the memory back. Remember to make this call to avoid memory leaks.
func (a *Allocator) Release() {
	// Safe to call on a nil allocator.
	if a == nil {
		return
	}

	var alloc int
	for _, b := range a.buffers {
		if len(b) == 0 {
			break
		}
		alloc += len(b)
		Free(b)
	}

	// Unregister from the global map so AllocatorFrom no longer finds it.
	allocsMu.Lock()
	delete(allocs, a.Ref)
	allocsMu.Unlock()
}
|
||||||
|
|
||||||
|
// maxAlloc caps any single allocation (and any single buffer) at 1 GiB.
const maxAlloc = 1 << 30

// MaxAlloc returns the largest allocation size supported by the allocator.
func (a *Allocator) MaxAlloc() int {
	return maxAlloc
}
|
||||||
|
|
||||||
|
// nodeAlign is the alignment mask for 8-byte (uint64) alignment.
const nodeAlign = unsafe.Sizeof(uint64(0)) - 1

// AllocateAligned returns a zeroed slice of sz bytes whose first byte is
// 8-byte aligned. It over-allocates by up to nodeAlign bytes so an aligned
// start can always be found inside the allocation.
func (a *Allocator) AllocateAligned(sz int) []byte {
	tsz := sz + int(nodeAlign)
	out := a.Allocate(tsz)
	// We are reusing allocators. In that case, it's important to zero out the memory allocated
	// here. We don't always zero it out (in Allocate), because other functions would be immediately
	// overwriting the allocated slices anyway (see Copy).
	ZeroOut(out, 0, len(out))

	// Round the start address up to the next 8-byte boundary and slice from there.
	addr := uintptr(unsafe.Pointer(&out[0]))
	aligned := (addr + nodeAlign) & ^nodeAlign
	start := int(aligned - addr)

	return out[start : start+sz]
}
|
||||||
|
|
||||||
|
func (a *Allocator) Copy(buf []byte) []byte {
|
||||||
|
if a == nil {
|
||||||
|
return append([]byte{}, buf...)
|
||||||
|
}
|
||||||
|
out := a.Allocate(len(buf))
|
||||||
|
copy(out, buf)
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// addBufferAt ensures a buffer of at least minSz bytes exists at or after
// bufIdx. It scans forward past populated buffers (returning early if one
// already satisfies minSz) and allocates a new buffer, doubling the size of
// the previous one, at the first empty slot. Callers must hold a.Lock.
func (a *Allocator) addBufferAt(bufIdx, minSz int) {
	for {
		if bufIdx >= len(a.buffers) {
			panic(fmt.Sprintf("Allocator can not allocate more than %d buffers", len(a.buffers)))
		}
		if len(a.buffers[bufIdx]) == 0 {
			break
		}
		if minSz <= len(a.buffers[bufIdx]) {
			// No need to do anything. We already have a buffer which can satisfy minSz.
			return
		}
		bufIdx++
	}
	// bufIdx > 0 because buffers[0] is always populated by NewAllocator.
	assert(bufIdx > 0)
	// We need to allocate a new buffer.
	// Make pageSize double of the last allocation.
	pageSize := 2 * len(a.buffers[bufIdx-1])
	// Ensure pageSize is bigger than sz.
	for pageSize < minSz {
		pageSize *= 2
	}
	// If bigger than maxAlloc, trim to maxAlloc.
	if pageSize > maxAlloc {
		pageSize = maxAlloc
	}

	buf := Calloc(pageSize, a.Tag)
	assert(len(a.buffers[bufIdx]) == 0)
	a.buffers[bufIdx] = buf
}
|
||||||
|
|
||||||
|
// Allocate returns a slice of sz bytes carved out of the allocator's
// buffers. The fast path is a single lock-free atomic bump of compIdx;
// the slow path (current buffer exhausted) takes the lock, adds a new
// buffer and retries. A nil allocator falls back to make([]byte, sz).
func (a *Allocator) Allocate(sz int) []byte {
	if a == nil {
		return make([]byte, sz)
	}
	if sz > maxAlloc {
		panic(fmt.Sprintf("Unable to allocate more than %d\n", maxAlloc))
	}
	if sz == 0 {
		return nil
	}
	for {
		// Reserve sz bytes by bumping the composite (buffer, position) index.
		pos := atomic.AddUint64(&a.compIdx, uint64(sz))
		bufIdx, posIdx := parse(pos)
		buf := a.buffers[bufIdx]
		if posIdx > len(buf) {
			// Reservation overflowed the current buffer: switch to a new one
			// under the lock.
			a.Lock()
			newPos := atomic.LoadUint64(&a.compIdx)
			newBufIdx, _ := parse(newPos)
			// Another goroutine already advanced the buffer; just retry.
			if newBufIdx != bufIdx {
				a.Unlock()
				continue
			}
			a.addBufferAt(bufIdx+1, sz)
			atomic.StoreUint64(&a.compIdx, uint64((bufIdx+1)<<32))
			a.Unlock()
			// We added a new buffer. Let's acquire slice the right way by going back to the top.
			continue
		}
		data := buf[posIdx-sz : posIdx]
		return data
	}
}
|
||||||
|
|
||||||
|
// AllocatorPool recycles Allocators between uses to avoid repeated
// allocate/free churn. Idle allocators are gradually released by a
// background goroutine (see freeupAllocators).
type AllocatorPool struct {
	numGets int64           // count of Get calls; used to detect idleness.
	allocCh chan *Allocator // buffered channel holding reusable allocators.
	closer  *Closer         // coordinates shutdown of the janitor goroutine.
}
|
||||||
|
|
||||||
|
// NewAllocatorPool creates a pool that retains up to sz idle allocators
// and starts the background goroutine that frees them when unused.
func NewAllocatorPool(sz int) *AllocatorPool {
	a := &AllocatorPool{
		allocCh: make(chan *Allocator, sz),
		closer:  NewCloser(1),
	}
	go a.freeupAllocators()
	return a
}
|
||||||
|
|
||||||
|
// Get returns a pooled allocator if one is available (reset and retagged),
// otherwise a brand new allocator of the given size. A nil pool always
// creates a fresh allocator.
func (p *AllocatorPool) Get(sz int, tag string) *Allocator {
	if p == nil {
		return NewAllocator(sz, tag)
	}
	atomic.AddInt64(&p.numGets, 1)
	select {
	case alloc := <-p.allocCh:
		alloc.Reset()
		alloc.Tag = tag
		return alloc
	default:
		// Pool is empty; fall back to a new allocator.
		return NewAllocator(sz, tag)
	}
}
|
||||||
|
// Return hands an allocator back to the pool. It trims the allocator to
// 400 MiB first; if the pool is full (or nil) the allocator is released
// outright.
func (p *AllocatorPool) Return(a *Allocator) {
	if a == nil {
		return
	}
	if p == nil {
		a.Release()
		return
	}
	a.TrimTo(400 << 20)

	select {
	case p.allocCh <- a:
		return
	default:
		// Pool is at capacity; free the allocator instead of queueing it.
		a.Release()
	}
}
|
||||||
|
|
||||||
|
// Release shuts the pool down: it signals the janitor goroutine to drain
// and free all pooled allocators, and waits for it to finish.
func (p *AllocatorPool) Release() {
	if p == nil {
		return
	}
	p.closer.SignalAndWait()
}
|
||||||
|
|
||||||
|
// freeupAllocators is the pool's janitor goroutine. Every 2 seconds it
// checks whether any Get calls happened since the previous tick; if the
// pool has been idle, it releases one pooled allocator. On shutdown it
// drains and releases everything in the channel.
//
// NOTE(review): Release closes allocCh here; a Return racing with (or
// after) pool Release would send on a closed channel and panic — confirm
// callers never Return after Release.
func (p *AllocatorPool) freeupAllocators() {
	defer p.closer.Done()

	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	// releaseOne frees a single pooled allocator, if any is queued.
	releaseOne := func() bool {
		select {
		case alloc := <-p.allocCh:
			alloc.Release()
			return true
		default:
			return false
		}
	}

	var last int64
	for {
		select {
		case <-p.closer.HasBeenClosed():
			close(p.allocCh)
			for alloc := range p.allocCh {
				alloc.Release()
			}
			return

		case <-ticker.C:
			gets := atomic.LoadInt64(&p.numGets)
			if gets != last {
				// Some retrievals were made since the last time. So, let's avoid doing a release.
				last = gets
				continue
			}
			releaseOne()
		}
	}
}
|
@ -0,0 +1,211 @@
|
|||||||
|
// The MIT License (MIT)
|
||||||
|
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
|
||||||
|
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||||
|
// this software and associated documentation files (the "Software"), to deal in
|
||||||
|
// the Software without restriction, including without limitation the rights to
|
||||||
|
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||||
|
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||||
|
// subject to the following conditions:
|
||||||
|
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||||
|
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||||
|
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||||
|
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||||
|
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"math"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// helper
// mask[i] selects bit i within a byte (1 << i).
var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
|
||||||
|
|
||||||
|
// getSize rounds ui64 up to the next power of two (with a floor of 512)
// and returns that power together with its base-2 exponent.
func getSize(ui64 uint64) (size uint64, exponent uint64) {
	const floor = 512
	if ui64 < floor {
		ui64 = floor
	}
	size = 1
	for ; size < ui64; exponent++ {
		size <<= 1
	}
	return size, exponent
}
|
||||||
|
|
||||||
|
// calcSizeByWrongPositives derives the bitset size (in bits) and the
// number of hash locations from a target entry count and false-positive
// probability, using m = -n*ln(p)/ln(2)^2 and k = ceil(ln(2)*m/n).
func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
	// 0.69314718056 approximates ln(2).
	bitCount := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
	hashLocs := math.Ceil(float64(0.69314718056) * bitCount / numEntries)
	return uint64(bitCount), uint64(hashLocs)
}
|
||||||
|
|
||||||
|
// NewBloomFilter returns a new bloomfilter. It accepts exactly two
// parameters: either (entries, hashLocations) when the second value is
// >= 1, or (entries, falsePositiveRate) when it is < 1. Any other arity
// aborts the process via glog.Fatal.
func NewBloomFilter(params ...float64) (bloomfilter *Bloom) {
	var entries, locs uint64
	if len(params) == 2 {
		if params[1] < 1 {
			entries, locs = calcSizeByWrongPositives(params[0], params[1])
		} else {
			entries, locs = uint64(params[0]), uint64(params[1])
		}
	} else {
		glog.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" +
			" i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," +
			" float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
	}
	// Round the bit count up to a power of two so the size can be used as a mask.
	size, exponent := getSize(entries)
	bloomfilter = &Bloom{
		sizeExp: exponent,
		size:    size - 1,
		setLocs: locs,
		shift:   64 - exponent,
	}
	bloomfilter.Size(size)
	return bloomfilter
}
|
||||||
|
|
||||||
|
// Bloom filter
type Bloom struct {
	bitset  []uint64 // underlying bit array, stored as 64-bit words.
	ElemNum uint64   // incremented once per Set during Add (i.e. per hash location, not per entry).
	sizeExp uint64   // log2 of the bitset size in bits.
	size    uint64   // bitset size in bits minus one; used as an index mask.
	setLocs uint64   // number of bit locations set per added entry.
	shift   uint64   // 64 - sizeExp; splits a hash into high/low parts.
}
|
||||||
|
|
||||||
|
// <--- http://www.cse.yorku.ca/~oz/hash.html
|
||||||
|
// modified Berkeley DB Hash (32bit)
|
||||||
|
// hash is casted to l, h = 16bit fragments
|
||||||
|
// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
|
||||||
|
// hash := uint64(len(*b))
|
||||||
|
// for _, c := range *b {
|
||||||
|
// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
|
||||||
|
// }
|
||||||
|
// h = hash >> bl.shift
|
||||||
|
// l = hash << bl.shift >> bl.shift
|
||||||
|
// return l, h
|
||||||
|
// }
|
||||||
|
|
||||||
|
// Add adds hash of a key to the bloomfilter.
func (bl *Bloom) Add(hash uint64) {
	// Split the hash into high/low halves for double hashing:
	// location i is (h + i*l) masked to the bitset size.
	h := hash >> bl.shift
	l := hash << bl.shift >> bl.shift
	for i := uint64(0); i < bl.setLocs; i++ {
		bl.Set((h + i*l) & bl.size)
		// NOTE(review): ElemNum is bumped once per hash location, so it
		// counts bit-set operations rather than distinct entries — confirm
		// this is intended before using it as an element count.
		bl.ElemNum++
	}
}
|
||||||
|
|
||||||
|
// Has checks if bit(s) for entry hash is/are set,
|
||||||
|
// returns true if the hash was added to the Bloom Filter.
|
||||||
|
func (bl Bloom) Has(hash uint64) bool {
|
||||||
|
h := hash >> bl.shift
|
||||||
|
l := hash << bl.shift >> bl.shift
|
||||||
|
for i := uint64(0); i < bl.setLocs; i++ {
|
||||||
|
if !bl.IsSet((h + i*l) & bl.size) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddIfNotHas only Adds hash, if it's not present in the bloomfilter.
|
||||||
|
// Returns true if hash was added.
|
||||||
|
// Returns false if hash was already registered in the bloomfilter.
|
||||||
|
func (bl *Bloom) AddIfNotHas(hash uint64) bool {
|
||||||
|
if bl.Has(hash) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
bl.Add(hash)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// TotalSize returns the total size of the bloom filter.
func (bl *Bloom) TotalSize() int {
	// len(bl.bitset) 64-bit words of bitset data, plus 8 bytes for each of
	// the five scalar uint64 fields of Bloom. (The slice header itself is
	// not counted.)
	return len(bl.bitset)*8 + 5*8
}
|
||||||
|
|
||||||
|
// Size makes Bloom filter with as bitset of size sz.
func (bl *Bloom) Size(sz uint64) {
	// sz is in bits; each uint64 word holds 64 of them, hence sz>>6 words.
	bl.bitset = make([]uint64, sz>>6)
}
|
||||||
|
|
||||||
|
// Clear resets the Bloom filter.
|
||||||
|
func (bl *Bloom) Clear() {
|
||||||
|
for i := range bl.bitset {
|
||||||
|
bl.bitset[i] = 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set sets the bit[idx] of bitset.
func (bl *Bloom) Set(idx uint64) {
	// Address the byte holding bit idx: word idx>>6 of the uint64 array,
	// then byte (idx%64)>>3 within that word, via raw pointer arithmetic.
	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
	// OR in the single-bit mask for position idx%8 within that byte.
	*(*uint8)(ptr) |= mask[idx%8]
}
|
||||||
|
|
||||||
|
// IsSet checks if bit[idx] of bitset is set, returns true/false.
func (bl *Bloom) IsSet(idx uint64) bool {
	// Same byte-addressing scheme as Set: word idx>>6, byte (idx%64)>>3.
	ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
	// Shift the target bit down to position 0 and test it.
	r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
	return r == 1
}
|
||||||
|
|
||||||
|
// bloomJSONImExport
// Im/Export structure used by JSONMarshal / JSONUnmarshal
type bloomJSONImExport struct {
	FilterSet []byte // raw bitset bytes.
	SetLocs   uint64 // number of hash locations per entry.
}
|
||||||
|
|
||||||
|
// NewWithBoolset takes a []byte slice and number of locs per entry,
// returns the bloomfilter with a bitset populated according to the input []byte.
func newWithBoolset(bs *[]byte, locs uint64) *Bloom {
	// len(*bs)<<3 converts bytes to bits for the filter size.
	bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs))
	// Copy the input bytes directly into the uint64-backed bitset.
	for i, b := range *bs {
		*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
	}
	return bloomfilter
}
|
||||||
|
|
||||||
|
// JSONUnmarshal takes JSON-Object (type bloomJSONImExport) as []bytes
// returns bloom32 / bloom64 object.
func JSONUnmarshal(dbData []byte) (*Bloom, error) {
	bloomImEx := bloomJSONImExport{}
	if err := json.Unmarshal(dbData, &bloomImEx); err != nil {
		return nil, err
	}
	// NOTE(review): bytes.NewBuffer(b).Bytes() returns b unchanged, so this
	// round-trip is a no-op; it is kept as-is (it is also the only use of
	// the bytes import in this file).
	buf := bytes.NewBuffer(bloomImEx.FilterSet)
	bs := buf.Bytes()
	bf := newWithBoolset(&bs, bloomImEx.SetLocs)
	return bf, nil
}
|
||||||
|
|
||||||
|
// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte.
func (bl Bloom) JSONMarshal() []byte {
	bloomImEx := bloomJSONImExport{}
	bloomImEx.SetLocs = bl.setLocs
	// Export the bitset byte-by-byte; len(bitset)<<3 is the byte length of
	// the uint64 words.
	bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
	for i := range bloomImEx.FilterSet {
		bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) +
			uintptr(i)))
	}
	data, err := json.Marshal(bloomImEx)
	if err != nil {
		glog.Fatal("json.Marshal failed: ", err)
	}
	return data
}
|
@ -0,0 +1,710 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/dgraph-io/ristretto/z/simd"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	// pageSize is the OS page size; each B+ tree node occupies one page.
	pageSize = os.Getpagesize()
	// maxKeys: each key-value pair takes 16 bytes, with one slot reserved
	// (the last slot stores the node's pageID — see newNode).
	maxKeys = (pageSize / 16) - 1
	// oneThird of maxKeys; used by the (unused) sibling-sharing heuristic.
	oneThird = int(float64(maxKeys) / 3)
)
|
||||||
|
|
||||||
|
const (
	// absoluteMax is the largest storable key; it doubles as the rightmost
	// sentinel installed by initRootNode.
	absoluteMax = uint64(math.MaxUint64 - 1)
	// minSize is the initial backing-buffer size (1 MiB).
	minSize = 1 << 20
)
|
||||||
|
|
||||||
|
// Tree represents the structure for custom mmaped B+ tree.
// It supports keys in range [1, math.MaxUint64-1] and values [1, math.Uint64].
type Tree struct {
	buffer   *Buffer   // backing storage (in-memory or file-backed).
	data     []byte    // current view of buffer's bytes; refreshed after growth.
	nextPage uint64    // next never-used page ID to hand out.
	freePage uint64    // head of the free-page linked list (0 = none).
	stats    TreeStats // the Calculated fields; Derived ones come from Stats().
}
|
||||||
|
|
||||||
|
// initRootNode allocates the root node (page 1) and installs the sentinel
// key so every real key sorts before it.
func (t *Tree) initRootNode() {
	// This is the root node.
	t.newNode(0)
	// This acts as the rightmost pointer (all the keys are <= this key).
	t.Set(absoluteMax, 0)
}
|
||||||
|
|
||||||
|
// NewTree returns an in-memory B+ tree.
|
||||||
|
func NewTree(tag string) *Tree {
|
||||||
|
const defaultTag = "tree"
|
||||||
|
if tag == "" {
|
||||||
|
tag = defaultTag
|
||||||
|
}
|
||||||
|
t := &Tree{buffer: NewBuffer(minSize, tag)}
|
||||||
|
t.Reset()
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTreePersistent returns a persistent on-disk B+ tree.
func NewTreePersistent(path string) (*Tree, error) {
	t := &Tree{}
	var err error

	// Open the buffer from disk and set it to the maximum allocated size.
	t.buffer, err = NewBufferPersistent(path, minSize)
	if err != nil {
		return nil, err
	}
	t.buffer.offset = uint64(len(t.buffer.buf))
	t.data = t.buffer.Bytes()

	// pageID can never be 0 if the tree has been initialized.
	root := t.node(1)
	isInitialized := root.pageID() != 0

	if !isInitialized {
		// Fresh file: set up page bookkeeping and the root node.
		t.nextPage = 1
		t.freePage = 0
		t.initRootNode()
	} else {
		// Existing file: reconstruct in-memory bookkeeping from the pages.
		t.reinit()
	}

	return t, nil
}
|
||||||
|
|
||||||
|
// reinit sets the internal variables of a Tree, which are normally stored
// in-memory, but are lost when loading from disk.
func (t *Tree) reinit() {
	// Calculate t.nextPage by finding the first node whose pageID is not set.
	t.nextPage = 1
	for int(t.nextPage)*pageSize < len(t.data) {
		n := t.node(t.nextPage)
		if n.pageID() == 0 {
			break
		}
		t.nextPage++
	}
	maxPageId := t.nextPage - 1

	// Calculate t.freePage by finding the page to which no other page points.
	// This would be the head of the page linked list.
	// tailPages[i] is true if pageId i+1 is not the head of the list.
	tailPages := make([]bool, maxPageId)
	// Mark all pages containing nodes as tail pages.
	t.Iterate(func(n node) {
		i := n.pageID() - 1
		tailPages[i] = true
		// If this is a leaf node, increment the stats.
		if n.isLeaf() {
			t.stats.NumLeafKeys += n.numKeys()
		}
	})
	// pointedPages is a list of page IDs that the tail pages point to.
	pointedPages := make([]uint64, 0)
	for i, isTail := range tailPages {
		if !isTail {
			pageId := uint64(i) + 1
			// Skip if nextPageId = 0, as that is equivalent to null page.
			if nextPageId := t.node(pageId).uint64(0); nextPageId != 0 {
				pointedPages = append(pointedPages, nextPageId)
			}
			t.stats.NumPagesFree++
		}
	}

	// Mark all pages being pointed to as tail pages.
	for _, pageId := range pointedPages {
		i := pageId - 1
		tailPages[i] = true
	}
	// There should only be one head page left.
	for i, isTail := range tailPages {
		if !isTail {
			pageId := uint64(i) + 1
			t.freePage = pageId
			break
		}
	}
}
|
||||||
|
|
||||||
|
// Reset resets the tree and truncates it to maxSz.
func (t *Tree) Reset() {
	// Tree relies on uninitialized data being zeroed out, so we need to Memclr
	// the data before using it again.
	Memclr(t.buffer.buf)
	t.buffer.Reset()
	t.buffer.AllocateOffset(minSize)
	t.data = t.buffer.Bytes()
	t.stats = TreeStats{}
	t.nextPage = 1
	t.freePage = 0
	t.initRootNode()
}
|
||||||
|
|
||||||
|
// Close releases the memory used by the tree.
// Safe to call on a nil tree.
func (t *Tree) Close() error {
	if t == nil {
		return nil
	}
	return t.buffer.Release()
}
|
||||||
|
|
||||||
|
// TreeStats reports usage statistics for a Tree. "Derived" fields are
// computed on demand in Stats(); "Calculated" fields are maintained
// incrementally as the tree is mutated.
type TreeStats struct {
	Allocated    int     // Derived.
	Bytes        int     // Derived.
	NumLeafKeys  int     // Calculated.
	NumPages     int     // Derived.
	NumPagesFree int     // Calculated.
	Occupancy    float64 // Derived.
	PageSize     int     // Derived.
}
|
||||||
|
|
||||||
|
// Stats returns stats about the tree.
func (t *Tree) Stats() TreeStats {
	// nextPage is the first unused page, so pages in use = nextPage - 1.
	numPages := int(t.nextPage - 1)
	out := TreeStats{
		Bytes:        numPages * pageSize,
		Allocated:    len(t.data),
		NumLeafKeys:  t.stats.NumLeafKeys,
		NumPages:     numPages,
		NumPagesFree: t.stats.NumPagesFree,
		PageSize:     pageSize,
	}
	// Percentage of leaf-key slots actually occupied.
	out.Occupancy = 100.0 * float64(out.NumLeafKeys) / float64(maxKeys*numPages)
	return out
}
|
||||||
|
|
||||||
|
// BytesToUint64Slice converts a byte slice to a uint64 slice.
// The returned slice aliases b's backing array — no copy is made, so b
// must stay alive (and 8-byte aligned) while the result is in use.
func BytesToUint64Slice(b []byte) []uint64 {
	if len(b) == 0 {
		return nil
	}
	var u64s []uint64
	// Reinterpret b's backing array in place via the slice header.
	// NOTE(review): reflect.SliceHeader is deprecated in newer Go; if the
	// module targets Go 1.17+, unsafe.Slice would be the safer form.
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&u64s))
	hdr.Len = len(b) / 8
	hdr.Cap = hdr.Len
	hdr.Data = uintptr(unsafe.Pointer(&b[0]))
	return u64s
}
|
||||||
|
|
||||||
|
// newNode returns a fresh zeroed node with the given type bit set
// (bitLeaf or 0). It reuses the head of the free-page list when one is
// available, otherwise appends a new page, growing the buffer if needed.
// Growing may reallocate t.data, so callers must re-read any node
// references they hold.
func (t *Tree) newNode(bit uint64) node {
	var pageId uint64
	if t.freePage > 0 {
		// Reuse a freed page.
		pageId = t.freePage
		t.stats.NumPagesFree--
	} else {
		pageId = t.nextPage
		t.nextPage++
		offset := int(pageId) * pageSize
		reqSize := offset + pageSize
		if reqSize > len(t.data) {
			t.buffer.AllocateOffset(reqSize - len(t.data))
			t.data = t.buffer.Bytes()
		}
	}
	n := t.node(pageId)
	if t.freePage > 0 {
		// Word 0 of a freed page stores the next free page; pop the list head.
		t.freePage = n.uint64(0)
	}
	zeroOut(n)
	n.setBit(bit)
	// The reserved last slot records this node's own page ID.
	n.setAt(keyOffset(maxKeys), pageId)
	return n
}
|
||||||
|
|
||||||
|
// getNode reinterprets a page's bytes as a node (a []uint64 view).
func getNode(data []byte) node {
	return node(BytesToUint64Slice(data))
}
|
||||||
|
|
||||||
|
// zeroOut clears every element of data in place.
func zeroOut(data []uint64) {
	for i := range data {
		data[i] = 0
	}
}
|
||||||
|
|
||||||
|
// node returns a view of the page with ID pid, or nil for pid 0 (the
// null page).
func (t *Tree) node(pid uint64) node {
	// page does not exist
	if pid == 0 {
		return nil
	}
	start := pageSize * int(pid)
	return getNode(t.data[start : start+pageSize])
}
|
||||||
|
|
||||||
|
// Set sets the key-value pair in the tree.
// Keys 0 and math.MaxUint64 are reserved and panic.
func (t *Tree) Set(k, v uint64) {
	if k == math.MaxUint64 || k == 0 {
		panic("Error setting zero or MaxUint64")
	}
	root := t.set(1, k, v)
	// A full root must split; the root page ID (1) must stay stable, so the
	// old contents move into a new left child instead.
	if root.isFull() {
		right := t.split(1)
		left := t.newNode(root.bits())
		// Re-read the root as the underlying buffer for tree might have changed during split.
		root = t.node(1)
		copy(left[:keyOffset(maxKeys)], root)
		left.setNumKeys(root.numKeys())

		// reset the root node.
		zeroOut(root[:keyOffset(maxKeys)])
		root.setNumKeys(0)

		// set the pointers for left and right child in the root node.
		root.set(left.maxKey(), left.pageID())
		root.set(right.maxKey(), right.pageID())
	}
}
|
||||||
|
|
||||||
|
// For internal nodes, they contain <key, ptr>.
// where all entries <= key are stored in the corresponding ptr.
//
// set recursively inserts k=v below page pid and returns the (possibly
// relocated) node for pid. Buffer growth during newNode/split can move
// t.data, which is why nodes are re-read after those calls.
func (t *Tree) set(pid, k, v uint64) node {
	n := t.node(pid)
	if n.isLeaf() {
		t.stats.NumLeafKeys += n.set(k, v)
		return n
	}

	// This is an internal node.
	idx := n.search(k)
	if idx >= maxKeys {
		panic("search returned index >= maxKeys")
	}
	// If no key at idx.
	if n.key(idx) == 0 {
		n.setAt(keyOffset(idx), k)
		n.setNumKeys(n.numKeys() + 1)
	}
	child := t.node(n.val(idx))
	if child == nil {
		child = t.newNode(bitLeaf)
		n = t.node(pid)
		n.setAt(valOffset(idx), child.pageID())
	}
	child = t.set(child.pageID(), k, v)
	// Re-read n as the underlying buffer for tree might have changed during set.
	n = t.node(pid)
	if child.isFull() {
		// Just consider the left sibling for simplicity.
		// if t.shareWithSibling(n, idx) {
		// 	return n
		// }

		nn := t.split(child.pageID())
		// Re-read n and child as the underlying buffer for tree might have changed during split.
		n = t.node(pid)
		child = t.node(n.uint64(valOffset(idx)))
		// Set child pointers in the node n.
		// Note that key for right node (nn) already exist in node n, but the
		// pointer is updated.
		n.set(child.maxKey(), child.pageID())
		n.set(nn.maxKey(), nn.pageID())
	}
	return n
}
|
||||||
|
|
||||||
|
// Get looks for key and returns the corresponding value.
// If key is not found, 0 is returned.
func (t *Tree) Get(k uint64) uint64 {
	// Keys 0 and MaxUint64 are reserved sentinels.
	if k == math.MaxUint64 || k == 0 {
		panic("Does not support getting MaxUint64/Zero")
	}
	root := t.node(1)
	return t.get(root, k)
}
|
||||||
|
|
||||||
|
// get walks down from n to the leaf that could contain k and returns its
// value, or 0 when k is absent.
func (t *Tree) get(n node, k uint64) uint64 {
	if n.isLeaf() {
		return n.get(k)
	}
	// This is internal node
	idx := n.search(k)
	// k is larger than every key in this subtree.
	if idx == n.numKeys() || n.key(idx) == 0 {
		return 0
	}
	child := t.node(n.uint64(valOffset(idx)))
	assert(child != nil)
	return t.get(child, k)
}
|
||||||
|
|
||||||
|
// DeleteBelow deletes all keys with value under ts.
func (t *Tree) DeleteBelow(ts uint64) {
	root := t.node(1)
	// NumLeafKeys is recounted from scratch by compact.
	t.stats.NumLeafKeys = 0
	t.compact(root, ts)
	// The sentinel key must always survive.
	assert(root.numKeys() >= 1)
}
|
||||||
|
|
||||||
|
// compact recursively removes entries whose value is below ts, returning
// the number of keys remaining in n. Children left empty are pushed onto
// the free-page list.
func (t *Tree) compact(n node, ts uint64) int {
	if n.isLeaf() {
		numKeys := n.compact(ts)
		t.stats.NumLeafKeys += n.numKeys()
		return numKeys
	}
	// Not leaf.
	N := n.numKeys()
	for i := 0; i < N; i++ {
		assert(n.key(i) > 0)
		childID := n.uint64(valOffset(i))
		child := t.node(childID)
		if rem := t.compact(child, ts); rem == 0 && i < N-1 {
			// If no valid key is remaining we can drop this child. However, don't do that if this
			// is the max key.
			t.stats.NumLeafKeys -= child.numKeys()
			// Link the freed page into the free list (word 0 holds the next
			// free page) and clear the pointer in n.
			child.setAt(0, t.freePage)
			t.freePage = childID
			n.setAt(valOffset(i), 0)
			t.stats.NumPagesFree++
		}
	}
	// We use ts=1 here because we want to delete all the keys whose value is 0, which means they no
	// longer have a valid page for that key.
	return n.compact(1)
}
|
||||||
|
|
||||||
|
// iterate applies fn to n and then, depth-first, to every reachable
// descendant. Iteration of a node's children stops at the first zero key.
func (t *Tree) iterate(n node, fn func(node)) {
	fn(n)
	if n.isLeaf() {
		return
	}
	// Explore children.
	for i := 0; i < maxKeys; i++ {
		if n.key(i) == 0 {
			return
		}
		childID := n.uint64(valOffset(i))
		assert(childID > 0)

		child := t.node(childID)
		t.iterate(child, fn)
	}
}
|
||||||
|
|
||||||
|
// Iterate iterates over the tree and executes the fn on each node.
func (t *Tree) Iterate(fn func(node)) {
	root := t.node(1)
	t.iterate(root, fn)
}
|
||||||
|
|
||||||
|
// IterateKV iterates through all keys and values in the tree.
// If newVal is non-zero, it will be set in the tree.
func (t *Tree) IterateKV(f func(key, val uint64) (newVal uint64)) {
	t.Iterate(func(n node) {
		// Only leaf nodes contain keys.
		if !n.isLeaf() {
			return
		}

		for i := 0; i < n.numKeys(); i++ {
			key := n.key(i)
			val := n.val(i)

			// A zero value here means that this is a bogus entry.
			if val == 0 {
				continue
			}

			newVal := f(key, val)
			if newVal != 0 {
				// Write the caller-supplied replacement value in place.
				n.setAt(valOffset(i), newVal)
			}
		}
	})
}
|
||||||
|
|
||||||
|
// print dumps node n (tagged with its parent's page ID) and recurses into
// all of its children.
func (t *Tree) print(n node, parentID uint64) {
	n.print(parentID)
	if n.isLeaf() {
		return
	}
	pid := n.pageID()
	for i := 0; i < maxKeys; i++ {
		// A zero key marks the end of this node's entries.
		if n.key(i) == 0 {
			return
		}
		childID := n.uint64(valOffset(i))
		child := t.node(childID)
		t.print(child, pid)
	}
}
|
||||||
|
|
||||||
|
// Print iterates over the tree and prints all valid KVs.
|
||||||
|
func (t *Tree) Print() {
|
||||||
|
root := t.node(1)
|
||||||
|
t.print(root, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Splits the node into two. It moves right half of the keys from the original node to a newly
|
||||||
|
// created right node. It returns the right node.
|
||||||
|
func (t *Tree) split(pid uint64) node {
	n := t.node(pid)
	if !n.isFull() {
		panic("This should be called only when n is full")
	}

	// Create a new node nn, copy over half the keys from n, and set the parent to n's parent.
	nn := t.newNode(n.bits())
	// Re-read n as the underlying buffer for tree might have changed during newNode.
	n = t.node(pid)
	rightHalf := n[keyOffset(maxKeys/2):keyOffset(maxKeys)]
	copy(nn, rightHalf)
	nn.setNumKeys(maxKeys - maxKeys/2)

	// Remove entries from node n. The upper half now lives in nn; n keeps the lower half.
	zeroOut(rightHalf)
	n.setNumKeys(maxKeys / 2)
	return nn
}
|
||||||
|
|
||||||
|
// shareWithSiblingXXX is unused for now. The idea is to move some keys to
|
||||||
|
// sibling when a node is full. But, I don't see any special benefits in our
|
||||||
|
// access pattern. It doesn't result in better occupancy ratios.
|
||||||
|
func (t *Tree) shareWithSiblingXXX(n node, idx int) bool {
	// No left sibling exists for the first child.
	if idx == 0 {
		return false
	}
	left := t.node(n.val(idx - 1))
	ns := left.numKeys()
	if ns >= maxKeys/2 {
		// Sibling is already getting full.
		return false
	}

	right := t.node(n.val(idx))
	// Copy over keys from right child to left child.
	copied := copy(left[keyOffset(ns):], right[:keyOffset(oneThird)])
	copied /= 2 // Considering that key-val constitute one key.
	left.setNumKeys(ns + copied)

	// Update the max key in parent node n for the left sibling.
	n.setAt(keyOffset(idx-1), left.maxKey())

	// Now move keys to left for the right sibling.
	until := copy(right, right[keyOffset(oneThird):keyOffset(maxKeys)])
	right.setNumKeys(until / 2)
	zeroOut(right[until:keyOffset(maxKeys)])
	// Returns true only after keys have actually been moved.
	return true
}
|
||||||
|
|
||||||
|
// Each node in the node is of size pageSize. Two kinds of nodes. Leaf nodes and internal nodes.
|
||||||
|
// Leaf nodes only contain the data. Internal nodes would contain the key and the offset to the
|
||||||
|
// child node.
|
||||||
|
// Internal node would have first entry as
|
||||||
|
// <0 offset to child>, <1000 offset>, <5000 offset>, and so on...
|
||||||
|
// Leaf nodes would just have: <key, value>, <key, value>, and so on...
|
||||||
|
// Last 16 bytes of the node are off limits.
|
||||||
|
// | pageID (8 bytes) | metaBits (1 byte) | 3 free bytes | numKeys (4 bytes) |
|
||||||
|
type node []uint64
|
||||||
|
|
||||||
|
// uint64 returns the raw word at index start within the node's backing slice.
func (n node) uint64(start int) uint64 { return n[start] }

// func (n node) uint32(start int) uint32 { return *(*uint32)(unsafe.Pointer(&n[start])) }

// keyOffset returns the slice index of the i-th key; keys and values alternate.
func keyOffset(i int) int { return 2 * i }

// valOffset returns the slice index of the value paired with the i-th key.
func valOffset(i int) int { return 2*i + 1 }

// numKeys reads the key count from the low 32 bits of the trailing metadata word.
func (n node) numKeys() int { return int(n.uint64(valOffset(maxKeys)) & 0xFFFFFFFF) }

// pageID returns the node's page identifier, stored in the metadata area.
func (n node) pageID() uint64 { return n.uint64(keyOffset(maxKeys)) }

// key returns the i-th key.
func (n node) key(i int) uint64 { return n.uint64(keyOffset(i)) }

// val returns the value paired with the i-th key.
func (n node) val(i int) uint64 { return n.uint64(valOffset(i)) }

// data returns the i-th key/value pair as a two-word sub-slice.
func (n node) data(i int) []uint64 { return n[keyOffset(i):keyOffset(i+1)] }
|
||||||
|
|
||||||
|
// setAt writes k at raw index start.
func (n node) setAt(start int, k uint64) {
	n[start] = k
}

// setNumKeys stores num in the low 32 bits of the metadata word, preserving
// the high bits (which hold the node's flag bits).
func (n node) setNumKeys(num int) {
	idx := valOffset(maxKeys)
	val := n[idx]
	val &= 0xFFFFFFFF00000000
	val |= uint64(num)
	n[idx] = val
}

// moveRight shifts all key/value pairs from index lo onward one slot to the
// right, opening a hole at lo. The node must not already be full.
func (n node) moveRight(lo int) {
	hi := n.numKeys()
	assert(hi != maxKeys)
	// copy works despite of overlap in src and dst.
	// See https://golang.org/pkg/builtin/#copy
	copy(n[keyOffset(lo+1):keyOffset(hi+1)], n[keyOffset(lo):keyOffset(hi)])
}
|
||||||
|
|
||||||
|
const (
	// bitLeaf marks a node as a leaf; stored in the top bit of the metadata word.
	bitLeaf = uint64(1 << 63)
)

// setBit replaces the node's flag bits with b, keeping the low 32 bits
// (the key count) of the metadata word intact.
func (n node) setBit(b uint64) {
	vo := valOffset(maxKeys)
	val := n[vo]
	val &= 0xFFFFFFFF
	val |= b
	n[vo] = val
}

// bits returns the node's flag bits (top byte of the metadata word).
func (n node) bits() uint64 {
	return n.val(maxKeys) & 0xFF00000000000000
}

// isLeaf reports whether the leaf flag is set on this node.
func (n node) isLeaf() bool {
	return n.bits()&bitLeaf > 0
}

// isFull checks that the node is already full.
func (n node) isFull() bool {
	return n.numKeys() == maxKeys
}
|
||||||
|
|
||||||
|
// Search returns the index of a smallest key >= k in a node.
|
||||||
|
func (n node) search(k uint64) int {
	N := n.numKeys()
	// For tiny nodes a plain linear scan is cheaper than the SIMD search.
	if N < 4 {
		for i := 0; i < N; i++ {
			if ki := n.key(i); ki >= k {
				return i
			}
		}
		return N
	}
	return int(simd.Search(n[:2*N], k))
	// Earlier binary+linear search, kept for reference:
	// lo, hi := 0, N
	// // Reduce the search space using binary seach and then do linear search.
	// for hi-lo > 32 {
	// 	mid := (hi + lo) / 2
	// 	km := n.key(mid)
	// 	if k == km {
	// 		return mid
	// 	}
	// 	if k > km {
	// 		// key is greater than the key at mid, so move right.
	// 		lo = mid + 1
	// 	} else {
	// 		// else move left.
	// 		hi = mid
	// 	}
	// }
	// for i := lo; i <= hi; i++ {
	// 	if ki := n.key(i); ki >= k {
	// 		return i
	// 	}
	// }
	// return N
}
|
||||||
|
// maxKey returns the largest key currently stored in the node.
func (n node) maxKey() uint64 {
	idx := n.numKeys()
	// idx points to the first key which is zero.
	if idx > 0 {
		idx--
	}
	return n.key(idx)
}
|
||||||
|
|
||||||
|
// compacts the node i.e., remove all the kvs with value < lo. It returns the remaining number of
|
||||||
|
// keys.
|
||||||
|
func (n node) compact(lo uint64) int {
	N := n.numKeys()
	mk := n.maxKey()
	var left, right int
	for right = 0; right < N; right++ {
		// The max key is always retained, even if its value is below lo.
		if n.val(right) < lo && n.key(right) < mk {
			// Skip over this key. Don't copy it.
			continue
		}
		// Valid data. Copy it from right to left. Advance left.
		if left != right {
			copy(n.data(left), n.data(right))
		}
		left++
	}
	// zero out rest of the kv pairs.
	zeroOut(n[keyOffset(left):keyOffset(right)])
	n.setNumKeys(left)

	// If the only key we have is the max key, and its value is less than lo, then we can indicate
	// to the caller by returning a zero that it's OK to drop the node.
	if left == 1 && n.key(0) == mk && n.val(0) < lo {
		return 0
	}
	return left
}
|
||||||
|
|
||||||
|
func (n node) get(k uint64) uint64 {
|
||||||
|
idx := n.search(k)
|
||||||
|
// key is not found
|
||||||
|
if idx == n.numKeys() {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
if ki := n.key(idx); ki == k {
|
||||||
|
return n.val(idx)
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// set returns true if it added a new key.
|
||||||
|
func (n node) set(k, v uint64) (numAdded int) {
	idx := n.search(k)
	// ki is 0 when idx lands past the last key (unused slots are kept zeroed).
	ki := n.key(idx)
	if n.numKeys() == maxKeys {
		// This happens during split of non-root node, when we are updating the child pointer of
		// right node. Hence, the key should already exist.
		assert(ki == k)
	}
	if ki > k {
		// Found the first entry which is greater than k. So, we need to fit k
		// just before it. For that, we should move the rest of the data in the
		// node to the right to make space for k.
		n.moveRight(idx)
	}
	// If the k does not exist already, increment the number of keys.
	if ki != k {
		n.setNumKeys(n.numKeys() + 1)
		numAdded = 1
	}
	if ki == 0 || ki >= k {
		n.setAt(keyOffset(idx), k)
		n.setAt(valOffset(idx), v)
		return
	}
	panic("shouldn't reach here")
}
|
||||||
|
|
||||||
|
func (n node) iterate(fn func(node, int)) {
|
||||||
|
for i := 0; i < maxKeys; i++ {
|
||||||
|
if k := n.key(i); k > 0 {
|
||||||
|
fn(n, i)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// print writes a one-line summary of the node (page ID, parent ID, key count
// and an abbreviated key list) to stdout.
func (n node) print(parentID uint64) {
	var keys []string
	n.iterate(func(n node, i int) {
		keys = append(keys, fmt.Sprintf("%d", n.key(i)))
	})
	// Abbreviate long key lists: keep the first 4 and the last 4 keys.
	if len(keys) > 8 {
		copy(keys[4:], keys[len(keys)-4:])
		keys[3] = "..."
		keys = keys[:8]
	}
	fmt.Printf("%d Child of: %d num keys: %d keys: %s\n",
		n.pageID(), parentID, n.numKeys(), strings.Join(keys, " "))
}
|
@ -0,0 +1,544 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"sync/atomic"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	defaultCapacity = 64       // minimum backing size for a new Buffer
	defaultTag      = "buffer" // allocation-stats tag used when none is given
)
|
||||||
|
|
||||||
|
// Buffer is equivalent of bytes.Buffer without the ability to read. It is NOT thread-safe.
|
||||||
|
//
|
||||||
|
// In UseCalloc mode, z.Calloc is used to allocate memory, which depending upon how the code is
|
||||||
|
// compiled could use jemalloc for allocations.
|
||||||
|
//
|
||||||
|
// In UseMmap mode, Buffer uses file mmap to allocate memory. This allows us to store big data
|
||||||
|
// structures without using physical memory.
|
||||||
|
//
|
||||||
|
// MaxSize can be set to limit the memory usage.
|
||||||
|
type Buffer struct {
	// padding reserves the first bytes of buf so that offset 0 never refers to
	// user data (see the start != 0 check in SortSliceBetween).
	padding       uint64     // number of starting bytes used for padding
	offset        uint64     // used length of the buffer
	buf           []byte     // backing slice for the buffer
	bufType       BufferType // type of the underlying buffer
	curSz         int        // capacity of the buffer
	maxSz         int        // causes a panic if the buffer grows beyond this size
	mmapFile      *MmapFile  // optional mmap backing for the buffer
	autoMmapAfter int        // Calloc falls back to an mmaped tmpfile after crossing this size
	autoMmapDir   string     // directory for autoMmap to create a tempfile in
	persistent    bool       // when enabled, Release will not delete the underlying mmap file
	tag           string     // used for jemalloc stats
}
|
||||||
|
|
||||||
|
// NewBuffer returns a Calloc-backed buffer with at least the requested
// capacity. tag is used for allocation accounting; empty falls back to "buffer".
func NewBuffer(capacity int, tag string) *Buffer {
	if capacity < defaultCapacity {
		capacity = defaultCapacity
	}
	if tag == "" {
		tag = defaultTag
	}
	return &Buffer{
		buf:     Calloc(capacity, tag),
		bufType: UseCalloc,
		curSz:   capacity,
		offset:  8,
		padding: 8,
		tag:     tag,
	}
}
|
||||||
|
|
||||||
|
// It is the caller's responsibility to set offset after this, because Buffer
|
||||||
|
// doesn't remember what it was.
|
||||||
|
func NewBufferPersistent(path string, capacity int) (*Buffer, error) {
	file, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		return nil, err
	}
	buffer, err := newBufferFile(file, capacity)
	if err != nil {
		return nil, err
	}
	// Persistent buffers keep the backing file on Release.
	buffer.persistent = true
	return buffer, nil
}
|
||||||
|
|
||||||
|
// NewBufferTmp returns an mmap-backed buffer over a temp file created in dir
// (or the default tmp dir when dir is empty). The file is deleted on Release.
func NewBufferTmp(dir string, capacity int) (*Buffer, error) {
	if dir == "" {
		dir = tmpDir
	}
	file, err := ioutil.TempFile(dir, "buffer")
	if err != nil {
		return nil, err
	}
	return newBufferFile(file, capacity)
}
|
||||||
|
|
||||||
|
// newBufferFile mmaps file at the given capacity and wraps it in a Buffer.
// A NewFile result from OpenMmapFileUsing is not treated as an error.
func newBufferFile(file *os.File, capacity int) (*Buffer, error) {
	if capacity < defaultCapacity {
		capacity = defaultCapacity
	}
	mmapFile, err := OpenMmapFileUsing(file, capacity, true)
	if err != nil && err != NewFile {
		return nil, err
	}
	buf := &Buffer{
		buf:      mmapFile.Data,
		bufType:  UseMmap,
		curSz:    len(mmapFile.Data),
		mmapFile: mmapFile,
		offset:   8,
		padding:  8,
	}
	return buf, nil
}
|
||||||
|
|
||||||
|
// NewBufferSlice wraps an existing byte slice as a Buffer with no padding.
// The result is UseInvalid, so Grow (and hence any write) will panic.
func NewBufferSlice(slice []byte) *Buffer {
	return &Buffer{
		offset:  uint64(len(slice)),
		buf:     slice,
		bufType: UseInvalid,
	}
}
|
||||||
|
|
||||||
|
// WithAutoMmap makes a Calloc-backed buffer switch to an mmaped temp file in
// path (or the default tmp dir) once it grows past threshold bytes.
func (b *Buffer) WithAutoMmap(threshold int, path string) *Buffer {
	if b.bufType != UseCalloc {
		panic("can only autoMmap with UseCalloc")
	}
	b.autoMmapAfter = threshold
	if path == "" {
		b.autoMmapDir = tmpDir
	} else {
		b.autoMmapDir = path
	}
	return b
}
|
||||||
|
|
||||||
|
// WithMaxSize caps the buffer's size; Grow panics if the cap would be exceeded.
func (b *Buffer) WithMaxSize(size int) *Buffer {
	b.maxSz = size
	return b
}

// IsEmpty reports whether nothing has been written past the padding.
func (b *Buffer) IsEmpty() bool {
	return int(b.offset) == b.StartOffset()
}
|
||||||
|
|
||||||
|
// LenWithPadding would return the number of bytes written to the buffer so far
|
||||||
|
// plus the padding at the start of the buffer.
|
||||||
|
func (b *Buffer) LenWithPadding() int {
	// NOTE(review): offset is read atomically here although the type is
	// documented as not thread-safe — presumably to allow concurrent readers
	// of the length; confirm against callers.
	return int(atomic.LoadUint64(&b.offset))
}

// LenNoPadding would return the number of bytes written to the buffer so far
// (without the padding).
func (b *Buffer) LenNoPadding() int {
	return int(atomic.LoadUint64(&b.offset) - b.padding)
}

// Bytes would return all the written bytes as a slice.
func (b *Buffer) Bytes() []byte {
	off := atomic.LoadUint64(&b.offset)
	return b.buf[b.padding:off]
}
|
||||||
|
|
||||||
|
// Grow would grow the buffer to have at least n more bytes. In case the buffer is at capacity, it
|
||||||
|
// would reallocate twice the size of current capacity + n, to ensure n bytes can be written to the
|
||||||
|
// buffer without further allocation. In UseMmap mode, this might result in underlying file
|
||||||
|
// expansion.
|
||||||
|
func (b *Buffer) Grow(n int) {
	if b.buf == nil {
		panic("z.Buffer needs to be initialized before using")
	}
	if b.maxSz > 0 && int(b.offset)+n > b.maxSz {
		err := fmt.Errorf(
			"z.Buffer max size exceeded: %d offset: %d grow: %d", b.maxSz, b.offset, n)
		panic(err)
	}
	// Enough room already; nothing to do.
	if int(b.offset)+n < b.curSz {
		return
	}

	// Calculate new capacity.
	growBy := b.curSz + n
	// Don't allocate more than 1GB at a time.
	if growBy > 1<<30 {
		growBy = 1 << 30
	}
	// Allocate at least n, even if it exceeds the 1GB limit above.
	if n > growBy {
		growBy = n
	}
	b.curSz += growBy

	switch b.bufType {
	case UseCalloc:
		// If autoMmap gets triggered, copy the slice over to an mmaped file.
		if b.autoMmapAfter > 0 && b.curSz > b.autoMmapAfter {
			b.bufType = UseMmap
			file, err := ioutil.TempFile(b.autoMmapDir, "")
			if err != nil {
				panic(err)
			}
			mmapFile, err := OpenMmapFileUsing(file, b.curSz, true)
			if err != nil && err != NewFile {
				panic(err)
			}
			assert(int(b.offset) == copy(mmapFile.Data, b.buf[:b.offset]))
			Free(b.buf)
			b.mmapFile = mmapFile
			b.buf = mmapFile.Data
			break
		}

		// Else, reallocate the slice.
		newBuf := Calloc(b.curSz, b.tag)
		assert(int(b.offset) == copy(newBuf, b.buf[:b.offset]))
		Free(b.buf)
		b.buf = newBuf

	case UseMmap:
		// Truncate and remap the underlying file.
		if err := b.mmapFile.Truncate(int64(b.curSz)); err != nil {
			err = errors.Wrapf(err,
				"while trying to truncate file: %s to size: %d", b.mmapFile.Fd.Name(), b.curSz)
			panic(err)
		}
		b.buf = b.mmapFile.Data

	default:
		panic("can only use Grow on UseCalloc and UseMmap buffers")
	}
}
|
||||||
|
|
||||||
|
// Allocate is a way to get a slice of size n back from the buffer. This slice can be directly
|
||||||
|
// written to. Warning: Allocate is not thread-safe. The byte slice returned MUST be used before
|
||||||
|
// further calls to Buffer.
|
||||||
|
func (b *Buffer) Allocate(n int) []byte {
	b.Grow(n)
	// Reserve [off, off+n) for the caller.
	off := b.offset
	b.offset += uint64(n)
	return b.buf[off:int(b.offset)]
}

// AllocateOffset works the same way as allocate, but instead of returning a byte slice, it returns
// the offset of the allocation.
func (b *Buffer) AllocateOffset(n int) int {
	b.Grow(n)
	b.offset += uint64(n)
	return int(b.offset) - n
}
|
||||||
|
|
||||||
|
// writeLen appends sz as a 4-byte big-endian length prefix.
func (b *Buffer) writeLen(sz int) {
	buf := b.Allocate(4)
	binary.BigEndian.PutUint32(buf, uint32(sz))
}

// SliceAllocate would encode the size provided into the buffer, followed by a call to Allocate,
// hence returning the slice of size sz. This can be used to allocate a lot of small buffers into
// this big buffer.
// Note that SliceAllocate should NOT be mixed with normal calls to Write.
func (b *Buffer) SliceAllocate(sz int) []byte {
	// Grow once for prefix+payload so a single reallocation covers both.
	b.Grow(4 + sz)
	b.writeLen(sz)
	return b.Allocate(sz)
}
|
||||||
|
|
||||||
|
// StartOffset returns the first offset that can hold user data (the padding size).
func (b *Buffer) StartOffset() int {
	return int(b.padding)
}

// WriteSlice appends slice with a length prefix, for later retrieval via Slice.
func (b *Buffer) WriteSlice(slice []byte) {
	dst := b.SliceAllocate(len(slice))
	assert(len(slice) == copy(dst, slice))
}
|
||||||
|
|
||||||
|
// SliceIterate calls f on every length-prefixed slice in write order,
// stopping early on the first error f returns.
func (b *Buffer) SliceIterate(f func(slice []byte) error) error {
	if b.IsEmpty() {
		return nil
	}
	slice, next := []byte{}, b.StartOffset()
	for next >= 0 {
		slice, next = b.Slice(next)
		// Zero-length slices carry no data; skip them.
		if len(slice) == 0 {
			continue
		}
		if err := f(slice); err != nil {
			return err
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// BufferType distinguishes how a Buffer's backing memory was obtained.
type BufferType int

// Supported buffer backings.
const (
	UseCalloc  BufferType = iota // memory from z.Calloc
	UseMmap                      // memory from a file mmap
	UseInvalid                   // externally supplied slice; cannot grow
)

// String returns a human-readable name for the buffer type.
func (t BufferType) String() string {
	if t == UseCalloc {
		return "UseCalloc"
	}
	if t == UseMmap {
		return "UseMmap"
	}
	return "UseInvalid"
}
|
||||||
|
|
||||||
|
// LessFunc compares two encoded slices and reports whether a sorts before b.
type LessFunc func(a, b []byte) bool

// sortHelper carries the state for SortSliceBetween's merge sort.
type sortHelper struct {
	offsets []int    // checkpoint offsets, one per 1024 slices, plus the end
	b       *Buffer  // the buffer being sorted in place
	tmp     *Buffer  // scratch buffer used by sortSmall and merge
	less    LessFunc // user-supplied ordering
	small   []int    // reusable scratch of slice offsets within one run
}
|
||||||
|
|
||||||
|
// sortSmall sorts the run of slices in s.b.buf[start:end) in place: it
// collects their offsets, sorts the offsets with the user comparator, writes
// the slices into s.tmp in sorted order, then copies them back.
func (s *sortHelper) sortSmall(start, end int) {
	s.tmp.Reset()
	s.small = s.small[:0]
	next := start
	for next >= 0 && next < end {
		s.small = append(s.small, next)
		_, next = s.b.Slice(next)
	}

	// We are sorting the slices pointed to by s.small offsets, but only moving the offsets around.
	sort.Slice(s.small, func(i, j int) bool {
		left, _ := s.b.Slice(s.small[i])
		right, _ := s.b.Slice(s.small[j])
		return s.less(left, right)
	})
	// Now we iterate over the s.small offsets and copy over the slices. The result is now in order.
	for _, off := range s.small {
		s.tmp.Write(rawSlice(s.b.buf[off:]))
	}
	assert(end-start == copy(s.b.buf[start:end], s.tmp.Bytes()))
}
|
||||||
|
|
||||||
|
// assert crashes the process when b is false; used for internal invariants.
func assert(b bool) {
	if !b {
		glog.Fatalf("%+v", errors.Errorf("Assertion failure"))
	}
}

// check crashes the process on a non-nil error.
func check(err error) {
	if err != nil {
		glog.Fatalf("%+v", err)
	}
}

// check2 is check for call sites that also return a value to be discarded.
func check2(_ interface{}, err error) {
	check(err)
}
|
||||||
|
|
||||||
|
// merge merges two sorted runs of length-prefixed slices back into
// s.b.buf[start:end). left is staged into s.tmp first because the merge
// overwrites left's region of s.b.buf as it proceeds.
func (s *sortHelper) merge(left, right []byte, start, end int) {
	if len(left) == 0 || len(right) == 0 {
		return
	}
	s.tmp.Reset()
	check2(s.tmp.Write(left))
	left = s.tmp.Bytes()

	var ls, rs []byte

	// copyLeft/copyRight emit the current head slice and advance that side.
	copyLeft := func() {
		assert(len(ls) == copy(s.b.buf[start:], ls))
		left = left[len(ls):]
		start += len(ls)
	}
	copyRight := func() {
		assert(len(rs) == copy(s.b.buf[start:], rs))
		right = right[len(rs):]
		start += len(rs)
	}

	for start < end {
		// Once one side is exhausted, the remainder of the other is already sorted.
		if len(left) == 0 {
			assert(len(right) == copy(s.b.buf[start:end], right))
			return
		}
		if len(right) == 0 {
			assert(len(left) == copy(s.b.buf[start:end], left))
			return
		}
		ls = rawSlice(left)
		rs = rawSlice(right)

		// We skip the first 4 bytes in the rawSlice, because that stores the length.
		if s.less(ls[4:], rs[4:]) {
			copyLeft()
		} else {
			copyRight()
		}
	}
}
|
||||||
|
|
||||||
|
// sort merge-sorts the runs covered by offsets[lo..hi] and returns the
// now-sorted region of the underlying buffer.
func (s *sortHelper) sort(lo, hi int) []byte {
	assert(lo <= hi)

	mid := lo + (hi-lo)/2
	loff, hoff := s.offsets[lo], s.offsets[hi]
	if lo == mid {
		// No need to sort, just return the buffer.
		return s.b.buf[loff:hoff]
	}

	// lo, mid would sort from [offset[lo], offset[mid]) .
	left := s.sort(lo, mid)
	// Typically we'd use mid+1, but here mid represents an offset in the buffer. Each offset
	// contains a thousand entries. So, if we do mid+1, we'd skip over those entries.
	right := s.sort(mid, hi)

	s.merge(left, right, loff, hoff)
	return s.b.buf[loff:hoff]
}
|
||||||
|
|
||||||
|
// SortSlice is like SortSliceBetween but sorting over the entire buffer.
|
||||||
|
func (b *Buffer) SortSlice(less func(left, right []byte) bool) {
	// Sort everything after the padding up to the current write offset.
	b.SortSliceBetween(b.StartOffset(), int(b.offset), less)
}
|
||||||
|
// SortSliceBetween sorts the length-prefixed slices in b.buf[start:end) in
// place using less: runs of 1024 slices are sorted directly, then merged.
func (b *Buffer) SortSliceBetween(start, end int, less LessFunc) {
	if start >= end {
		return
	}
	// Offset zero lies inside the padding and never refers to a slice.
	if start == 0 {
		panic("start can never be zero")
	}

	// Record a checkpoint offset every 1024 slices.
	var offsets []int
	next, count := start, 0
	for next >= 0 && next < end {
		if count%1024 == 0 {
			offsets = append(offsets, next)
		}
		_, next = b.Slice(next)
		count++
	}
	assert(len(offsets) > 0)
	if offsets[len(offsets)-1] != end {
		offsets = append(offsets, end)
	}

	// Scratch buffer sized to roughly half the data (plus 10% slack) for merging.
	szTmp := int(float64((end-start)/2) * 1.1)
	s := &sortHelper{
		offsets: offsets,
		b:       b,
		less:    less,
		small:   make([]int, 0, 1024),
		tmp:     NewBuffer(szTmp, b.tag),
	}
	defer s.tmp.Release()

	// Sort each run individually, then merge-sort across runs.
	left := offsets[0]
	for _, off := range offsets[1:] {
		s.sortSmall(left, off)
		left = off
	}
	s.sort(0, len(offsets)-1)
}
|
||||||
|
|
||||||
|
// rawSlice returns the length-prefixed record at the start of buf, including
// its 4-byte big-endian length header.
func rawSlice(buf []byte) []byte {
	n := int(binary.BigEndian.Uint32(buf))
	return buf[:4+n]
}
|
||||||
|
|
||||||
|
// Slice would return the slice written at offset.
|
||||||
|
func (b *Buffer) Slice(offset int) ([]byte, int) {
	if offset >= int(b.offset) {
		return nil, -1
	}

	// Decode the 4-byte big-endian length prefix, then return the payload.
	sz := binary.BigEndian.Uint32(b.buf[offset:])
	start := offset + 4
	next := start + int(sz)
	res := b.buf[start:next]
	// A next of -1 signals that this was the last slice.
	if next >= int(b.offset) {
		next = -1
	}
	return res, next
}
|
||||||
|
|
||||||
|
// SliceOffsets is an expensive function. Use sparingly.
|
||||||
|
func (b *Buffer) SliceOffsets() []int {
	next := b.StartOffset()
	var offsets []int
	// Walk the length-prefixed slices, collecting each starting offset.
	for next >= 0 {
		offsets = append(offsets, next)
		_, next = b.Slice(next)
	}
	return offsets
}
|
||||||
|
|
||||||
|
// Data returns the raw backing bytes from offset up to the buffer's capacity
// (not just the written portion).
func (b *Buffer) Data(offset int) []byte {
	if offset > b.curSz {
		panic("offset beyond current size")
	}
	return b.buf[offset:b.curSz]
}
|
||||||
|
|
||||||
|
// Write would write p bytes to the buffer.
|
||||||
|
func (b *Buffer) Write(p []byte) (n int, err error) {
	n = len(p)
	b.Grow(n)
	assert(n == copy(b.buf[b.offset:], p))
	b.offset += uint64(n)
	// err is always nil; the signature matches io.Writer.
	return n, nil
}
|
||||||
|
|
||||||
|
// Reset would reset the buffer to be reused.
|
||||||
|
func (b *Buffer) Reset() {
	// Rewind to just past the padding; the backing storage is retained.
	b.offset = uint64(b.StartOffset())
}
|
||||||
|
|
||||||
|
// Release would free up the memory allocated by the buffer. Once the usage of buffer is done, it is
|
||||||
|
// important to call Release, otherwise a memory leak can happen.
|
||||||
|
func (b *Buffer) Release() error {
	// Safe to call on a nil receiver.
	if b == nil {
		return nil
	}
	switch b.bufType {
	case UseCalloc:
		Free(b.buf)
	case UseMmap:
		if b.mmapFile == nil {
			return nil
		}
		// Unmap and close; delete the backing file unless marked persistent.
		path := b.mmapFile.Fd.Name()
		if err := b.mmapFile.Close(-1); err != nil {
			return errors.Wrapf(err, "while closing file: %s", path)
		}
		if !b.persistent {
			if err := os.Remove(path); err != nil {
				return errors.Wrapf(err, "while deleting file %s", path)
			}
		}
	}
	return nil
}
|
@ -0,0 +1,42 @@
|
|||||||
|
package z
|
||||||
|
|
||||||
|
import "sync/atomic"
|
||||||
|
|
||||||
|
var numBytes int64
|
||||||
|
|
||||||
|
// NumAllocBytes returns the number of bytes allocated using calls to z.Calloc. The allocations
|
||||||
|
// could be happening via either Go or jemalloc, depending upon the build flags.
|
||||||
|
func NumAllocBytes() int64 {
	// numBytes is maintained by the allocator implementations (not shown in
	// this file section), so read it atomically.
	return atomic.LoadInt64(&numBytes)
}
|
||||||
|
|
||||||
|
// MemStats is used to fetch JE Malloc Stats. The stats are fetched from
// the mallctl namespace http://jemalloc.net/jemalloc.3.html#mallctl_namespace.
type MemStats struct {
	// Total number of bytes allocated by the application.
	// http://jemalloc.net/jemalloc.3.html#stats.allocated
	Allocated uint64
	// Total number of bytes in active pages allocated by the application. This
	// is a multiple of the page size, and greater than or equal to
	// Allocated.
	// http://jemalloc.net/jemalloc.3.html#stats.active
	Active uint64
	// Maximum number of bytes in physically resident data pages mapped by the
	// allocator, comprising all pages dedicated to allocator metadata, pages
	// backing active allocations, and unused dirty pages. This is a maximum
	// rather than precise because pages may not actually be physically
	// resident if they correspond to demand-zeroed virtual memory that has not
	// yet been touched. This is a multiple of the page size, and is larger
	// than stats.active.
	// http://jemalloc.net/jemalloc.3.html#stats.resident
	Resident uint64
	// Total number of bytes in virtual memory mappings that were retained
	// rather than being returned to the operating system via e.g. munmap(2) or
	// similar. Retained virtual memory is typically untouched, decommitted, or
	// purged, so it has no strongly associated physical memory (see extent
	// hooks http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks for
	// details). Retained memory is excluded from mapped memory statistics,
	// e.g. stats.mapped (http://jemalloc.net/jemalloc.3.html#stats.mapped).
	// http://jemalloc.net/jemalloc.3.html#stats.retained
	Retained uint64
}
|
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
|
||||||
|
// of this source code is governed by a BSD-style license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
|
||||||
|
// +build 386 amd64p32 arm armbe mips mipsle mips64p32 mips64p32le ppc sparc
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
// Limits for 32-bit architectures (selected by the build tags above).
const (
	// MaxArrayLen is a safe maximum length for slices on this architecture.
	MaxArrayLen = 1<<31 - 1
	// MaxBufferSize is the size of virtually unlimited buffer on this architecture.
	MaxBufferSize = 1 << 30
)
|
@ -0,0 +1,14 @@
|
|||||||
|
// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
|
||||||
|
// of this source code is governed by a BSD-style license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64 arm64 arm64be ppc64 ppc64le mips64 mips64le riscv64 s390x sparc64
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
// Limits for 64-bit architectures (selected by the build tags above).
const (
	// MaxArrayLen is a safe maximum length for slices on this architecture.
	MaxArrayLen = 1<<50 - 1
	// MaxBufferSize is the size of virtually unlimited buffer on this architecture.
	MaxBufferSize = 256 << 30
)
|
@ -0,0 +1,172 @@
|
|||||||
|
// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
|
||||||
|
// of this source code is governed by a BSD-style license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
|
||||||
|
// +build jemalloc
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
/*
|
||||||
|
#cgo LDFLAGS: /usr/local/lib/libjemalloc.a -L/usr/local/lib -Wl,-rpath,/usr/local/lib -ljemalloc -lm -lstdc++ -pthread -ldl
|
||||||
|
#include <stdlib.h>
|
||||||
|
#include <jemalloc/jemalloc.h>
|
||||||
|
*/
|
||||||
|
import "C"
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/dustin/go-humanize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The go:linkname directives provides backdoor access to private functions in
|
||||||
|
// the runtime. Below we're accessing the throw function.
|
||||||
|
|
||||||
|
//go:linkname throw runtime.throw
|
||||||
|
func throw(s string)
|
||||||
|
|
||||||
|
// New allocates a slice of size n. The returned slice is from manually managed
|
||||||
|
// memory and MUST be released by calling Free. Failure to do so will result in
|
||||||
|
// a memory leak.
|
||||||
|
//
|
||||||
|
// Compile jemalloc with ./configure --with-jemalloc-prefix="je_"
|
||||||
|
// https://android.googlesource.com/platform/external/jemalloc_new/+/6840b22e8e11cb68b493297a5cd757d6eaa0b406/TUNING.md
|
||||||
|
// These two config options seems useful for frequent allocations and deallocations in
|
||||||
|
// multi-threaded programs (like we have).
|
||||||
|
// JE_MALLOC_CONF="background_thread:true,metadata_thp:auto"
|
||||||
|
//
|
||||||
|
// Compile Go program with `go build -tags=jemalloc` to enable this.
|
||||||
|
|
||||||
|
type dalloc struct {
|
||||||
|
t string
|
||||||
|
sz int
|
||||||
|
}
|
||||||
|
|
||||||
|
var dallocsMu sync.Mutex
|
||||||
|
var dallocs map[unsafe.Pointer]*dalloc
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// By initializing dallocs, we can start tracking allocations and deallocations via z.Calloc.
|
||||||
|
dallocs = make(map[unsafe.Pointer]*dalloc)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Calloc(n int, tag string) []byte {
|
||||||
|
if n == 0 {
|
||||||
|
return make([]byte, 0)
|
||||||
|
}
|
||||||
|
// We need to be conscious of the Cgo pointer passing rules:
|
||||||
|
//
|
||||||
|
// https://golang.org/cmd/cgo/#hdr-Passing_pointers
|
||||||
|
//
|
||||||
|
// ...
|
||||||
|
// Note: the current implementation has a bug. While Go code is permitted
|
||||||
|
// to write nil or a C pointer (but not a Go pointer) to C memory, the
|
||||||
|
// current implementation may sometimes cause a runtime error if the
|
||||||
|
// contents of the C memory appear to be a Go pointer. Therefore, avoid
|
||||||
|
// passing uninitialized C memory to Go code if the Go code is going to
|
||||||
|
// store pointer values in it. Zero out the memory in C before passing it
|
||||||
|
// to Go.
|
||||||
|
|
||||||
|
ptr := C.je_calloc(C.size_t(n), 1)
|
||||||
|
if ptr == nil {
|
||||||
|
// NB: throw is like panic, except it guarantees the process will be
|
||||||
|
// terminated. The call below is exactly what the Go runtime invokes when
|
||||||
|
// it cannot allocate memory.
|
||||||
|
throw("out of memory")
|
||||||
|
}
|
||||||
|
|
||||||
|
uptr := unsafe.Pointer(ptr)
|
||||||
|
dallocsMu.Lock()
|
||||||
|
dallocs[uptr] = &dalloc{
|
||||||
|
t: tag,
|
||||||
|
sz: n,
|
||||||
|
}
|
||||||
|
dallocsMu.Unlock()
|
||||||
|
atomic.AddInt64(&numBytes, int64(n))
|
||||||
|
// Interpret the C pointer as a pointer to a Go array, then slice.
|
||||||
|
return (*[MaxArrayLen]byte)(uptr)[:n:n]
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallocNoRef does the exact same thing as Calloc with jemalloc enabled.
|
||||||
|
func CallocNoRef(n int, tag string) []byte {
|
||||||
|
return Calloc(n, tag)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free frees the specified slice.
|
||||||
|
func Free(b []byte) {
|
||||||
|
if sz := cap(b); sz != 0 {
|
||||||
|
b = b[:cap(b)]
|
||||||
|
ptr := unsafe.Pointer(&b[0])
|
||||||
|
C.je_free(ptr)
|
||||||
|
atomic.AddInt64(&numBytes, -int64(sz))
|
||||||
|
dallocsMu.Lock()
|
||||||
|
delete(dallocs, ptr)
|
||||||
|
dallocsMu.Unlock()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Leaks() string {
|
||||||
|
if dallocs == nil {
|
||||||
|
return "Leak detection disabled. Enable with 'leak' build flag."
|
||||||
|
}
|
||||||
|
dallocsMu.Lock()
|
||||||
|
defer dallocsMu.Unlock()
|
||||||
|
if len(dallocs) == 0 {
|
||||||
|
return "NO leaks found."
|
||||||
|
}
|
||||||
|
m := make(map[string]int)
|
||||||
|
for _, da := range dallocs {
|
||||||
|
m[da.t] += da.sz
|
||||||
|
}
|
||||||
|
var buf bytes.Buffer
|
||||||
|
fmt.Fprintf(&buf, "Allocations:\n")
|
||||||
|
for f, sz := range m {
|
||||||
|
fmt.Fprintf(&buf, "%s at file: %s\n", humanize.IBytes(uint64(sz)), f)
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadMemStats populates stats with JE Malloc statistics.
|
||||||
|
func ReadMemStats(stats *MemStats) {
|
||||||
|
if stats == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Call an epoch mallclt to refresh the stats data as mentioned in the docs.
|
||||||
|
// http://jemalloc.net/jemalloc.3.html#epoch
|
||||||
|
// Note: This epoch mallctl is as expensive as a malloc call. It takes up the
|
||||||
|
// malloc_mutex_lock.
|
||||||
|
epoch := 1
|
||||||
|
sz := unsafe.Sizeof(&epoch)
|
||||||
|
C.je_mallctl(
|
||||||
|
(C.CString)("epoch"),
|
||||||
|
unsafe.Pointer(&epoch),
|
||||||
|
(*C.size_t)(unsafe.Pointer(&sz)),
|
||||||
|
unsafe.Pointer(&epoch),
|
||||||
|
(C.size_t)(unsafe.Sizeof(epoch)))
|
||||||
|
stats.Allocated = fetchStat("stats.allocated")
|
||||||
|
stats.Active = fetchStat("stats.active")
|
||||||
|
stats.Resident = fetchStat("stats.resident")
|
||||||
|
stats.Retained = fetchStat("stats.retained")
|
||||||
|
}
|
||||||
|
|
||||||
|
// fetchStat is used to read a specific attribute from je malloc stats using mallctl.
|
||||||
|
func fetchStat(s string) uint64 {
|
||||||
|
var out uint64
|
||||||
|
sz := unsafe.Sizeof(&out)
|
||||||
|
C.je_mallctl(
|
||||||
|
(C.CString)(s), // Query: eg: stats.allocated, stats.resident, etc.
|
||||||
|
unsafe.Pointer(&out), // Variable to store the output.
|
||||||
|
(*C.size_t)(unsafe.Pointer(&sz)), // Size of the output variable.
|
||||||
|
nil, // Input variable used to set a value.
|
||||||
|
0) // Size of the input variable.
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
func StatsPrint() {
|
||||||
|
opts := C.CString("mdablxe")
|
||||||
|
C.je_malloc_stats_print(nil, nil, opts)
|
||||||
|
C.free(unsafe.Pointer(opts))
|
||||||
|
}
|
@ -0,0 +1,37 @@
|
|||||||
|
// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
|
||||||
|
// of this source code is governed by a BSD-style license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
|
||||||
|
// +build !jemalloc !cgo
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Provides versions of Calloc, CallocNoRef, etc when jemalloc is not available
|
||||||
|
// (eg: build without jemalloc tag).
|
||||||
|
|
||||||
|
// Calloc allocates a slice of size n.
|
||||||
|
func Calloc(n int, tag string) []byte {
|
||||||
|
return make([]byte, n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CallocNoRef will not give you memory back without jemalloc.
|
||||||
|
func CallocNoRef(n int, tag string) []byte {
|
||||||
|
// We do the add here just to stay compatible with a corresponding Free call.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Free does not do anything in this mode.
|
||||||
|
func Free(b []byte) {}
|
||||||
|
|
||||||
|
func Leaks() string { return "Leaks: Using Go memory" }
|
||||||
|
func StatsPrint() {
|
||||||
|
fmt.Println("Using Go memory")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadMemStats doesn't do anything since all the memory is being managed
|
||||||
|
// by the Go runtime.
|
||||||
|
func ReadMemStats(_ *MemStats) { return }
|
@ -0,0 +1,217 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MmapFile represents an mmapd file and includes both the buffer to the data
|
||||||
|
// and the file descriptor.
|
||||||
|
type MmapFile struct {
|
||||||
|
Data []byte
|
||||||
|
Fd *os.File
|
||||||
|
}
|
||||||
|
|
||||||
|
var NewFile = errors.New("Create a new file")
|
||||||
|
|
||||||
|
func OpenMmapFileUsing(fd *os.File, sz int, writable bool) (*MmapFile, error) {
|
||||||
|
filename := fd.Name()
|
||||||
|
fi, err := fd.Stat()
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "cannot stat file: %s", filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
var rerr error
|
||||||
|
fileSize := fi.Size()
|
||||||
|
if sz > 0 && fileSize == 0 {
|
||||||
|
// If file is empty, truncate it to sz.
|
||||||
|
if err := fd.Truncate(int64(sz)); err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "error while truncation")
|
||||||
|
}
|
||||||
|
fileSize = int64(sz)
|
||||||
|
rerr = NewFile
|
||||||
|
}
|
||||||
|
|
||||||
|
// fmt.Printf("Mmaping file: %s with writable: %v filesize: %d\n", fd.Name(), writable, fileSize)
|
||||||
|
buf, err := Mmap(fd, writable, fileSize) // Mmap up to file size.
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "while mmapping %s with size: %d", fd.Name(), fileSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if fileSize == 0 {
|
||||||
|
dir, _ := filepath.Split(filename)
|
||||||
|
go SyncDir(dir)
|
||||||
|
}
|
||||||
|
return &MmapFile{
|
||||||
|
Data: buf,
|
||||||
|
Fd: fd,
|
||||||
|
}, rerr
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenMmapFile opens an existing file or creates a new file. If the file is
|
||||||
|
// created, it would truncate the file to maxSz. In both cases, it would mmap
|
||||||
|
// the file to maxSz and returned it. In case the file is created, z.NewFile is
|
||||||
|
// returned.
|
||||||
|
func OpenMmapFile(filename string, flag int, maxSz int) (*MmapFile, error) {
|
||||||
|
// fmt.Printf("opening file %s with flag: %v\n", filename, flag)
|
||||||
|
fd, err := os.OpenFile(filename, flag, 0666)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Wrapf(err, "unable to open: %s", filename)
|
||||||
|
}
|
||||||
|
writable := true
|
||||||
|
if flag == os.O_RDONLY {
|
||||||
|
writable = false
|
||||||
|
}
|
||||||
|
return OpenMmapFileUsing(fd, maxSz, writable)
|
||||||
|
}
|
||||||
|
|
||||||
|
type mmapReader struct {
|
||||||
|
Data []byte
|
||||||
|
offset int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (mr *mmapReader) Read(buf []byte) (int, error) {
|
||||||
|
if mr.offset > len(mr.Data) {
|
||||||
|
return 0, io.EOF
|
||||||
|
}
|
||||||
|
n := copy(buf, mr.Data[mr.offset:])
|
||||||
|
mr.offset += n
|
||||||
|
if n < len(buf) {
|
||||||
|
return n, io.EOF
|
||||||
|
}
|
||||||
|
return n, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MmapFile) NewReader(offset int) io.Reader {
|
||||||
|
return &mmapReader{
|
||||||
|
Data: m.Data,
|
||||||
|
offset: offset,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes returns data starting from offset off of size sz. If there's not enough data, it would
|
||||||
|
// return nil slice and io.EOF.
|
||||||
|
func (m *MmapFile) Bytes(off, sz int) ([]byte, error) {
|
||||||
|
if len(m.Data[off:]) < sz {
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
return m.Data[off : off+sz], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slice returns the slice at the given offset.
|
||||||
|
func (m *MmapFile) Slice(offset int) []byte {
|
||||||
|
sz := binary.BigEndian.Uint32(m.Data[offset:])
|
||||||
|
start := offset + 4
|
||||||
|
next := start + int(sz)
|
||||||
|
if next > len(m.Data) {
|
||||||
|
return []byte{}
|
||||||
|
}
|
||||||
|
res := m.Data[start:next]
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllocateSlice allocates a slice of the given size at the given offset.
|
||||||
|
func (m *MmapFile) AllocateSlice(sz, offset int) ([]byte, int, error) {
|
||||||
|
start := offset + 4
|
||||||
|
|
||||||
|
// If the file is too small, double its size or increase it by 1GB, whichever is smaller.
|
||||||
|
if start+sz > len(m.Data) {
|
||||||
|
const oneGB = 1 << 30
|
||||||
|
growBy := len(m.Data)
|
||||||
|
if growBy > oneGB {
|
||||||
|
growBy = oneGB
|
||||||
|
}
|
||||||
|
if growBy < sz+4 {
|
||||||
|
growBy = sz + 4
|
||||||
|
}
|
||||||
|
if err := m.Truncate(int64(len(m.Data) + growBy)); err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
binary.BigEndian.PutUint32(m.Data[offset:], uint32(sz))
|
||||||
|
return m.Data[start : start+sz], start + sz, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MmapFile) Sync() error {
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return Msync(m.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MmapFile) Delete() error {
|
||||||
|
// Badger can set the m.Data directly, without setting any Fd. In that case, this should be a
|
||||||
|
// NOOP.
|
||||||
|
if m.Fd == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := Munmap(m.Data); err != nil {
|
||||||
|
return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
m.Data = nil
|
||||||
|
if err := m.Fd.Truncate(0); err != nil {
|
||||||
|
return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if err := m.Fd.Close(); err != nil {
|
||||||
|
return fmt.Errorf("while close file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
return os.Remove(m.Fd.Name())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close would close the file. It would also truncate the file if maxSz >= 0.
|
||||||
|
func (m *MmapFile) Close(maxSz int64) error {
|
||||||
|
// Badger can set the m.Data directly, without setting any Fd. In that case, this should be a
|
||||||
|
// NOOP.
|
||||||
|
if m.Fd == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if err := m.Sync(); err != nil {
|
||||||
|
return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if err := Munmap(m.Data); err != nil {
|
||||||
|
return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if maxSz >= 0 {
|
||||||
|
if err := m.Fd.Truncate(maxSz); err != nil {
|
||||||
|
return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m.Fd.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
func SyncDir(dir string) error {
|
||||||
|
df, err := os.Open(dir)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "while opening %s", dir)
|
||||||
|
}
|
||||||
|
if err := df.Sync(); err != nil {
|
||||||
|
return errors.Wrapf(err, "while syncing %s", dir)
|
||||||
|
}
|
||||||
|
if err := df.Close(); err != nil {
|
||||||
|
return errors.Wrapf(err, "while closing %s", dir)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
@ -0,0 +1,39 @@
|
|||||||
|
// +build !linux
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// Truncate would truncate the mmapped file to the given size. On Linux, we truncate
|
||||||
|
// the underlying file and then call mremap, but on other systems, we unmap first,
|
||||||
|
// then truncate, then re-map.
|
||||||
|
func (m *MmapFile) Truncate(maxSz int64) error {
|
||||||
|
if err := m.Sync(); err != nil {
|
||||||
|
return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if err := Munmap(m.Data); err != nil {
|
||||||
|
return fmt.Errorf("while munmap file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if err := m.Fd.Truncate(maxSz); err != nil {
|
||||||
|
return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
m.Data, err = Mmap(m.Fd, true, maxSz) // Mmap up to max size.
|
||||||
|
return err
|
||||||
|
}
|
@ -0,0 +1,37 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Truncate would truncate the mmapped file to the given size. On Linux, we truncate
|
||||||
|
// the underlying file and then call mremap, but on other systems, we unmap first,
|
||||||
|
// then truncate, then re-map.
|
||||||
|
func (m *MmapFile) Truncate(maxSz int64) error {
|
||||||
|
if err := m.Sync(); err != nil {
|
||||||
|
return fmt.Errorf("while sync file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
if err := m.Fd.Truncate(maxSz); err != nil {
|
||||||
|
return fmt.Errorf("while truncate file: %s, error: %v\n", m.Fd.Name(), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
m.Data, err = mremap(m.Data, int(maxSz)) // Mmap up to max size.
|
||||||
|
return err
|
||||||
|
}
|
@ -0,0 +1,311 @@
|
|||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/user"
|
||||||
|
"path/filepath"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/glog"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SuperFlagHelp makes it really easy to generate command line `--help` output for a SuperFlag. For
|
||||||
|
// example:
|
||||||
|
//
|
||||||
|
// const flagDefaults = `enabled=true; path=some/path;`
|
||||||
|
//
|
||||||
|
// var help string = z.NewSuperFlagHelp(flagDefaults).
|
||||||
|
// Flag("enabled", "Turns on <something>.").
|
||||||
|
// Flag("path", "The path to <something>.").
|
||||||
|
// Flag("another", "Not present in defaults, but still included.").
|
||||||
|
// String()
|
||||||
|
//
|
||||||
|
// The `help` string would then contain:
|
||||||
|
//
|
||||||
|
// enabled=true; Turns on <something>.
|
||||||
|
// path=some/path; The path to <something>.
|
||||||
|
// another=; Not present in defaults, but still included.
|
||||||
|
//
|
||||||
|
// All flags are sorted alphabetically for consistent `--help` output. Flags with default values are
|
||||||
|
// placed at the top, and everything else goes under.
|
||||||
|
type SuperFlagHelp struct {
|
||||||
|
head string
|
||||||
|
defaults *SuperFlag
|
||||||
|
flags map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSuperFlagHelp(defaults string) *SuperFlagHelp {
|
||||||
|
return &SuperFlagHelp{
|
||||||
|
defaults: NewSuperFlag(defaults),
|
||||||
|
flags: make(map[string]string, 0),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SuperFlagHelp) Head(head string) *SuperFlagHelp {
|
||||||
|
h.head = head
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SuperFlagHelp) Flag(name, description string) *SuperFlagHelp {
|
||||||
|
h.flags[name] = description
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *SuperFlagHelp) String() string {
|
||||||
|
defaultLines := make([]string, 0)
|
||||||
|
otherLines := make([]string, 0)
|
||||||
|
for name, help := range h.flags {
|
||||||
|
val, found := h.defaults.m[name]
|
||||||
|
line := fmt.Sprintf(" %s=%s; %s\n", name, val, help)
|
||||||
|
if found {
|
||||||
|
defaultLines = append(defaultLines, line)
|
||||||
|
} else {
|
||||||
|
otherLines = append(otherLines, line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sort.Strings(defaultLines)
|
||||||
|
sort.Strings(otherLines)
|
||||||
|
dls := strings.Join(defaultLines, "")
|
||||||
|
ols := strings.Join(otherLines, "")
|
||||||
|
if len(h.defaults.m) == 0 && len(ols) == 0 {
|
||||||
|
// remove last newline
|
||||||
|
dls = dls[:len(dls)-1]
|
||||||
|
}
|
||||||
|
// remove last newline
|
||||||
|
if len(h.defaults.m) == 0 && len(ols) > 1 {
|
||||||
|
ols = ols[:len(ols)-1]
|
||||||
|
}
|
||||||
|
return h.head + "\n" + dls + ols
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseFlag(flag string) (map[string]string, error) {
|
||||||
|
kvm := make(map[string]string)
|
||||||
|
for _, kv := range strings.Split(flag, ";") {
|
||||||
|
if strings.TrimSpace(kv) == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// For a non-empty separator, 0 < len(splits) ≤ 2.
|
||||||
|
splits := strings.SplitN(kv, "=", 2)
|
||||||
|
k := strings.TrimSpace(splits[0])
|
||||||
|
if len(splits) < 2 {
|
||||||
|
return nil, fmt.Errorf("superflag: missing value for '%s' in flag: %s", k, flag)
|
||||||
|
}
|
||||||
|
k = strings.ToLower(k)
|
||||||
|
k = strings.ReplaceAll(k, "_", "-")
|
||||||
|
kvm[k] = strings.TrimSpace(splits[1])
|
||||||
|
}
|
||||||
|
return kvm, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SuperFlag struct {
|
||||||
|
m map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSuperFlag(flag string) *SuperFlag {
|
||||||
|
sf, err := newSuperFlagImpl(flag)
|
||||||
|
if err != nil {
|
||||||
|
glog.Fatal(err)
|
||||||
|
}
|
||||||
|
return sf
|
||||||
|
}
|
||||||
|
|
||||||
|
func newSuperFlagImpl(flag string) (*SuperFlag, error) {
|
||||||
|
m, err := parseFlag(flag)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SuperFlag{m}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) String() string {
|
||||||
|
if sf == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
kvs := make([]string, 0, len(sf.m))
|
||||||
|
for k, v := range sf.m {
|
||||||
|
kvs = append(kvs, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
return strings.Join(kvs, "; ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) MergeAndCheckDefault(flag string) *SuperFlag {
|
||||||
|
sf, err := sf.mergeAndCheckDefaultImpl(flag)
|
||||||
|
if err != nil {
|
||||||
|
glog.Fatal(err)
|
||||||
|
}
|
||||||
|
return sf
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) mergeAndCheckDefaultImpl(flag string) (*SuperFlag, error) {
|
||||||
|
if sf == nil {
|
||||||
|
m, err := parseFlag(flag)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SuperFlag{m}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
src, err := parseFlag(flag)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
numKeys := len(sf.m)
|
||||||
|
for k := range src {
|
||||||
|
if _, ok := sf.m[k]; ok {
|
||||||
|
numKeys--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if numKeys != 0 {
|
||||||
|
return nil, fmt.Errorf("superflag: found invalid options in flag: %s.\nvalid options: %v", sf, flag)
|
||||||
|
}
|
||||||
|
for k, v := range src {
|
||||||
|
if _, ok := sf.m[k]; !ok {
|
||||||
|
sf.m[k] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sf, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) Has(opt string) bool {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
return val != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetDuration(opt string) time.Duration {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return time.Duration(0)
|
||||||
|
}
|
||||||
|
if strings.Contains(val, "d") {
|
||||||
|
val = strings.Replace(val, "d", "", 1)
|
||||||
|
days, err := strconv.ParseUint(val, 0, 64)
|
||||||
|
if err != nil {
|
||||||
|
return time.Duration(0)
|
||||||
|
}
|
||||||
|
return time.Hour * 24 * time.Duration(days)
|
||||||
|
}
|
||||||
|
d, err := time.ParseDuration(val)
|
||||||
|
if err != nil {
|
||||||
|
return time.Duration(0)
|
||||||
|
}
|
||||||
|
return d
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetBool(opt string) bool {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
b, err := strconv.ParseBool(val)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err,
|
||||||
|
"Unable to parse %s as bool for key: %s. Options: %s\n",
|
||||||
|
val, opt, sf)
|
||||||
|
glog.Fatalf("%+v", err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetFloat64(opt string) float64 {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
f, err := strconv.ParseFloat(val, 64)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err,
|
||||||
|
"Unable to parse %s as float64 for key: %s. Options: %s\n",
|
||||||
|
val, opt, sf)
|
||||||
|
glog.Fatalf("%+v", err)
|
||||||
|
}
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetInt64(opt string) int64 {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
i, err := strconv.ParseInt(val, 0, 64)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err,
|
||||||
|
"Unable to parse %s as int64 for key: %s. Options: %s\n",
|
||||||
|
val, opt, sf)
|
||||||
|
glog.Fatalf("%+v", err)
|
||||||
|
}
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetUint64(opt string) uint64 {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
u, err := strconv.ParseUint(val, 0, 64)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err,
|
||||||
|
"Unable to parse %s as uint64 for key: %s. Options: %s\n",
|
||||||
|
val, opt, sf)
|
||||||
|
glog.Fatalf("%+v", err)
|
||||||
|
}
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetUint32(opt string) uint32 {
|
||||||
|
val := sf.GetString(opt)
|
||||||
|
if val == "" {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
u, err := strconv.ParseUint(val, 0, 32)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrapf(err,
|
||||||
|
"Unable to parse %s as uint32 for key: %s. Options: %s\n",
|
||||||
|
val, opt, sf)
|
||||||
|
glog.Fatalf("%+v", err)
|
||||||
|
}
|
||||||
|
return uint32(u)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetString(opt string) string {
|
||||||
|
if sf == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return sf.m[opt]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (sf *SuperFlag) GetPath(opt string) string {
|
||||||
|
p := sf.GetString(opt)
|
||||||
|
path, err := expandPath(p)
|
||||||
|
if err != nil {
|
||||||
|
glog.Fatalf("Failed to get path: %+v", err)
|
||||||
|
}
|
||||||
|
return path
|
||||||
|
}
|
||||||
|
|
||||||
|
// expandPath expands the paths containing ~ to /home/user. It also computes the absolute path
|
||||||
|
// from the relative paths. For example: ~/abc/../cef will be transformed to /home/user/cef.
|
||||||
|
func expandPath(path string) (string, error) {
|
||||||
|
if len(path) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
if path[0] == '~' && (len(path) == 1 || os.IsPathSeparator(path[1])) {
|
||||||
|
usr, err := user.Current()
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "Failed to get the home directory of the user")
|
||||||
|
}
|
||||||
|
path = filepath.Join(usr.HomeDir, path[1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
path, err = filepath.Abs(path)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Wrap(err, "Failed to generate absolute path")
|
||||||
|
}
|
||||||
|
return path, nil
|
||||||
|
}
|
@ -0,0 +1,205 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/dustin/go-humanize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Creates bounds for an histogram. The bounds are powers of two of the form
|
||||||
|
// [2^min_exponent, ..., 2^max_exponent].
|
||||||
|
func HistogramBounds(minExponent, maxExponent uint32) []float64 {
|
||||||
|
var bounds []float64
|
||||||
|
for i := minExponent; i <= maxExponent; i++ {
|
||||||
|
bounds = append(bounds, float64(int(1)<<i))
|
||||||
|
}
|
||||||
|
return bounds
|
||||||
|
}
|
||||||
|
|
||||||
|
func Fibonacci(num int) []float64 {
|
||||||
|
assert(num > 4)
|
||||||
|
bounds := make([]float64, num)
|
||||||
|
bounds[0] = 1
|
||||||
|
bounds[1] = 2
|
||||||
|
for i := 2; i < num; i++ {
|
||||||
|
bounds[i] = bounds[i-1] + bounds[i-2]
|
||||||
|
}
|
||||||
|
return bounds
|
||||||
|
}
|
||||||
|
|
||||||
|
// HistogramData stores the information needed to represent the sizes of the keys and values
|
||||||
|
// as a histogram.
|
||||||
|
type HistogramData struct {
|
||||||
|
Bounds []float64
|
||||||
|
Count int64
|
||||||
|
CountPerBucket []int64
|
||||||
|
Min int64
|
||||||
|
Max int64
|
||||||
|
Sum int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHistogramData returns a new instance of HistogramData with properly initialized fields.
|
||||||
|
func NewHistogramData(bounds []float64) *HistogramData {
|
||||||
|
return &HistogramData{
|
||||||
|
Bounds: bounds,
|
||||||
|
CountPerBucket: make([]int64, len(bounds)+1),
|
||||||
|
Max: 0,
|
||||||
|
Min: math.MaxInt64,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (histogram *HistogramData) Copy() *HistogramData {
|
||||||
|
if histogram == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &HistogramData{
|
||||||
|
Bounds: append([]float64{}, histogram.Bounds...),
|
||||||
|
CountPerBucket: append([]int64{}, histogram.CountPerBucket...),
|
||||||
|
Count: histogram.Count,
|
||||||
|
Min: histogram.Min,
|
||||||
|
Max: histogram.Max,
|
||||||
|
Sum: histogram.Sum,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update changes the Min and Max fields if value is less than or greater than the current values.
|
||||||
|
func (histogram *HistogramData) Update(value int64) {
|
||||||
|
if histogram == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if value > histogram.Max {
|
||||||
|
histogram.Max = value
|
||||||
|
}
|
||||||
|
if value < histogram.Min {
|
||||||
|
histogram.Min = value
|
||||||
|
}
|
||||||
|
|
||||||
|
histogram.Sum += value
|
||||||
|
histogram.Count++
|
||||||
|
|
||||||
|
for index := 0; index <= len(histogram.Bounds); index++ {
|
||||||
|
// Allocate value in the last buckets if we reached the end of the Bounds array.
|
||||||
|
if index == len(histogram.Bounds) {
|
||||||
|
histogram.CountPerBucket[index]++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if value < int64(histogram.Bounds[index]) {
|
||||||
|
histogram.CountPerBucket[index]++
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mean returns the mean value for the histogram.
// NOTE(review): unlike Copy/Update/Clear, Mean does not nil-check the
// receiver — confirm callers never invoke it on a nil *HistogramData.
func (histogram *HistogramData) Mean() float64 {
	if histogram.Count == 0 {
		// No samples recorded; avoid dividing by zero.
		return 0
	}
	return float64(histogram.Sum) / float64(histogram.Count)
}
|
||||||
|
|
||||||
|
// String converts the histogram data into a human-readable string: min/max,
// count, the 50/75/90th percentiles, and one line per non-empty bucket with
// its range, count, and percentage (plus a running cumulative percentage).
func (histogram *HistogramData) String() string {
	if histogram == nil {
		return ""
	}
	var b strings.Builder

	b.WriteString("\n -- Histogram: \n")
	b.WriteString(fmt.Sprintf("Min value: %d \n", histogram.Min))
	b.WriteString(fmt.Sprintf("Max value: %d \n", histogram.Max))
	b.WriteString(fmt.Sprintf("Count: %d \n", histogram.Count))
	b.WriteString(fmt.Sprintf("50p: %.2f \n", histogram.Percentile(0.5)))
	b.WriteString(fmt.Sprintf("75p: %.2f \n", histogram.Percentile(0.75)))
	b.WriteString(fmt.Sprintf("90p: %.2f \n", histogram.Percentile(0.90)))

	numBounds := len(histogram.Bounds)
	var cum float64 // cumulative percentage over the printed buckets
	for index, count := range histogram.CountPerBucket {
		if count == 0 {
			// Skip empty buckets to keep the output compact.
			continue
		}

		// The last bucket represents the bucket that contains the range from
		// the last bound up to infinity so it's processed differently than the
		// other buckets.
		if index == len(histogram.CountPerBucket)-1 {
			lowerBound := uint64(histogram.Bounds[numBounds-1])
			page := float64(count*100) / float64(histogram.Count)
			cum += page
			b.WriteString(fmt.Sprintf("[%s, %s) %d %.2f%% %.2f%%\n",
				humanize.IBytes(lowerBound), "infinity", count, page, cum))
			continue
		}

		upperBound := uint64(histogram.Bounds[index])
		lowerBound := uint64(0)
		if index > 0 {
			lowerBound = uint64(histogram.Bounds[index-1])
		}

		page := float64(count*100) / float64(histogram.Count)
		cum += page
		b.WriteString(fmt.Sprintf("[%d, %d) %d %.2f%% %.2f%%\n",
			lowerBound, upperBound, count, page, cum))
	}
	b.WriteString(" --\n")
	return b.String()
}
|
||||||
|
|
||||||
|
// Percentile returns the percentile value for the histogram.
// value of p should be between [0.0-1.0].
// The result is approximated by the upper bound of the bucket in which the
// requested rank falls.
// NOTE(review): assumes Bounds is non-empty — confirm construction always
// goes through NewHistogramData with at least one bound.
func (histogram *HistogramData) Percentile(p float64) float64 {
	if histogram == nil {
		return 0
	}

	if histogram.Count == 0 {
		// if no data return the minimum range
		return histogram.Bounds[0]
	}
	// Walk the buckets, subtracting each bucket's count from the target rank
	// until it is exhausted.
	pval := int64(float64(histogram.Count) * p)
	for i, v := range histogram.CountPerBucket {
		pval = pval - v
		if pval <= 0 {
			if i == len(histogram.Bounds) {
				// Rank falls in the overflow bucket; report the max bound below.
				break
			}
			return histogram.Bounds[i]
		}
	}
	// default return should be the max range
	return histogram.Bounds[len(histogram.Bounds)-1]
}
|
||||||
|
|
||||||
|
// Clear reset the histogram. Helpful in situations where we need to reset the metrics
|
||||||
|
func (histogram *HistogramData) Clear() {
|
||||||
|
if histogram == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
histogram.Count = 0
|
||||||
|
histogram.CountPerBucket = make([]int64, len(histogram.Bounds)+1)
|
||||||
|
histogram.Sum = 0
|
||||||
|
histogram.Max = 0
|
||||||
|
histogram.Min = math.MaxInt64
|
||||||
|
}
|
@ -0,0 +1,44 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Mmap uses the mmap system call to memory-map a file. If writable is true,
// memory protection of the pages is set so that they may be written to as well.
func Mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	// Delegates to the platform-specific implementation (mmap_*.go).
	return mmap(fd, writable, size)
}

// Munmap unmaps a previously mapped slice.
func Munmap(b []byte) error {
	return munmap(b)
}

// Madvise uses the madvise system call to give advise about the use of memory
// when using a slice that is memory-mapped to a file. Set the readahead flag to
// false if page references are expected in random order.
func Madvise(b []byte, readahead bool) error {
	return madvise(b, readahead)
}

// Msync would call sync on the mmapped data, flushing modified pages back to
// the underlying file.
func Msync(b []byte) error {
	return msync(b)
}
|
@ -0,0 +1,59 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mmap uses the mmap system call to memory-map a file. If writable is true,
// memory protection of the pages is set so that they may be written to as
// well. The mapping is MAP_SHARED, so writes are carried through to the file.
func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	mtype := unix.PROT_READ
	if writable {
		mtype |= unix.PROT_WRITE
	}
	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
}

// munmap unmaps a previously mapped slice.
func munmap(b []byte) error {
	return unix.Munmap(b)
}

// madvise advises the kernel about the expected access pattern for b.
// This is required because the unix package does not support the madvise
// system call on OS X, so the raw syscall is issued directly.
func madvise(b []byte, readahead bool) error {
	advice := unix.MADV_NORMAL
	if !readahead {
		// Page references are expected in random order; disable readahead.
		advice = unix.MADV_RANDOM
	}

	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])),
		uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		return e1
	}
	return nil
}

// msync flushes changes made to the mapped region back to the filesystem,
// blocking until the write completes (MS_SYNC).
func msync(b []byte) error {
	return unix.Msync(b, unix.MS_SYNC)
}
|
@ -0,0 +1,101 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mmap uses the mmap system call to memory-map a file. If writable is true,
// memory protection of the pages is set so that they may be written to as
// well. The mapping is MAP_SHARED, so writes are carried through to the file.
func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	mtype := unix.PROT_READ
	if writable {
		mtype |= unix.PROT_WRITE
	}
	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
}

// mremap is a Linux-specific system call to remap pages in memory. This can be used in place of munmap + mmap.
// On success the returned slice describes the (possibly moved) region of the
// requested size.
func mremap(data []byte, size int) ([]byte, error) {
	// taken from <https://github.com/torvalds/linux/blob/f8394f232b1eab649ce2df5c5f15b0e528c92091/include/uapi/linux/mman.h#L8>
	const MREMAP_MAYMOVE = 0x1

	// NOTE(review): reflect.SliceHeader is deprecated in recent Go releases;
	// consider unsafe.Slice once the module's minimum Go version allows it.
	header := (*reflect.SliceHeader)(unsafe.Pointer(&data))
	mmapAddr, mmapSize, errno := unix.Syscall6(
		unix.SYS_MREMAP,
		header.Data,
		uintptr(header.Len),
		uintptr(size),
		uintptr(MREMAP_MAYMOVE),
		0,
		0,
	)
	if errno != 0 {
		return nil, errno
	}
	if mmapSize != uintptr(size) {
		return nil, fmt.Errorf("mremap size mismatch: requested: %d got: %d", size, mmapSize)
	}

	// Rewrite the slice header in place so data now describes the remapped
	// region at its (possibly new) address.
	header.Data = mmapAddr
	header.Cap = size
	header.Len = size
	return data, nil
}

// munmap unmaps a previously mapped slice.
//
// unix.Munmap maintains an internal list of mmapped addresses, and only calls munmap
// if the address is present in that list. If we use mremap, this list is not updated.
// To bypass this, we call munmap ourselves.
func munmap(data []byte) error {
	if len(data) == 0 || len(data) != cap(data) {
		// Refuse to unmap anything that is not a whole mapping.
		return unix.EINVAL
	}
	_, _, errno := unix.Syscall(
		unix.SYS_MUNMAP,
		uintptr(unsafe.Pointer(&data[0])),
		uintptr(len(data)),
		0,
	)
	if errno != 0 {
		return errno
	}
	return nil
}

// madvise uses the madvise system call to give advise about the use of memory
// when using a slice that is memory-mapped to a file. Set the readahead flag to
// false if page references are expected in random order.
func madvise(b []byte, readahead bool) error {
	flags := unix.MADV_NORMAL
	if !readahead {
		flags = unix.MADV_RANDOM
	}
	return unix.Madvise(b, flags)
}

// msync writes any modified data to persistent storage, blocking until the
// write completes (MS_SYNC).
func msync(b []byte) error {
	return unix.Msync(b, unix.MS_SYNC)
}
|
@ -0,0 +1,44 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"syscall"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mmap is unsupported on Plan 9; it always fails with syscall.EPLAN9.
func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	return nil, syscall.EPLAN9
}

// munmap is unsupported on Plan 9; it always fails with syscall.EPLAN9.
func munmap(b []byte) error {
	return syscall.EPLAN9
}

// madvise is unsupported on Plan 9; it always fails with syscall.EPLAN9.
func madvise(b []byte, readahead bool) error {
	return syscall.EPLAN9
}

// msync is unsupported on Plan 9; it always fails with syscall.EPLAN9.
func msync(b []byte) error {
	return syscall.EPLAN9
}
|
@ -0,0 +1,55 @@
|
|||||||
|
// +build !windows,!darwin,!plan9,!linux
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// mmap uses the mmap system call to memory-map a file. If writable is true,
// memory protection of the pages is set so that they may be written to as
// well. The mapping is MAP_SHARED, so writes are carried through to the file.
func mmap(fd *os.File, writable bool, size int64) ([]byte, error) {
	mtype := unix.PROT_READ
	if writable {
		mtype |= unix.PROT_WRITE
	}
	return unix.Mmap(int(fd.Fd()), 0, int(size), mtype, unix.MAP_SHARED)
}

// munmap unmaps a previously mapped slice.
func munmap(b []byte) error {
	return unix.Munmap(b)
}

// madvise uses the madvise system call to give advise about the use of memory
// when using a slice that is memory-mapped to a file. Set the readahead flag to
// false if page references are expected in random order.
func madvise(b []byte, readahead bool) error {
	flags := unix.MADV_NORMAL
	if !readahead {
		flags = unix.MADV_RANDOM
	}
	return unix.Madvise(b, flags)
}

// msync flushes modified pages of the mapping back to persistent storage,
// blocking until the write completes (MS_SYNC).
func msync(b []byte) error {
	return unix.Msync(b, unix.MS_SYNC)
}
|
@ -0,0 +1,75 @@
|
|||||||
|
// MIT License
|
||||||
|
|
||||||
|
// Copyright (c) 2019 Ewan Chou
|
||||||
|
|
||||||
|
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
// of this software and associated documentation files (the "Software"), to deal
|
||||||
|
// in the Software without restriction, including without limitation the rights
|
||||||
|
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
// copies of the Software, and to permit persons to whom the Software is
|
||||||
|
// furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
// The above copyright notice and this permission notice shall be included in all
|
||||||
|
// copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
// SOFTWARE.
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NanoTime returns the current time in nanoseconds from a monotonic clock.
//go:linkname NanoTime runtime.nanotime
func NanoTime() int64

// CPUTicks is a faster alternative to NanoTime to measure time duration.
//go:linkname CPUTicks runtime.cputicks
func CPUTicks() int64

// stringStruct mirrors the runtime's internal string header layout so that a
// string or byte slice can be reinterpreted for hashing without copying.
type stringStruct struct {
	str unsafe.Pointer // pointer to the underlying bytes
	len int            // length in bytes
}

// memhash is the runtime's internal memory hash used by Go maps.
//go:noescape
//go:linkname memhash runtime.memhash
func memhash(p unsafe.Pointer, h, s uintptr) uintptr
|
||||||
|
|
||||||
|
// MemHash is the hash function used by go map, it utilizes available hardware instructions(behaves
// as aeshash if aes instruction is available).
// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
func MemHash(data []byte) uint64 {
	// Reinterpret the slice header as a string header so the runtime hash can
	// read the bytes without copying them.
	ss := (*stringStruct)(unsafe.Pointer(&data))
	return uint64(memhash(ss.str, 0, uintptr(ss.len)))
}

// MemHashString is the hash function used by go map, it utilizes available hardware instructions
// (behaves as aeshash if aes instruction is available).
// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash.
func MemHashString(str string) uint64 {
	// Same trick as MemHash, but starting from a string header.
	ss := (*stringStruct)(unsafe.Pointer(&str))
	return uint64(memhash(ss.str, 0, uintptr(ss.len)))
}
|
||||||
|
|
||||||
|
// FastRand is a fast thread local random function.
//go:linkname FastRand runtime.fastrand
func FastRand() uint32

// memclrNoHeapPointers is the runtime's optimized zeroing routine for memory
// known to contain no heap pointers.
//go:linkname memclrNoHeapPointers runtime.memclrNoHeapPointers
func memclrNoHeapPointers(p unsafe.Pointer, n uintptr)
|
||||||
|
|
||||||
|
// Memclr zeroes b in place using the runtime's optimized memory-clear
// routine. An empty slice is a no-op.
func Memclr(b []byte) {
	if len(b) == 0 {
		return
	}
	p := unsafe.Pointer(&b[0])
	memclrNoHeapPointers(p, uintptr(len(b)))
}
|
@ -0,0 +1,127 @@
|
|||||||
|
package simd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Naive returns the pair index of the first key in xs that is >= k, scanning
// the even slots (the keys) one at a time; xs holds (key, value) pairs laid
// out consecutively. When no key qualifies, len(xs)/2 is returned.
func Naive(xs []uint64, k uint64) int16 {
	idx := 0
	for idx < len(xs) {
		if xs[idx] >= k {
			break
		}
		idx += 2
	}
	return int16(idx / 2)
}
|
||||||
|
|
||||||
|
// Clever is a hand-unrolled variant of Naive: it checks four keys (four
// even slots of the (key, value) pair layout) per iteration and returns the
// pair index of the first key >= k, or len(xs)/2 when none qualifies.
// NOTE(review): for len(xs) >= 8 the loop reads xs[i+6] on every stride, so
// it appears to assume len(xs) is a multiple of 8 — confirm with callers.
func Clever(xs []uint64, k uint64) int16 {
	if len(xs) < 8 {
		// Too short to unroll; fall back to the simple scan.
		return Naive(xs, k)
	}
	var twos, pk [4]uint64
	// Broadcast k so each comparison below has its own copy.
	pk[0] = k
	pk[1] = k
	pk[2] = k
	pk[3] = k
	for i := 0; i < len(xs); i += 8 {
		// Load the four keys of this stride up front.
		twos[0] = xs[i]
		twos[1] = xs[i+2]
		twos[2] = xs[i+4]
		twos[3] = xs[i+6]
		if twos[0] >= pk[0] {
			return int16(i / 2)
		}
		if twos[1] >= pk[1] {
			return int16((i + 2) / 2)
		}
		if twos[2] >= pk[2] {
			return int16((i + 4) / 2)
		}
		if twos[3] >= pk[3] {
			return int16((i + 6) / 2)
		}

	}
	return int16(len(xs) / 2)
}
|
||||||
|
|
||||||
|
// Parallel splits xs into one chunk per CPU and scans the chunks
// concurrently, returning the smallest pair index whose key is >= k, or
// len(xs)/2 when no key qualifies. It panics when the CPU count is odd.
// NOTE(review): the int16 conversions overflow for very large slices
// (element index > 32767) — confirm callers bound the slice size.
func Parallel(xs []uint64, k uint64) int16 {
	cpus := runtime.NumCPU()
	if cpus%2 != 0 {
		panic(fmt.Sprintf("odd number of CPUs %v", cpus))
	}
	sz := len(xs)/cpus + 1
	var wg sync.WaitGroup
	// Buffered so every worker can report without blocking; each chunk sends
	// at most one result.
	retChan := make(chan int16, cpus)
	for i := 0; i < len(xs); i += sz {
		end := i + sz
		if end >= len(xs) {
			end = len(xs)
		}
		chunk := xs[i:end]
		wg.Add(1)
		// hd is the chunk's starting element offset within xs, used to
		// translate a chunk-local index back to a global pair index.
		go func(hd int16, xs []uint64, k uint64, wg *sync.WaitGroup, ch chan int16) {
			for i := 0; i < len(xs); i += 2 {
				if xs[i] >= k {
					ch <- (int16(i) + hd) / 2
					break
				}
			}
			wg.Done()
		}(int16(i), chunk, k, &wg, retChan)
	}
	wg.Wait()
	close(retChan)
	// Take the minimum over all reported indices; the sentinel value means no
	// worker found a qualifying key.
	var min int16 = (1 << 15) - 1
	for i := range retChan {
		if i < min {
			min = i
		}
	}
	if min == (1<<15)-1 {
		return int16(len(xs) / 2)
	}
	return min
}
|
||||||
|
|
||||||
|
// Binary returns the pair index of the first key >= key using binary search
// over the even (key) slots of keys; keys holds (key, value) pairs laid out
// consecutively.
func Binary(keys []uint64, key uint64) int16 {
	n := len(keys)
	atOrAfter := func(i int) bool {
		if i*2 >= n {
			return true
		}
		return keys[i*2] >= key
	}
	return int16(sort.Search(n, atOrAfter))
}
|
||||||
|
|
||||||
|
// cmp2_native reports which of the two packed keys equals its counterpart in
// pk: 0 for the first, 1 for the second, 2 when neither matches.
func cmp2_native(twos, pk [2]uint64) int16 {
	switch {
	case twos[0] == pk[0]:
		return 0
	case twos[1] == pk[1]:
		return 1
	default:
		return 2
	}
}
|
||||||
|
|
||||||
|
// cmp4_native returns the index of the first lane where fours[lane] is at
// least pk[lane], or 4 when every lane is below its threshold.
func cmp4_native(fours, pk [4]uint64) int16 {
	for lane := 0; lane < len(fours); lane++ {
		if fours[lane] >= pk[lane] {
			return int16(lane)
		}
	}
	return 4
}
|
||||||
|
|
||||||
|
// cmp8_native returns the index of the first element of a that is >= pk[0],
// or 8 when none is. Only the first entry of pk is consulted.
func cmp8_native(a [8]uint64, pk [4]uint64) int16 {
	threshold := pk[0]
	for pos := 0; pos < len(a); pos++ {
		if a[pos] >= threshold {
			return int16(pos)
		}
	}
	return 8
}
|
@ -0,0 +1,51 @@
|
|||||||
|
// +build !amd64
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Copyright 2020 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package simd
|
||||||
|
|
||||||
|
// Search uses the Clever search to find the correct key.
|
||||||
|
func Search(xs []uint64, k uint64) int16 {
|
||||||
|
if len(xs) < 8 {
|
||||||
|
return Naive(xs, k)
|
||||||
|
}
|
||||||
|
var twos, pk [4]uint64
|
||||||
|
pk[0] = k
|
||||||
|
pk[1] = k
|
||||||
|
pk[2] = k
|
||||||
|
pk[3] = k
|
||||||
|
for i := 0; i < len(xs); i += 8 {
|
||||||
|
twos[0] = xs[i]
|
||||||
|
twos[1] = xs[i+2]
|
||||||
|
twos[2] = xs[i+4]
|
||||||
|
twos[3] = xs[i+6]
|
||||||
|
if twos[0] >= pk[0] {
|
||||||
|
return int16(i / 2)
|
||||||
|
}
|
||||||
|
if twos[1] >= pk[1] {
|
||||||
|
return int16((i + 2) / 2)
|
||||||
|
}
|
||||||
|
if twos[2] >= pk[2] {
|
||||||
|
return int16((i + 4) / 2)
|
||||||
|
}
|
||||||
|
if twos[3] >= pk[3] {
|
||||||
|
return int16((i + 6) / 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
return int16(len(xs) / 2)
|
||||||
|
}
|
@ -0,0 +1,60 @@
|
|||||||
|
// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT.

#include "textflag.h"

// func Search(xs []uint64, k uint64) int16
// Scans xs four (key, value) pairs per iteration and returns the pair index
// of the first key >= k, or len(xs)/2 when none qualifies.
// NOTE(review): generated file — change asm2.go and regenerate rather than
// editing by hand.
TEXT ·Search(SB), NOSPLIT, $0-34
	MOVQ xs_base+0(FP), AX // AX = &xs[0]
	MOVQ xs_len+8(FP), CX  // CX = len(xs)
	MOVQ k+24(FP), DX      // DX = k

	// Save n
	MOVQ CX, BX

	// Initialize idx register to zero.
	XORL BP, BP

loop:
	// Unroll1
	CMPQ (AX)(BP*8), DX
	JAE Found

	// Unroll2
	CMPQ 16(AX)(BP*8), DX
	JAE Found2

	// Unroll3
	CMPQ 32(AX)(BP*8), DX
	JAE Found3

	// Unroll4
	CMPQ 48(AX)(BP*8), DX
	JAE Found4

	// plus8
	ADDQ $0x08, BP
	CMPQ BP, CX
	JB loop
	JMP NotFound

Found2:
	ADDL $0x02, BP
	JMP Found

Found3:
	ADDL $0x04, BP
	JMP Found

Found4:
	ADDL $0x06, BP

Found:
	MOVL BP, BX

NotFound:
	// Divide the element index by two (shift with sign correction) to turn it
	// into a pair index before returning.
	MOVL BX, BP
	SHRL $0x1f, BP
	ADDL BX, BP
	SHRL $0x01, BP
	MOVL BP, ret+32(FP)
	RET
|
@ -0,0 +1,6 @@
|
|||||||
|
// Code generated by command: go run asm2.go -out search_amd64.s -stubs stub_search_amd64.go. DO NOT EDIT.

package simd

// Search finds the first idx for which xs[idx] >= k in xs.
// The implementation is the hand-unrolled assembly in search_amd64.s.
func Search(xs []uint64, k uint64) int16
|
@ -0,0 +1,151 @@
|
|||||||
|
/*
|
||||||
|
* Copyright 2019 Dgraph Labs, Inc. and Contributors
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package z
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/cespare/xxhash/v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TODO: Figure out a way to re-use memhash for the second uint64 hash, we
|
||||||
|
// already know that appending bytes isn't reliable for generating a
|
||||||
|
// second hash (see Ristretto PR #88).
|
||||||
|
//
|
||||||
|
// We also know that while the Go runtime has a runtime memhash128
|
||||||
|
// function, it's not possible to use it to generate [2]uint64 or
|
||||||
|
// anything resembling a 128bit hash, even though that's exactly what
|
||||||
|
// we need in this situation.
|
||||||
|
func KeyToHash(key interface{}) (uint64, uint64) {
|
||||||
|
if key == nil {
|
||||||
|
return 0, 0
|
||||||
|
}
|
||||||
|
switch k := key.(type) {
|
||||||
|
case uint64:
|
||||||
|
return k, 0
|
||||||
|
case string:
|
||||||
|
return MemHashString(k), xxhash.Sum64String(k)
|
||||||
|
case []byte:
|
||||||
|
return MemHash(k), xxhash.Sum64(k)
|
||||||
|
case byte:
|
||||||
|
return uint64(k), 0
|
||||||
|
case int:
|
||||||
|
return uint64(k), 0
|
||||||
|
case int32:
|
||||||
|
return uint64(k), 0
|
||||||
|
case uint32:
|
||||||
|
return uint64(k), 0
|
||||||
|
case int64:
|
||||||
|
return uint64(k), 0
|
||||||
|
default:
|
||||||
|
panic("Key type not supported")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
	// dummyCloserChan is deliberately left nil: receiving from it blocks
	// forever, so a nil Closer's HasBeenClosed never fires.
	dummyCloserChan <-chan struct{}
	// tmpDir is the directory used for temporary buffers; set via SetTmpDir.
	tmpDir string
)
|
||||||
|
|
||||||
|
// Closer holds the two things we need to close a goroutine and wait for it to
// finish: a chan to tell the goroutine to shut down, and a WaitGroup with
// which to wait for it to finish shutting down.
type Closer struct {
	waiting sync.WaitGroup // balances NewCloser's initial count, AddRunning, and Done

	ctx    context.Context    // cancelled by Signal; exposed via Ctx and HasBeenClosed
	cancel context.CancelFunc // cancels ctx; invoked by Signal
}
|
||||||
|
|
||||||
|
// SetTmpDir sets the temporary directory for the temporary buffers.
// It writes a plain package-level variable, so call it before any concurrent
// use of the package.
func SetTmpDir(dir string) {
	tmpDir = dir
}
|
||||||
|
|
||||||
|
// NewCloser constructs a new Closer, with an initial count on the WaitGroup.
|
||||||
|
func NewCloser(initial int) *Closer {
|
||||||
|
ret := &Closer{}
|
||||||
|
ret.ctx, ret.cancel = context.WithCancel(context.Background())
|
||||||
|
ret.waiting.Add(initial)
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRunning Add()'s delta to the WaitGroup.
func (lc *Closer) AddRunning(delta int) {
	lc.waiting.Add(delta)
}

// Ctx can be used to get a context, which would automatically get cancelled when Signal is called.
// A nil Closer yields the background context.
func (lc *Closer) Ctx() context.Context {
	if lc == nil {
		return context.Background()
	}
	return lc.ctx
}

// Signal signals the HasBeenClosed signal.
func (lc *Closer) Signal() {
	// Todo(ibrahim): Change Signal to return error on next badger breaking change.
	lc.cancel()
}

// HasBeenClosed gets signaled when Signal() is called.
// A nil Closer returns a shared nil channel that never delivers.
func (lc *Closer) HasBeenClosed() <-chan struct{} {
	if lc == nil {
		return dummyCloserChan
	}
	return lc.ctx.Done()
}

// Done calls Done() on the WaitGroup. A nil Closer is a no-op.
func (lc *Closer) Done() {
	if lc == nil {
		return
	}
	lc.waiting.Done()
}

// Wait waits on the WaitGroup. (It waits for NewCloser's initial value, AddRunning, and Done
// calls to balance out.)
func (lc *Closer) Wait() {
	lc.waiting.Wait()
}

// SignalAndWait calls Signal(), then Wait().
func (lc *Closer) SignalAndWait() {
	lc.Signal()
	lc.Wait()
}
|
||||||
|
|
||||||
|
// ZeroOut zeroes out all the bytes in the range [start, end).
|
||||||
|
func ZeroOut(dst []byte, start, end int) {
|
||||||
|
if start < 0 || start >= len(dst) {
|
||||||
|
return // BAD
|
||||||
|
}
|
||||||
|
if end >= len(dst) {
|
||||||
|
end = len(dst)
|
||||||
|
}
|
||||||
|
if end-start <= 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
Memclr(dst[start:end])
|
||||||
|
// b := dst[start:end]
|
||||||
|
// for i := range b {
|
||||||
|
// b[i] = 0x0
|
||||||
|
// }
|
||||||
|
}
|
@ -0,0 +1,21 @@
|
|||||||
|
sudo: false
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.3.x
|
||||||
|
- 1.5.x
|
||||||
|
- 1.6.x
|
||||||
|
- 1.7.x
|
||||||
|
- 1.8.x
|
||||||
|
- 1.9.x
|
||||||
|
- master
|
||||||
|
matrix:
|
||||||
|
allow_failures:
|
||||||
|
- go: master
|
||||||
|
fast_finish: true
|
||||||
|
install:
|
||||||
|
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
|
||||||
|
script:
|
||||||
|
- go get -t -v ./...
|
||||||
|
- diff -u <(echo -n) <(gofmt -d -s .)
|
||||||
|
- go tool vet .
|
||||||
|
- go test -v -race ./...
|
@ -0,0 +1,21 @@
|
|||||||
|
Copyright (c) 2005-2008 Dustin Sallings <dustin@spy.net>
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
|
|
||||||
|
<http://www.opensource.org/licenses/mit-license.php>
|
@ -0,0 +1,124 @@
|
|||||||
|
# Humane Units [![Build Status](https://travis-ci.org/dustin/go-humanize.svg?branch=master)](https://travis-ci.org/dustin/go-humanize) [![GoDoc](https://godoc.org/github.com/dustin/go-humanize?status.svg)](https://godoc.org/github.com/dustin/go-humanize)
|
||||||
|
|
||||||
|
Just a few functions for helping humanize times and sizes.
|
||||||
|
|
||||||
|
`go get` it as `github.com/dustin/go-humanize`, import it as
|
||||||
|
`"github.com/dustin/go-humanize"`, use it as `humanize`.
|
||||||
|
|
||||||
|
See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
|
||||||
|
complete documentation.
|
||||||
|
|
||||||
|
## Sizes
|
||||||
|
|
||||||
|
This lets you take numbers like `82854982` and convert them to useful
|
||||||
|
strings like, `83 MB` or `79 MiB` (whichever you prefer).
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("That file is %s.", humanize.Bytes(82854982)) // That file is 83 MB.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Times
|
||||||
|
|
||||||
|
This lets you take a `time.Time` and spit it out in relative terms.
|
||||||
|
For example, `12 seconds ago` or `3 days from now`.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("This was touched %s.", humanize.Time(someTimeInstance)) // This was touched 7 hours ago.
|
||||||
|
```
|
||||||
|
|
||||||
|
Thanks to Kyle Lemons for the time implementation from an IRC
|
||||||
|
conversation one day. It's pretty neat.
|
||||||
|
|
||||||
|
## Ordinals
|
||||||
|
|
||||||
|
From a [mailing list discussion][odisc] where a user wanted to be able
|
||||||
|
to label ordinals.
|
||||||
|
|
||||||
|
0 -> 0th
|
||||||
|
1 -> 1st
|
||||||
|
2 -> 2nd
|
||||||
|
3 -> 3rd
|
||||||
|
4 -> 4th
|
||||||
|
[...]
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("You're my %s best friend.", humanize.Ordinal(193)) // You are my 193rd best friend.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commas
|
||||||
|
|
||||||
|
Want to shove commas into numbers? Be my guest.
|
||||||
|
|
||||||
|
0 -> 0
|
||||||
|
100 -> 100
|
||||||
|
1000 -> 1,000
|
||||||
|
1000000000 -> 1,000,000,000
|
||||||
|
-100000 -> -100,000
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("You owe $%s.\n", humanize.Comma(6582491)) // You owe $6,582,491.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Ftoa
|
||||||
|
|
||||||
|
Nicer float64 formatter that removes trailing zeros.
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Printf("%f", 2.24) // 2.240000
|
||||||
|
fmt.Printf("%s", humanize.Ftoa(2.24)) // 2.24
|
||||||
|
fmt.Printf("%f", 2.0) // 2.000000
|
||||||
|
fmt.Printf("%s", humanize.Ftoa(2.0)) // 2
|
||||||
|
```
|
||||||
|
|
||||||
|
## SI notation
|
||||||
|
|
||||||
|
Format numbers with [SI notation][sinotation].
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```go
|
||||||
|
humanize.SI(0.00000000223, "M") // 2.23 nM
|
||||||
|
```
|
||||||
|
|
||||||
|
## English-specific functions
|
||||||
|
|
||||||
|
The following functions are in the `humanize/english` subpackage.
|
||||||
|
|
||||||
|
### Plurals
|
||||||
|
|
||||||
|
Simple English pluralization
|
||||||
|
|
||||||
|
```go
|
||||||
|
english.PluralWord(1, "object", "") // object
|
||||||
|
english.PluralWord(42, "object", "") // objects
|
||||||
|
english.PluralWord(2, "bus", "") // buses
|
||||||
|
english.PluralWord(99, "locus", "loci") // loci
|
||||||
|
|
||||||
|
english.Plural(1, "object", "") // 1 object
|
||||||
|
english.Plural(42, "object", "") // 42 objects
|
||||||
|
english.Plural(2, "bus", "") // 2 buses
|
||||||
|
english.Plural(99, "locus", "loci") // 99 loci
|
||||||
|
```
|
||||||
|
|
||||||
|
### Word series
|
||||||
|
|
||||||
|
Format comma-separated words lists with conjuctions:
|
||||||
|
|
||||||
|
```go
|
||||||
|
english.WordSeries([]string{"foo"}, "and") // foo
|
||||||
|
english.WordSeries([]string{"foo", "bar"}, "and") // foo and bar
|
||||||
|
english.WordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar and baz
|
||||||
|
|
||||||
|
english.OxfordWordSeries([]string{"foo", "bar", "baz"}, "and") // foo, bar, and baz
|
||||||
|
```
|
||||||
|
|
||||||
|
[odisc]: https://groups.google.com/d/topic/golang-nuts/l8NhI74jl-4/discussion
|
||||||
|
[sinotation]: http://en.wikipedia.org/wiki/Metric_prefix
|
@ -0,0 +1,31 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// order of magnitude (to a max order)
|
||||||
|
func oomm(n, b *big.Int, maxmag int) (float64, int) {
|
||||||
|
mag := 0
|
||||||
|
m := &big.Int{}
|
||||||
|
for n.Cmp(b) >= 0 {
|
||||||
|
n.DivMod(n, b, m)
|
||||||
|
mag++
|
||||||
|
if mag == maxmag && maxmag >= 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||||
|
}
|
||||||
|
|
||||||
|
// total order of magnitude
|
||||||
|
// (same as above, but with no upper limit)
|
||||||
|
func oom(n, b *big.Int) (float64, int) {
|
||||||
|
mag := 0
|
||||||
|
m := &big.Int{}
|
||||||
|
for n.Cmp(b) >= 0 {
|
||||||
|
n.DivMod(n, b, m)
|
||||||
|
mag++
|
||||||
|
}
|
||||||
|
return float64(n.Int64()) + (float64(m.Int64()) / float64(b.Int64())), mag
|
||||||
|
}
|
@ -0,0 +1,173 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bigIECExp = big.NewInt(1024)
|
||||||
|
|
||||||
|
// BigByte is one byte in bit.Ints
|
||||||
|
BigByte = big.NewInt(1)
|
||||||
|
// BigKiByte is 1,024 bytes in bit.Ints
|
||||||
|
BigKiByte = (&big.Int{}).Mul(BigByte, bigIECExp)
|
||||||
|
// BigMiByte is 1,024 k bytes in bit.Ints
|
||||||
|
BigMiByte = (&big.Int{}).Mul(BigKiByte, bigIECExp)
|
||||||
|
// BigGiByte is 1,024 m bytes in bit.Ints
|
||||||
|
BigGiByte = (&big.Int{}).Mul(BigMiByte, bigIECExp)
|
||||||
|
// BigTiByte is 1,024 g bytes in bit.Ints
|
||||||
|
BigTiByte = (&big.Int{}).Mul(BigGiByte, bigIECExp)
|
||||||
|
// BigPiByte is 1,024 t bytes in bit.Ints
|
||||||
|
BigPiByte = (&big.Int{}).Mul(BigTiByte, bigIECExp)
|
||||||
|
// BigEiByte is 1,024 p bytes in bit.Ints
|
||||||
|
BigEiByte = (&big.Int{}).Mul(BigPiByte, bigIECExp)
|
||||||
|
// BigZiByte is 1,024 e bytes in bit.Ints
|
||||||
|
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
|
||||||
|
// BigYiByte is 1,024 z bytes in bit.Ints
|
||||||
|
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
bigSIExp = big.NewInt(1000)
|
||||||
|
|
||||||
|
// BigSIByte is one SI byte in big.Ints
|
||||||
|
BigSIByte = big.NewInt(1)
|
||||||
|
// BigKByte is 1,000 SI bytes in big.Ints
|
||||||
|
BigKByte = (&big.Int{}).Mul(BigSIByte, bigSIExp)
|
||||||
|
// BigMByte is 1,000 SI k bytes in big.Ints
|
||||||
|
BigMByte = (&big.Int{}).Mul(BigKByte, bigSIExp)
|
||||||
|
// BigGByte is 1,000 SI m bytes in big.Ints
|
||||||
|
BigGByte = (&big.Int{}).Mul(BigMByte, bigSIExp)
|
||||||
|
// BigTByte is 1,000 SI g bytes in big.Ints
|
||||||
|
BigTByte = (&big.Int{}).Mul(BigGByte, bigSIExp)
|
||||||
|
// BigPByte is 1,000 SI t bytes in big.Ints
|
||||||
|
BigPByte = (&big.Int{}).Mul(BigTByte, bigSIExp)
|
||||||
|
// BigEByte is 1,000 SI p bytes in big.Ints
|
||||||
|
BigEByte = (&big.Int{}).Mul(BigPByte, bigSIExp)
|
||||||
|
// BigZByte is 1,000 SI e bytes in big.Ints
|
||||||
|
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
|
||||||
|
// BigYByte is 1,000 SI z bytes in big.Ints
|
||||||
|
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
|
||||||
|
)
|
||||||
|
|
||||||
|
var bigBytesSizeTable = map[string]*big.Int{
|
||||||
|
"b": BigByte,
|
||||||
|
"kib": BigKiByte,
|
||||||
|
"kb": BigKByte,
|
||||||
|
"mib": BigMiByte,
|
||||||
|
"mb": BigMByte,
|
||||||
|
"gib": BigGiByte,
|
||||||
|
"gb": BigGByte,
|
||||||
|
"tib": BigTiByte,
|
||||||
|
"tb": BigTByte,
|
||||||
|
"pib": BigPiByte,
|
||||||
|
"pb": BigPByte,
|
||||||
|
"eib": BigEiByte,
|
||||||
|
"eb": BigEByte,
|
||||||
|
"zib": BigZiByte,
|
||||||
|
"zb": BigZByte,
|
||||||
|
"yib": BigYiByte,
|
||||||
|
"yb": BigYByte,
|
||||||
|
// Without suffix
|
||||||
|
"": BigByte,
|
||||||
|
"ki": BigKiByte,
|
||||||
|
"k": BigKByte,
|
||||||
|
"mi": BigMiByte,
|
||||||
|
"m": BigMByte,
|
||||||
|
"gi": BigGiByte,
|
||||||
|
"g": BigGByte,
|
||||||
|
"ti": BigTiByte,
|
||||||
|
"t": BigTByte,
|
||||||
|
"pi": BigPiByte,
|
||||||
|
"p": BigPByte,
|
||||||
|
"ei": BigEiByte,
|
||||||
|
"e": BigEByte,
|
||||||
|
"z": BigZByte,
|
||||||
|
"zi": BigZiByte,
|
||||||
|
"y": BigYByte,
|
||||||
|
"yi": BigYiByte,
|
||||||
|
}
|
||||||
|
|
||||||
|
var ten = big.NewInt(10)
|
||||||
|
|
||||||
|
func humanateBigBytes(s, base *big.Int, sizes []string) string {
|
||||||
|
if s.Cmp(ten) < 0 {
|
||||||
|
return fmt.Sprintf("%d B", s)
|
||||||
|
}
|
||||||
|
c := (&big.Int{}).Set(s)
|
||||||
|
val, mag := oomm(c, base, len(sizes)-1)
|
||||||
|
suffix := sizes[mag]
|
||||||
|
f := "%.0f %s"
|
||||||
|
if val < 10 {
|
||||||
|
f = "%.1f %s"
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(f, val, suffix)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigBytes produces a human readable representation of an SI size.
|
||||||
|
//
|
||||||
|
// See also: ParseBigBytes.
|
||||||
|
//
|
||||||
|
// BigBytes(82854982) -> 83 MB
|
||||||
|
func BigBytes(s *big.Int) string {
|
||||||
|
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
||||||
|
return humanateBigBytes(s, bigSIExp, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigIBytes produces a human readable representation of an IEC size.
|
||||||
|
//
|
||||||
|
// See also: ParseBigBytes.
|
||||||
|
//
|
||||||
|
// BigIBytes(82854982) -> 79 MiB
|
||||||
|
func BigIBytes(s *big.Int) string {
|
||||||
|
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
||||||
|
return humanateBigBytes(s, bigIECExp, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBigBytes parses a string representation of bytes into the number
|
||||||
|
// of bytes it represents.
|
||||||
|
//
|
||||||
|
// See also: BigBytes, BigIBytes.
|
||||||
|
//
|
||||||
|
// ParseBigBytes("42 MB") -> 42000000, nil
|
||||||
|
// ParseBigBytes("42 mib") -> 44040192, nil
|
||||||
|
func ParseBigBytes(s string) (*big.Int, error) {
|
||||||
|
lastDigit := 0
|
||||||
|
hasComma := false
|
||||||
|
for _, r := range s {
|
||||||
|
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if r == ',' {
|
||||||
|
hasComma = true
|
||||||
|
}
|
||||||
|
lastDigit++
|
||||||
|
}
|
||||||
|
|
||||||
|
num := s[:lastDigit]
|
||||||
|
if hasComma {
|
||||||
|
num = strings.Replace(num, ",", "", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
val := &big.Rat{}
|
||||||
|
_, err := fmt.Sscanf(num, "%f", val)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||||
|
if m, ok := bigBytesSizeTable[extra]; ok {
|
||||||
|
mv := (&big.Rat{}).SetInt(m)
|
||||||
|
val.Mul(val, mv)
|
||||||
|
rv := &big.Int{}
|
||||||
|
rv.Div(val.Num(), val.Denom())
|
||||||
|
return rv, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("unhandled size name: %v", extra)
|
||||||
|
}
|
@ -0,0 +1,143 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// IEC Sizes.
|
||||||
|
// kibis of bits
|
||||||
|
const (
|
||||||
|
Byte = 1 << (iota * 10)
|
||||||
|
KiByte
|
||||||
|
MiByte
|
||||||
|
GiByte
|
||||||
|
TiByte
|
||||||
|
PiByte
|
||||||
|
EiByte
|
||||||
|
)
|
||||||
|
|
||||||
|
// SI Sizes.
|
||||||
|
const (
|
||||||
|
IByte = 1
|
||||||
|
KByte = IByte * 1000
|
||||||
|
MByte = KByte * 1000
|
||||||
|
GByte = MByte * 1000
|
||||||
|
TByte = GByte * 1000
|
||||||
|
PByte = TByte * 1000
|
||||||
|
EByte = PByte * 1000
|
||||||
|
)
|
||||||
|
|
||||||
|
var bytesSizeTable = map[string]uint64{
|
||||||
|
"b": Byte,
|
||||||
|
"kib": KiByte,
|
||||||
|
"kb": KByte,
|
||||||
|
"mib": MiByte,
|
||||||
|
"mb": MByte,
|
||||||
|
"gib": GiByte,
|
||||||
|
"gb": GByte,
|
||||||
|
"tib": TiByte,
|
||||||
|
"tb": TByte,
|
||||||
|
"pib": PiByte,
|
||||||
|
"pb": PByte,
|
||||||
|
"eib": EiByte,
|
||||||
|
"eb": EByte,
|
||||||
|
// Without suffix
|
||||||
|
"": Byte,
|
||||||
|
"ki": KiByte,
|
||||||
|
"k": KByte,
|
||||||
|
"mi": MiByte,
|
||||||
|
"m": MByte,
|
||||||
|
"gi": GiByte,
|
||||||
|
"g": GByte,
|
||||||
|
"ti": TiByte,
|
||||||
|
"t": TByte,
|
||||||
|
"pi": PiByte,
|
||||||
|
"p": PByte,
|
||||||
|
"ei": EiByte,
|
||||||
|
"e": EByte,
|
||||||
|
}
|
||||||
|
|
||||||
|
func logn(n, b float64) float64 {
|
||||||
|
return math.Log(n) / math.Log(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func humanateBytes(s uint64, base float64, sizes []string) string {
|
||||||
|
if s < 10 {
|
||||||
|
return fmt.Sprintf("%d B", s)
|
||||||
|
}
|
||||||
|
e := math.Floor(logn(float64(s), base))
|
||||||
|
suffix := sizes[int(e)]
|
||||||
|
val := math.Floor(float64(s)/math.Pow(base, e)*10+0.5) / 10
|
||||||
|
f := "%.0f %s"
|
||||||
|
if val < 10 {
|
||||||
|
f = "%.1f %s"
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(f, val, suffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bytes produces a human readable representation of an SI size.
|
||||||
|
//
|
||||||
|
// See also: ParseBytes.
|
||||||
|
//
|
||||||
|
// Bytes(82854982) -> 83 MB
|
||||||
|
func Bytes(s uint64) string {
|
||||||
|
sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB"}
|
||||||
|
return humanateBytes(s, 1000, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IBytes produces a human readable representation of an IEC size.
|
||||||
|
//
|
||||||
|
// See also: ParseBytes.
|
||||||
|
//
|
||||||
|
// IBytes(82854982) -> 79 MiB
|
||||||
|
func IBytes(s uint64) string {
|
||||||
|
sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}
|
||||||
|
return humanateBytes(s, 1024, sizes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseBytes parses a string representation of bytes into the number
|
||||||
|
// of bytes it represents.
|
||||||
|
//
|
||||||
|
// See Also: Bytes, IBytes.
|
||||||
|
//
|
||||||
|
// ParseBytes("42 MB") -> 42000000, nil
|
||||||
|
// ParseBytes("42 mib") -> 44040192, nil
|
||||||
|
func ParseBytes(s string) (uint64, error) {
|
||||||
|
lastDigit := 0
|
||||||
|
hasComma := false
|
||||||
|
for _, r := range s {
|
||||||
|
if !(unicode.IsDigit(r) || r == '.' || r == ',') {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if r == ',' {
|
||||||
|
hasComma = true
|
||||||
|
}
|
||||||
|
lastDigit++
|
||||||
|
}
|
||||||
|
|
||||||
|
num := s[:lastDigit]
|
||||||
|
if hasComma {
|
||||||
|
num = strings.Replace(num, ",", "", -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := strconv.ParseFloat(num, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
|
||||||
|
if m, ok := bytesSizeTable[extra]; ok {
|
||||||
|
f *= float64(m)
|
||||||
|
if f >= math.MaxUint64 {
|
||||||
|
return 0, fmt.Errorf("too large: %v", s)
|
||||||
|
}
|
||||||
|
return uint64(f), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, fmt.Errorf("unhandled size name: %v", extra)
|
||||||
|
}
|
@ -0,0 +1,116 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Comma produces a string form of the given number in base 10 with
|
||||||
|
// commas after every three orders of magnitude.
|
||||||
|
//
|
||||||
|
// e.g. Comma(834142) -> 834,142
|
||||||
|
func Comma(v int64) string {
|
||||||
|
sign := ""
|
||||||
|
|
||||||
|
// Min int64 can't be negated to a usable value, so it has to be special cased.
|
||||||
|
if v == math.MinInt64 {
|
||||||
|
return "-9,223,372,036,854,775,808"
|
||||||
|
}
|
||||||
|
|
||||||
|
if v < 0 {
|
||||||
|
sign = "-"
|
||||||
|
v = 0 - v
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := []string{"", "", "", "", "", "", ""}
|
||||||
|
j := len(parts) - 1
|
||||||
|
|
||||||
|
for v > 999 {
|
||||||
|
parts[j] = strconv.FormatInt(v%1000, 10)
|
||||||
|
switch len(parts[j]) {
|
||||||
|
case 2:
|
||||||
|
parts[j] = "0" + parts[j]
|
||||||
|
case 1:
|
||||||
|
parts[j] = "00" + parts[j]
|
||||||
|
}
|
||||||
|
v = v / 1000
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
parts[j] = strconv.Itoa(int(v))
|
||||||
|
return sign + strings.Join(parts[j:], ",")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Commaf produces a string form of the given number in base 10 with
|
||||||
|
// commas after every three orders of magnitude.
|
||||||
|
//
|
||||||
|
// e.g. Commaf(834142.32) -> 834,142.32
|
||||||
|
func Commaf(v float64) string {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if v < 0 {
|
||||||
|
buf.Write([]byte{'-'})
|
||||||
|
v = 0 - v
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := []byte{','}
|
||||||
|
|
||||||
|
parts := strings.Split(strconv.FormatFloat(v, 'f', -1, 64), ".")
|
||||||
|
pos := 0
|
||||||
|
if len(parts[0])%3 != 0 {
|
||||||
|
pos += len(parts[0]) % 3
|
||||||
|
buf.WriteString(parts[0][:pos])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
for ; pos < len(parts[0]); pos += 3 {
|
||||||
|
buf.WriteString(parts[0][pos : pos+3])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
buf.Truncate(buf.Len() - 1)
|
||||||
|
|
||||||
|
if len(parts) > 1 {
|
||||||
|
buf.Write([]byte{'.'})
|
||||||
|
buf.WriteString(parts[1])
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// CommafWithDigits works like the Commaf but limits the resulting
|
||||||
|
// string to the given number of decimal places.
|
||||||
|
//
|
||||||
|
// e.g. CommafWithDigits(834142.32, 1) -> 834,142.3
|
||||||
|
func CommafWithDigits(f float64, decimals int) string {
|
||||||
|
return stripTrailingDigits(Commaf(f), decimals)
|
||||||
|
}
|
||||||
|
|
||||||
|
// BigComma produces a string form of the given big.Int in base 10
|
||||||
|
// with commas after every three orders of magnitude.
|
||||||
|
func BigComma(b *big.Int) string {
|
||||||
|
sign := ""
|
||||||
|
if b.Sign() < 0 {
|
||||||
|
sign = "-"
|
||||||
|
b.Abs(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
athousand := big.NewInt(1000)
|
||||||
|
c := (&big.Int{}).Set(b)
|
||||||
|
_, m := oom(c, athousand)
|
||||||
|
parts := make([]string, m+1)
|
||||||
|
j := len(parts) - 1
|
||||||
|
|
||||||
|
mod := &big.Int{}
|
||||||
|
for b.Cmp(athousand) >= 0 {
|
||||||
|
b.DivMod(b, athousand, mod)
|
||||||
|
parts[j] = strconv.FormatInt(mod.Int64(), 10)
|
||||||
|
switch len(parts[j]) {
|
||||||
|
case 2:
|
||||||
|
parts[j] = "0" + parts[j]
|
||||||
|
case 1:
|
||||||
|
parts[j] = "00" + parts[j]
|
||||||
|
}
|
||||||
|
j--
|
||||||
|
}
|
||||||
|
parts[j] = strconv.Itoa(int(b.Int64()))
|
||||||
|
return sign + strings.Join(parts[j:], ",")
|
||||||
|
}
|
@ -0,0 +1,40 @@
|
|||||||
|
// +build go1.6
|
||||||
|
|
||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"math/big"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BigCommaf produces a string form of the given big.Float in base 10
|
||||||
|
// with commas after every three orders of magnitude.
|
||||||
|
func BigCommaf(v *big.Float) string {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
if v.Sign() < 0 {
|
||||||
|
buf.Write([]byte{'-'})
|
||||||
|
v.Abs(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
comma := []byte{','}
|
||||||
|
|
||||||
|
parts := strings.Split(v.Text('f', -1), ".")
|
||||||
|
pos := 0
|
||||||
|
if len(parts[0])%3 != 0 {
|
||||||
|
pos += len(parts[0]) % 3
|
||||||
|
buf.WriteString(parts[0][:pos])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
for ; pos < len(parts[0]); pos += 3 {
|
||||||
|
buf.WriteString(parts[0][pos : pos+3])
|
||||||
|
buf.Write(comma)
|
||||||
|
}
|
||||||
|
buf.Truncate(buf.Len() - 1)
|
||||||
|
|
||||||
|
if len(parts) > 1 {
|
||||||
|
buf.Write([]byte{'.'})
|
||||||
|
buf.WriteString(parts[1])
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
@ -0,0 +1,46 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
func stripTrailingZeros(s string) string {
|
||||||
|
offset := len(s) - 1
|
||||||
|
for offset > 0 {
|
||||||
|
if s[offset] == '.' {
|
||||||
|
offset--
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if s[offset] != '0' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
offset--
|
||||||
|
}
|
||||||
|
return s[:offset+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
func stripTrailingDigits(s string, digits int) string {
|
||||||
|
if i := strings.Index(s, "."); i >= 0 {
|
||||||
|
if digits <= 0 {
|
||||||
|
return s[:i]
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
if i+digits >= len(s) {
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
return s[:i+digits]
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ftoa converts a float to a string with no trailing zeros.
|
||||||
|
func Ftoa(num float64) string {
|
||||||
|
return stripTrailingZeros(strconv.FormatFloat(num, 'f', 6, 64))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FtoaWithDigits converts a float to a string but limits the resulting string
|
||||||
|
// to the given number of decimal places, and no trailing zeros.
|
||||||
|
func FtoaWithDigits(num float64, digits int) string {
|
||||||
|
return stripTrailingZeros(stripTrailingDigits(strconv.FormatFloat(num, 'f', 6, 64), digits))
|
||||||
|
}
|
@ -0,0 +1,8 @@
|
|||||||
|
/*
|
||||||
|
Package humanize converts boring ugly numbers to human-friendly strings and back.
|
||||||
|
|
||||||
|
Durations can be turned into strings such as "3 days ago", numbers
|
||||||
|
representing sizes like 82854982 into useful strings like, "83 MB" or
|
||||||
|
"79 MiB" (whichever you prefer).
|
||||||
|
*/
|
||||||
|
package humanize
|
@ -0,0 +1,25 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import "strconv"
|
||||||
|
|
||||||
|
// Ordinal gives you the input number in a rank/ordinal format.
|
||||||
|
//
|
||||||
|
// Ordinal(3) -> 3rd
|
||||||
|
func Ordinal(x int) string {
|
||||||
|
suffix := "th"
|
||||||
|
switch x % 10 {
|
||||||
|
case 1:
|
||||||
|
if x%100 != 11 {
|
||||||
|
suffix = "st"
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if x%100 != 12 {
|
||||||
|
suffix = "nd"
|
||||||
|
}
|
||||||
|
case 3:
|
||||||
|
if x%100 != 13 {
|
||||||
|
suffix = "rd"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strconv.Itoa(x) + suffix
|
||||||
|
}
|
@ -0,0 +1,123 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"math"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
var siPrefixTable = map[float64]string{
|
||||||
|
-24: "y", // yocto
|
||||||
|
-21: "z", // zepto
|
||||||
|
-18: "a", // atto
|
||||||
|
-15: "f", // femto
|
||||||
|
-12: "p", // pico
|
||||||
|
-9: "n", // nano
|
||||||
|
-6: "µ", // micro
|
||||||
|
-3: "m", // milli
|
||||||
|
0: "",
|
||||||
|
3: "k", // kilo
|
||||||
|
6: "M", // mega
|
||||||
|
9: "G", // giga
|
||||||
|
12: "T", // tera
|
||||||
|
15: "P", // peta
|
||||||
|
18: "E", // exa
|
||||||
|
21: "Z", // zetta
|
||||||
|
24: "Y", // yotta
|
||||||
|
}
|
||||||
|
|
||||||
|
var revSIPrefixTable = revfmap(siPrefixTable)
|
||||||
|
|
||||||
|
// revfmap reverses the map and precomputes the power multiplier
|
||||||
|
func revfmap(in map[float64]string) map[string]float64 {
|
||||||
|
rv := map[string]float64{}
|
||||||
|
for k, v := range in {
|
||||||
|
rv[v] = math.Pow(10, k)
|
||||||
|
}
|
||||||
|
return rv
|
||||||
|
}
|
||||||
|
|
||||||
|
var riParseRegex *regexp.Regexp
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
ri := `^([\-0-9.]+)\s?([`
|
||||||
|
for _, v := range siPrefixTable {
|
||||||
|
ri += v
|
||||||
|
}
|
||||||
|
ri += `]?)(.*)`
|
||||||
|
|
||||||
|
riParseRegex = regexp.MustCompile(ri)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ComputeSI finds the most appropriate SI prefix for the given number
|
||||||
|
// and returns the prefix along with the value adjusted to be within
|
||||||
|
// that prefix.
|
||||||
|
//
|
||||||
|
// See also: SI, ParseSI.
|
||||||
|
//
|
||||||
|
// e.g. ComputeSI(2.2345e-12) -> (2.2345, "p")
|
||||||
|
func ComputeSI(input float64) (float64, string) {
|
||||||
|
if input == 0 {
|
||||||
|
return 0, ""
|
||||||
|
}
|
||||||
|
mag := math.Abs(input)
|
||||||
|
exponent := math.Floor(logn(mag, 10))
|
||||||
|
exponent = math.Floor(exponent/3) * 3
|
||||||
|
|
||||||
|
value := mag / math.Pow(10, exponent)
|
||||||
|
|
||||||
|
// Handle special case where value is exactly 1000.0
|
||||||
|
// Should return 1 M instead of 1000 k
|
||||||
|
if value == 1000.0 {
|
||||||
|
exponent += 3
|
||||||
|
value = mag / math.Pow(10, exponent)
|
||||||
|
}
|
||||||
|
|
||||||
|
value = math.Copysign(value, input)
|
||||||
|
|
||||||
|
prefix := siPrefixTable[exponent]
|
||||||
|
return value, prefix
|
||||||
|
}
|
||||||
|
|
||||||
|
// SI returns a string with default formatting.
|
||||||
|
//
|
||||||
|
// SI uses Ftoa to format float value, removing trailing zeros.
|
||||||
|
//
|
||||||
|
// See also: ComputeSI, ParseSI.
|
||||||
|
//
|
||||||
|
// e.g. SI(1000000, "B") -> 1 MB
|
||||||
|
// e.g. SI(2.2345e-12, "F") -> 2.2345 pF
|
||||||
|
func SI(input float64, unit string) string {
|
||||||
|
value, prefix := ComputeSI(input)
|
||||||
|
return Ftoa(value) + " " + prefix + unit
|
||||||
|
}
|
||||||
|
|
||||||
|
// SIWithDigits works like SI but limits the resulting string to the
|
||||||
|
// given number of decimal places.
|
||||||
|
//
|
||||||
|
// e.g. SIWithDigits(1000000, 0, "B") -> 1 MB
|
||||||
|
// e.g. SIWithDigits(2.2345e-12, 2, "F") -> 2.23 pF
|
||||||
|
func SIWithDigits(input float64, decimals int, unit string) string {
|
||||||
|
value, prefix := ComputeSI(input)
|
||||||
|
return FtoaWithDigits(value, decimals) + " " + prefix + unit
|
||||||
|
}
|
||||||
|
|
||||||
|
var errInvalid = errors.New("invalid input")
|
||||||
|
|
||||||
|
// ParseSI parses an SI string back into the number and unit.
|
||||||
|
//
|
||||||
|
// See also: SI, ComputeSI.
|
||||||
|
//
|
||||||
|
// e.g. ParseSI("2.2345 pF") -> (2.2345e-12, "F", nil)
|
||||||
|
func ParseSI(input string) (float64, string, error) {
|
||||||
|
found := riParseRegex.FindStringSubmatch(input)
|
||||||
|
if len(found) != 4 {
|
||||||
|
return 0, "", errInvalid
|
||||||
|
}
|
||||||
|
mag := revSIPrefixTable[found[2]]
|
||||||
|
unit := found[3]
|
||||||
|
|
||||||
|
base, err := strconv.ParseFloat(found[1], 64)
|
||||||
|
return base * mag, unit, err
|
||||||
|
}
|
@ -0,0 +1,117 @@
|
|||||||
|
package humanize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Seconds-based time units
|
||||||
|
const (
|
||||||
|
Day = 24 * time.Hour
|
||||||
|
Week = 7 * Day
|
||||||
|
Month = 30 * Day
|
||||||
|
Year = 12 * Month
|
||||||
|
LongTime = 37 * Year
|
||||||
|
)
|
||||||
|
|
||||||
|
// Time formats a time into a relative string.
|
||||||
|
//
|
||||||
|
// Time(someT) -> "3 weeks ago"
|
||||||
|
func Time(then time.Time) string {
|
||||||
|
return RelTime(then, time.Now(), "ago", "from now")
|
||||||
|
}
|
||||||
|
|
||||||
|
// A RelTimeMagnitude struct contains a relative time point at which
|
||||||
|
// the relative format of time will switch to a new format string. A
|
||||||
|
// slice of these in ascending order by their "D" field is passed to
|
||||||
|
// CustomRelTime to format durations.
|
||||||
|
//
|
||||||
|
// The Format field is a string that may contain a "%s" which will be
|
||||||
|
// replaced with the appropriate signed label (e.g. "ago" or "from
|
||||||
|
// now") and a "%d" that will be replaced by the quantity.
|
||||||
|
//
|
||||||
|
// The DivBy field is the amount of time the time difference must be
|
||||||
|
// divided by in order to display correctly.
|
||||||
|
//
|
||||||
|
// e.g. if D is 2*time.Minute and you want to display "%d minutes %s"
|
||||||
|
// DivBy should be time.Minute so whatever the duration is will be
|
||||||
|
// expressed in minutes.
|
||||||
|
type RelTimeMagnitude struct {
|
||||||
|
D time.Duration
|
||||||
|
Format string
|
||||||
|
DivBy time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultMagnitudes = []RelTimeMagnitude{
|
||||||
|
{time.Second, "now", time.Second},
|
||||||
|
{2 * time.Second, "1 second %s", 1},
|
||||||
|
{time.Minute, "%d seconds %s", time.Second},
|
||||||
|
{2 * time.Minute, "1 minute %s", 1},
|
||||||
|
{time.Hour, "%d minutes %s", time.Minute},
|
||||||
|
{2 * time.Hour, "1 hour %s", 1},
|
||||||
|
{Day, "%d hours %s", time.Hour},
|
||||||
|
{2 * Day, "1 day %s", 1},
|
||||||
|
{Week, "%d days %s", Day},
|
||||||
|
{2 * Week, "1 week %s", 1},
|
||||||
|
{Month, "%d weeks %s", Week},
|
||||||
|
{2 * Month, "1 month %s", 1},
|
||||||
|
{Year, "%d months %s", Month},
|
||||||
|
{18 * Month, "1 year %s", 1},
|
||||||
|
{2 * Year, "2 years %s", 1},
|
||||||
|
{LongTime, "%d years %s", Year},
|
||||||
|
{math.MaxInt64, "a long while %s", 1},
|
||||||
|
}
|
||||||
|
|
||||||
|
// RelTime formats a time into a relative string.
//
// It takes two times and two labels. In addition to the generic time
// delta string (e.g. 5 minutes), the label corresponding to the
// smaller time is appended.
//
// RelTime(timeInPast, timeInFuture, "earlier", "later") -> "3 weeks earlier"
func RelTime(a, b time.Time, albl, blbl string) string {
	return CustomRelTime(a, b, albl, blbl, defaultMagnitudes)
}
|
||||||
|
|
||||||
|
// CustomRelTime formats a time into a relative string.
|
||||||
|
//
|
||||||
|
// It takes two times two labels and a table of relative time formats.
|
||||||
|
// In addition to the generic time delta string (e.g. 5 minutes), the
|
||||||
|
// labels are used applied so that the label corresponding to the
|
||||||
|
// smaller time is applied.
|
||||||
|
func CustomRelTime(a, b time.Time, albl, blbl string, magnitudes []RelTimeMagnitude) string {
|
||||||
|
lbl := albl
|
||||||
|
diff := b.Sub(a)
|
||||||
|
|
||||||
|
if a.After(b) {
|
||||||
|
lbl = blbl
|
||||||
|
diff = a.Sub(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := sort.Search(len(magnitudes), func(i int) bool {
|
||||||
|
return magnitudes[i].D > diff
|
||||||
|
})
|
||||||
|
|
||||||
|
if n >= len(magnitudes) {
|
||||||
|
n = len(magnitudes) - 1
|
||||||
|
}
|
||||||
|
mag := magnitudes[n]
|
||||||
|
args := []interface{}{}
|
||||||
|
escaped := false
|
||||||
|
for _, ch := range mag.Format {
|
||||||
|
if escaped {
|
||||||
|
switch ch {
|
||||||
|
case 's':
|
||||||
|
args = append(args, lbl)
|
||||||
|
case 'd':
|
||||||
|
args = append(args, diff/mag.DivBy)
|
||||||
|
}
|
||||||
|
escaped = false
|
||||||
|
} else {
|
||||||
|
escaped = ch == '%'
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Sprintf(mag.Format, args...)
|
||||||
|
}
|
@ -0,0 +1,26 @@
|
|||||||
|
language: go
|
||||||
|
sudo: false
|
||||||
|
go:
|
||||||
|
- 1.8.x
|
||||||
|
- 1.9.x
|
||||||
|
- 1.10.x
|
||||||
|
- 1.11.x
|
||||||
|
- 1.12.x
|
||||||
|
- master
|
||||||
|
|
||||||
|
git:
|
||||||
|
depth: 10
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
fast_finish: true
|
||||||
|
include:
|
||||||
|
- go: 1.11.x
|
||||||
|
env: GO111MODULE=on
|
||||||
|
- go: 1.12.x
|
||||||
|
env: GO111MODULE=on
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -v -covermode=count -coverprofile=coverage.out
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
@ -0,0 +1,21 @@
|
|||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Manuel Martínez-Almeida
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
@ -0,0 +1,58 @@
|
|||||||
|
# Server-Sent Events
|
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/gin-contrib/sse?status.svg)](https://godoc.org/github.com/gin-contrib/sse)
|
||||||
|
[![Build Status](https://travis-ci.org/gin-contrib/sse.svg)](https://travis-ci.org/gin-contrib/sse)
|
||||||
|
[![codecov](https://codecov.io/gh/gin-contrib/sse/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/sse)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/sse)](https://goreportcard.com/report/github.com/gin-contrib/sse)
|
||||||
|
|
||||||
|
Server-sent events (SSE) is a technology where a browser receives automatic updates from a server via HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5 by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/).
|
||||||
|
|
||||||
|
- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/)
|
||||||
|
- [Browser support](http://caniuse.com/#feat=eventsource)
|
||||||
|
|
||||||
|
## Sample code
|
||||||
|
|
||||||
|
```go
|
||||||
|
import "github.com/gin-contrib/sse"
|
||||||
|
|
||||||
|
func httpHandler(w http.ResponseWriter, req *http.Request) {
|
||||||
|
// data can be a primitive like a string, an integer or a float
|
||||||
|
sse.Encode(w, sse.Event{
|
||||||
|
Event: "message",
|
||||||
|
Data: "some data\nmore data",
|
||||||
|
})
|
||||||
|
|
||||||
|
// also a complex type, like a map, a struct or a slice
|
||||||
|
sse.Encode(w, sse.Event{
|
||||||
|
Id: "124",
|
||||||
|
Event: "message",
|
||||||
|
Data: map[string]interface{}{
|
||||||
|
"user": "manu",
|
||||||
|
"date": time.Now().Unix(),
|
||||||
|
"content": "hi!",
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
```
|
||||||
|
event: message
|
||||||
|
data: some data\\nmore data
|
||||||
|
|
||||||
|
id: 124
|
||||||
|
event: message
|
||||||
|
data: {"content":"hi!","date":1431540810,"user":"manu"}
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
## Content-Type
|
||||||
|
|
||||||
|
```go
|
||||||
|
fmt.Println(sse.ContentType)
|
||||||
|
```
|
||||||
|
```
|
||||||
|
text/event-stream
|
||||||
|
```
|
||||||
|
|
||||||
|
## Decoding support
|
||||||
|
|
||||||
|
There is a client-side implementation of SSE coming soon.
|
@ -0,0 +1,116 @@
|
|||||||
|
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package sse
|
||||||
|
|
||||||
|
import (
	"bytes"
	"io"
	"io/ioutil"
	"strconv"
)
|
||||||
|
|
||||||
|
// decoder accumulates the events parsed from a single SSE stream.
type decoder struct {
	// events holds every dispatched event, in stream order.
	events []Event
}
|
||||||
|
|
||||||
|
func Decode(r io.Reader) ([]Event, error) {
|
||||||
|
var dec decoder
|
||||||
|
return dec.decode(r)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) dispatchEvent(event Event, data string) {
|
||||||
|
dataLength := len(data)
|
||||||
|
if dataLength > 0 {
|
||||||
|
//If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer.
|
||||||
|
data = data[:dataLength-1]
|
||||||
|
dataLength--
|
||||||
|
}
|
||||||
|
if dataLength == 0 && event.Event == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if event.Event == "" {
|
||||||
|
event.Event = "message"
|
||||||
|
}
|
||||||
|
event.Data = data
|
||||||
|
d.events = append(d.events, event)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *decoder) decode(r io.Reader) ([]Event, error) {
|
||||||
|
buf, err := ioutil.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var currentEvent Event
|
||||||
|
var dataBuffer *bytes.Buffer = new(bytes.Buffer)
|
||||||
|
// TODO (and unit tests)
|
||||||
|
// Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair,
|
||||||
|
// a single U+000A LINE FEED (LF) character,
|
||||||
|
// or a single U+000D CARRIAGE RETURN (CR) character.
|
||||||
|
lines := bytes.Split(buf, []byte{'\n'})
|
||||||
|
for _, line := range lines {
|
||||||
|
if len(line) == 0 {
|
||||||
|
// If the line is empty (a blank line). Dispatch the event.
|
||||||
|
d.dispatchEvent(currentEvent, dataBuffer.String())
|
||||||
|
|
||||||
|
// reset current event and data buffer
|
||||||
|
currentEvent = Event{}
|
||||||
|
dataBuffer.Reset()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if line[0] == byte(':') {
|
||||||
|
// If the line starts with a U+003A COLON character (:), ignore the line.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var field, value []byte
|
||||||
|
colonIndex := bytes.IndexRune(line, ':')
|
||||||
|
if colonIndex != -1 {
|
||||||
|
// If the line contains a U+003A COLON character character (:)
|
||||||
|
// Collect the characters on the line before the first U+003A COLON character (:),
|
||||||
|
// and let field be that string.
|
||||||
|
field = line[:colonIndex]
|
||||||
|
// Collect the characters on the line after the first U+003A COLON character (:),
|
||||||
|
// and let value be that string.
|
||||||
|
value = line[colonIndex+1:]
|
||||||
|
// If value starts with a single U+0020 SPACE character, remove it from value.
|
||||||
|
if len(value) > 0 && value[0] == ' ' {
|
||||||
|
value = value[1:]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Otherwise, the string is not empty but does not contain a U+003A COLON character character (:)
|
||||||
|
// Use the whole line as the field name, and the empty string as the field value.
|
||||||
|
field = line
|
||||||
|
value = []byte{}
|
||||||
|
}
|
||||||
|
// The steps to process the field given a field name and a field value depend on the field name,
|
||||||
|
// as given in the following list. Field names must be compared literally,
|
||||||
|
// with no case folding performed.
|
||||||
|
switch string(field) {
|
||||||
|
case "event":
|
||||||
|
// Set the event name buffer to field value.
|
||||||
|
currentEvent.Event = string(value)
|
||||||
|
case "id":
|
||||||
|
// Set the event stream's last event ID to the field value.
|
||||||
|
currentEvent.Id = string(value)
|
||||||
|
case "retry":
|
||||||
|
// If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9),
|
||||||
|
// then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer.
|
||||||
|
// Otherwise, ignore the field.
|
||||||
|
currentEvent.Id = string(value)
|
||||||
|
case "data":
|
||||||
|
// Append the field value to the data buffer,
|
||||||
|
dataBuffer.Write(value)
|
||||||
|
// then append a single U+000A LINE FEED (LF) character to the data buffer.
|
||||||
|
dataBuffer.WriteString("\n")
|
||||||
|
default:
|
||||||
|
//Otherwise. The field is ignored.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Once the end of the file is reached, the user agent must dispatch the event one final time.
|
||||||
|
d.dispatchEvent(currentEvent, dataBuffer.String())
|
||||||
|
|
||||||
|
return d.events, nil
|
||||||
|
}
|
@ -0,0 +1,110 @@
|
|||||||
|
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package sse
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Server-Sent Events
// W3C Working Draft 29 October 2009
// http://www.w3.org/TR/2009/WD-eventsource-20091029/

// ContentType is the MIME type of an SSE stream.
const ContentType = "text/event-stream"

// Pre-built header values assigned directly into http.Header maps.
var contentType = []string{ContentType}
var noCache = []string{"no-cache"}

// fieldReplacer escapes line breaks in id/event field values so a
// value cannot terminate its own line early.
var fieldReplacer = strings.NewReplacer(
	"\n", "\\n",
	"\r", "\\r")

// dataReplacer prefixes each embedded LF with a fresh "data:" so
// multi-line payloads become valid SSE continuation lines.
var dataReplacer = strings.NewReplacer(
	"\n", "\ndata:",
	"\r", "\\r")

// Event is a single server-sent event.
type Event struct {
	Event string      // event name (the "event:" field); empty means none
	Id    string      // event ID (the "id:" field); empty means none
	Retry uint        // reconnection time (the "retry:" field); 0 means omitted
	Data  interface{} // payload; structs/slices/maps are JSON-encoded, everything else via fmt.Sprint
}
|
||||||
|
|
||||||
|
func Encode(writer io.Writer, event Event) error {
|
||||||
|
w := checkWriter(writer)
|
||||||
|
writeId(w, event.Id)
|
||||||
|
writeEvent(w, event.Event)
|
||||||
|
writeRetry(w, event.Retry)
|
||||||
|
return writeData(w, event.Data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeId(w stringWriter, id string) {
|
||||||
|
if len(id) > 0 {
|
||||||
|
w.WriteString("id:")
|
||||||
|
fieldReplacer.WriteString(w, id)
|
||||||
|
w.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeEvent(w stringWriter, event string) {
|
||||||
|
if len(event) > 0 {
|
||||||
|
w.WriteString("event:")
|
||||||
|
fieldReplacer.WriteString(w, event)
|
||||||
|
w.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeRetry(w stringWriter, retry uint) {
|
||||||
|
if retry > 0 {
|
||||||
|
w.WriteString("retry:")
|
||||||
|
w.WriteString(strconv.FormatUint(uint64(retry), 10))
|
||||||
|
w.WriteString("\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeData(w stringWriter, data interface{}) error {
|
||||||
|
w.WriteString("data:")
|
||||||
|
switch kindOfData(data) {
|
||||||
|
case reflect.Struct, reflect.Slice, reflect.Map:
|
||||||
|
err := json.NewEncoder(w).Encode(data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.WriteString("\n")
|
||||||
|
default:
|
||||||
|
dataReplacer.WriteString(w, fmt.Sprint(data))
|
||||||
|
w.WriteString("\n\n")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Render writes the SSE content-type headers and then the encoded
// event to w.
func (r Event) Render(w http.ResponseWriter) error {
	r.WriteContentType(w)
	return Encode(w, r)
}
|
||||||
|
|
||||||
|
func (r Event) WriteContentType(w http.ResponseWriter) {
|
||||||
|
header := w.Header()
|
||||||
|
header["Content-Type"] = contentType
|
||||||
|
|
||||||
|
if _, exist := header["Cache-Control"]; !exist {
|
||||||
|
header["Cache-Control"] = noCache
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// kindOfData reports the reflect.Kind of data, dereferencing one level
// of pointer indirection so a *T is classified as T.
func kindOfData(data interface{}) reflect.Kind {
	v := reflect.ValueOf(data)
	k := v.Kind()
	if k == reflect.Ptr {
		k = v.Elem().Kind()
	}
	return k
}
|
@ -0,0 +1,24 @@
|
|||||||
|
package sse
|
||||||
|
|
||||||
|
import "io"
|
||||||
|
|
||||||
|
// stringWriter is the subset of writers that can accept a string
// directly, avoiding a []byte conversion per write.
type stringWriter interface {
	io.Writer
	WriteString(string) (int, error)
}

// stringWrapper adapts a plain io.Writer to stringWriter by copying
// each string into a byte slice.
type stringWrapper struct {
	io.Writer
}

// WriteString implements stringWriter on top of the wrapped Writer.
func (w stringWrapper) WriteString(str string) (int, error) {
	return w.Writer.Write([]byte(str))
}

// checkWriter returns writer unchanged when it already supports
// WriteString, and wraps it otherwise.
func checkWriter(writer io.Writer) stringWriter {
	if sw, ok := writer.(stringWriter); ok {
		return sw
	}
	return stringWrapper{writer}
}
|
@ -0,0 +1,7 @@
|
|||||||
|
vendor/*
|
||||||
|
!vendor/vendor.json
|
||||||
|
coverage.out
|
||||||
|
count.out
|
||||||
|
test
|
||||||
|
profile.out
|
||||||
|
tmp.out
|
@ -0,0 +1,48 @@
|
|||||||
|
language: go
|
||||||
|
|
||||||
|
matrix:
|
||||||
|
fast_finish: true
|
||||||
|
include:
|
||||||
|
- go: 1.13.x
|
||||||
|
- go: 1.13.x
|
||||||
|
env:
|
||||||
|
- TESTTAGS=nomsgpack
|
||||||
|
- go: 1.14.x
|
||||||
|
- go: 1.14.x
|
||||||
|
env:
|
||||||
|
- TESTTAGS=nomsgpack
|
||||||
|
- go: 1.15.x
|
||||||
|
- go: 1.15.x
|
||||||
|
env:
|
||||||
|
- TESTTAGS=nomsgpack
|
||||||
|
- go: master
|
||||||
|
|
||||||
|
git:
|
||||||
|
depth: 10
|
||||||
|
|
||||||
|
before_install:
|
||||||
|
- if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
|
||||||
|
|
||||||
|
install:
|
||||||
|
- if [[ "${GO111MODULE}" = "on" ]]; then go mod download; fi
|
||||||
|
- if [[ "${GO111MODULE}" = "on" ]]; then export PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"; fi
|
||||||
|
- if [[ "${GO111MODULE}" = "on" ]]; then make tools; fi
|
||||||
|
|
||||||
|
go_import_path: github.com/gin-gonic/gin
|
||||||
|
|
||||||
|
script:
|
||||||
|
- make vet
|
||||||
|
- make fmt-check
|
||||||
|
- make misspell-check
|
||||||
|
- make test
|
||||||
|
|
||||||
|
after_success:
|
||||||
|
- bash <(curl -s https://codecov.io/bash)
|
||||||
|
|
||||||
|
notifications:
|
||||||
|
webhooks:
|
||||||
|
urls:
|
||||||
|
- https://webhooks.gitter.im/e/7f95bf605c4d356372f4
|
||||||
|
on_success: change # options: [always|never|change] default: always
|
||||||
|
on_failure: always # options: [always|never|change] default: always
|
||||||
|
on_start: false # default: false
|
@ -0,0 +1,233 @@
|
|||||||
|
List of all the awesome people working to make Gin the best Web Framework in Go.
|
||||||
|
|
||||||
|
## gin 1.x series authors
|
||||||
|
|
||||||
|
**Gin Core Team:** Bo-Yi Wu (@appleboy), 田欧 (@thinkerou), Javier Provecho (@javierprovecho)
|
||||||
|
|
||||||
|
## gin 0.x series authors
|
||||||
|
|
||||||
|
**Maintainers:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho)
|
||||||
|
|
||||||
|
People and companies, who have contributed, in alphabetical order.
|
||||||
|
|
||||||
|
**@858806258 (杰哥)**
|
||||||
|
- Fix typo in example
|
||||||
|
|
||||||
|
|
||||||
|
**@achedeuzot (Klemen Sever)**
|
||||||
|
- Fix newline debug printing
|
||||||
|
|
||||||
|
|
||||||
|
**@adammck (Adam Mckaig)**
|
||||||
|
- Add MIT license
|
||||||
|
|
||||||
|
|
||||||
|
**@AlexanderChen1989 (Alexander)**
|
||||||
|
- Typos in README
|
||||||
|
|
||||||
|
|
||||||
|
**@alexanderdidenko (Aleksandr Didenko)**
|
||||||
|
- Add support multipart/form-data
|
||||||
|
|
||||||
|
|
||||||
|
**@alexandernyquist (Alexander Nyquist)**
|
||||||
|
- Using template.Must to fix multiple return issue
|
||||||
|
- ★ Added support for OPTIONS verb
|
||||||
|
- ★ Setting response headers before calling WriteHeader
|
||||||
|
- Improved documentation for model binding
|
||||||
|
- ★ Added Content.Redirect()
|
||||||
|
- ★ Added tons of Unit tests
|
||||||
|
|
||||||
|
|
||||||
|
**@austinheap (Austin Heap)**
|
||||||
|
- Added travis CI integration
|
||||||
|
|
||||||
|
|
||||||
|
**@andredublin (Andre Dublin)**
|
||||||
|
- Fix typo in comment
|
||||||
|
|
||||||
|
|
||||||
|
**@bredov (Ludwig Valda Vasquez)**
|
||||||
|
- Fix html templating in debug mode
|
||||||
|
|
||||||
|
|
||||||
|
**@bluele (Jun Kimura)**
|
||||||
|
- Fixes code examples in README
|
||||||
|
|
||||||
|
|
||||||
|
**@chad-russell**
|
||||||
|
- ★ Support for serializing gin.H into XML
|
||||||
|
|
||||||
|
|
||||||
|
**@dickeyxxx (Jeff Dickey)**
|
||||||
|
- Typos in README
|
||||||
|
- Add example about serving static files
|
||||||
|
|
||||||
|
|
||||||
|
**@donileo (Adonis)**
|
||||||
|
- Add NoMethod handler
|
||||||
|
|
||||||
|
|
||||||
|
**@dutchcoders (DutchCoders)**
|
||||||
|
- ★ Fix security bug that allows client to spoof ip
|
||||||
|
- Fix typo. r.HTMLTemplates -> SetHTMLTemplate
|
||||||
|
|
||||||
|
|
||||||
|
**@el3ctro- (Joshua Loper)**
|
||||||
|
- Fix typo in example
|
||||||
|
|
||||||
|
|
||||||
|
**@ethankan (Ethan Kan)**
|
||||||
|
- Unsigned integers in binding
|
||||||
|
|
||||||
|
|
||||||
|
**(Evgeny Persienko)**
|
||||||
|
- Validate sub structures
|
||||||
|
|
||||||
|
|
||||||
|
**@frankbille (Frank Bille)**
|
||||||
|
- Add support for HTTP Realm Auth
|
||||||
|
|
||||||
|
|
||||||
|
**@fmd (Fareed Dudhia)**
|
||||||
|
- Fix typo. SetHTTPTemplate -> SetHTMLTemplate
|
||||||
|
|
||||||
|
|
||||||
|
**@ironiridis (Christopher Harrington)**
|
||||||
|
- Remove old reference
|
||||||
|
|
||||||
|
|
||||||
|
**@jammie-stackhouse (Jamie Stackhouse)**
|
||||||
|
- Add more shortcuts for router methods
|
||||||
|
|
||||||
|
|
||||||
|
**@jasonrhansen**
|
||||||
|
- Fix spelling and grammar errors in documentation
|
||||||
|
|
||||||
|
|
||||||
|
**@JasonSoft (Jason Lee)**
|
||||||
|
- Fix typo in comment
|
||||||
|
|
||||||
|
|
||||||
|
**@joiggama (Ignacio Galindo)**
|
||||||
|
- Add utf-8 charset header on renders
|
||||||
|
|
||||||
|
|
||||||
|
**@julienschmidt (Julien Schmidt)**
|
||||||
|
- gofmt the code examples
|
||||||
|
|
||||||
|
|
||||||
|
**@kelcecil (Kel Cecil)**
|
||||||
|
- Fix readme typo
|
||||||
|
|
||||||
|
|
||||||
|
**@kyledinh (Kyle Dinh)**
|
||||||
|
- Adds RunTLS()
|
||||||
|
|
||||||
|
|
||||||
|
**@LinusU (Linus Unnebäck)**
|
||||||
|
- Small fixes in README
|
||||||
|
|
||||||
|
|
||||||
|
**@loongmxbt (Saint Asky)**
|
||||||
|
- Fix typo in example
|
||||||
|
|
||||||
|
|
||||||
|
**@lucas-clemente (Lucas Clemente)**
|
||||||
|
- ★ work around path.Join removing trailing slashes from routes
|
||||||
|
|
||||||
|
|
||||||
|
**@mattn (Yasuhiro Matsumoto)**
|
||||||
|
- Improve color logger
|
||||||
|
|
||||||
|
|
||||||
|
**@mdigger (Dmitry Sedykh)**
|
||||||
|
- Fixes Form binding when content-type is x-www-form-urlencoded
|
||||||
|
- No repeat call c.Writer.Status() in gin.Logger
|
||||||
|
- Fixes Content-Type for json render
|
||||||
|
|
||||||
|
|
||||||
|
**@mirzac (Mirza Ceric)**
|
||||||
|
- Fix debug printing
|
||||||
|
|
||||||
|
|
||||||
|
**@mopemope (Yutaka Matsubara)**
|
||||||
|
- ★ Adds Godep support (Dependencies Manager)
|
||||||
|
- Fix variadic parameter in the flexible render API
|
||||||
|
- Fix Corrupted plain render
|
||||||
|
- Add Pluggable View Renderer Example
|
||||||
|
|
||||||
|
|
||||||
|
**@msemenistyi (Mykyta Semenistyi)**
|
||||||
|
- update Readme.md. Add code to String method
|
||||||
|
|
||||||
|
|
||||||
|
**@msoedov (Sasha Myasoedov)**
|
||||||
|
- ★ Adds tons of unit tests.
|
||||||
|
|
||||||
|
|
||||||
|
**@ngerakines (Nick Gerakines)**
|
||||||
|
- ★ Improves API, c.GET() doesn't panic
|
||||||
|
- Adds MustGet() method
|
||||||
|
|
||||||
|
|
||||||
|
**@r8k (Rajiv Kilaparti)**
|
||||||
|
- Fix Port usage in README.
|
||||||
|
|
||||||
|
|
||||||
|
**@rayrod2030 (Ray Rodriguez)**
|
||||||
|
- Fix typo in example
|
||||||
|
|
||||||
|
|
||||||
|
**@rns**
|
||||||
|
- Fix typo in example
|
||||||
|
|
||||||
|
|
||||||
|
**@RobAWilkinson (Robert Wilkinson)**
|
||||||
|
- Add example of forms and params
|
||||||
|
|
||||||
|
|
||||||
|
**@rogierlommers (Rogier Lommers)**
|
||||||
|
- Add updated static serve example
|
||||||
|
|
||||||
|
**@rw-access (Ross Wolf)**
|
||||||
|
- Added support to mix exact and param routes
|
||||||
|
|
||||||
|
**@se77en (Damon Zhao)**
|
||||||
|
- Improve color logging
|
||||||
|
|
||||||
|
|
||||||
|
**@silasb (Silas Baronda)**
|
||||||
|
- Fixing quotes in README
|
||||||
|
|
||||||
|
|
||||||
|
**@SkuliOskarsson (Skuli Oskarsson)**
|
||||||
|
- Fixes some texts in README II
|
||||||
|
|
||||||
|
|
||||||
|
**@slimmy (Jimmy Pettersson)**
|
||||||
|
- Added messages for required bindings
|
||||||
|
|
||||||
|
|
||||||
|
**@smira (Andrey Smirnov)**
|
||||||
|
- Add support for ignored/unexported fields in binding
|
||||||
|
|
||||||
|
|
||||||
|
**@superalsrk (SRK.Lyu)**
|
||||||
|
- Update httprouter godeps
|
||||||
|
|
||||||
|
|
||||||
|
**@tebeka (Miki Tebeka)**
|
||||||
|
- Use net/http constants instead of numeric values
|
||||||
|
|
||||||
|
|
||||||
|
**@techjanitor**
|
||||||
|
- Update context.go reserved IPs
|
||||||
|
|
||||||
|
|
||||||
|
**@yosssi (Keiji Yoshida)**
|
||||||
|
- Fix link in README
|
||||||
|
|
||||||
|
|
||||||
|
**@yuyabee**
|
||||||
|
- Fixed README
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue