parent
5ac7b3ef0a
commit
fdd6dfbc63
@ -1,136 +0,0 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package semaphore provides a weighted semaphore implementation.
|
||||
package semaphore // import "golang.org/x/sync/semaphore"
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// waiter represents one blocked Acquire call queued on a Weighted
// semaphore: the weight it requested and a channel that is closed
// once that weight has been granted.
type waiter struct {
	n int64 // weight requested by the blocked Acquire call
	ready chan<- struct{} // Closed when semaphore acquired.
}
|
||||
|
||||
// NewWeighted creates a new weighted semaphore with the given
|
||||
// maximum combined weight for concurrent access.
|
||||
func NewWeighted(n int64) *Weighted {
|
||||
w := &Weighted{size: n}
|
||||
return w
|
||||
}
|
||||
|
||||
// Weighted provides a way to bound concurrent access to a resource.
// The callers can request access with a given weight.
type Weighted struct {
	size int64 // maximum combined weight that may be held at once
	cur int64 // combined weight currently held
	mu sync.Mutex // guards cur and waiters
	waiters list.List // FIFO queue of waiter values blocked in Acquire
}
|
||||
|
||||
// Acquire acquires the semaphore with a weight of n, blocking until resources
// are available or ctx is done. On success, returns nil. On failure, returns
// ctx.Err() and leaves the semaphore unchanged.
//
// If ctx is already done, Acquire may still succeed without blocking.
func (s *Weighted) Acquire(ctx context.Context, n int64) error {
	s.mu.Lock()
	// Fast path: enough free weight and no one queued ahead of us.
	if s.size-s.cur >= n && s.waiters.Len() == 0 {
		s.cur += n
		s.mu.Unlock()
		return nil
	}

	if n > s.size {
		// Don't make other Acquire calls block on one that's doomed to fail.
		s.mu.Unlock()
		<-ctx.Done()
		return ctx.Err()
	}

	// Slow path: enqueue ourselves and wait to be notified or canceled.
	ready := make(chan struct{})
	w := waiter{n: n, ready: ready}
	elem := s.waiters.PushBack(w)
	s.mu.Unlock()

	select {
	case <-ctx.Done():
		err := ctx.Err()
		s.mu.Lock()
		// Re-check ready under the lock: Release may have granted us the
		// semaphore concurrently with the cancelation.
		select {
		case <-ready:
			// Acquired the semaphore after we were canceled. Rather than trying to
			// fix up the queue, just pretend we didn't notice the cancelation.
			err = nil
		default:
			isFront := s.waiters.Front() == elem
			s.waiters.Remove(elem)
			// If we're at the front and there're extra tokens left, notify other waiters.
			if isFront && s.size > s.cur {
				s.notifyWaiters()
			}
		}
		s.mu.Unlock()
		return err

	case <-ready:
		return nil
	}
}
|
||||
|
||||
// TryAcquire acquires the semaphore with a weight of n without blocking.
|
||||
// On success, returns true. On failure, returns false and leaves the semaphore unchanged.
|
||||
func (s *Weighted) TryAcquire(n int64) bool {
|
||||
s.mu.Lock()
|
||||
success := s.size-s.cur >= n && s.waiters.Len() == 0
|
||||
if success {
|
||||
s.cur += n
|
||||
}
|
||||
s.mu.Unlock()
|
||||
return success
|
||||
}
|
||||
|
||||
// Release releases the semaphore with a weight of n and wakes any waiters
// that can now be satisfied. Releasing more than is currently held panics.
func (s *Weighted) Release(n int64) {
	s.mu.Lock()
	s.cur -= n
	if s.cur < 0 {
		// Unlock before panicking so the semaphore is not left locked
		// if the caller recovers.
		s.mu.Unlock()
		panic("semaphore: released more than held")
	}
	s.notifyWaiters()
	s.mu.Unlock()
}
|
||||
|
||||
// notifyWaiters grants tokens to queued waiters in FIFO order, for as long
// as enough free weight remains for the waiter at the front of the queue.
// Callers must hold s.mu.
func (s *Weighted) notifyWaiters() {
	for {
		next := s.waiters.Front()
		if next == nil {
			break // No more waiters blocked.
		}

		w := next.Value.(waiter)
		if s.size-s.cur < w.n {
			// Not enough tokens for the next waiter. We could keep going (to try to
			// find a waiter with a smaller request), but under load that could cause
			// starvation for large requests; instead, we leave all remaining waiters
			// blocked.
			//
			// Consider a semaphore used as a read-write lock, with N tokens, N
			// readers, and one writer. Each reader can Acquire(1) to obtain a read
			// lock. The writer can Acquire(N) to obtain a write lock, excluding all
			// of the readers. If we allow the readers to jump ahead in the queue,
			// the writer will starve — there is always one token available for every
			// reader.
			break
		}

		s.cur += w.n
		s.waiters.Remove(next)
		close(w.ready)
	}
}
|
@ -0,0 +1,90 @@
|
||||
package field
|
||||
|
||||
import (
|
||||
"context"
|
||||
"gorm.io/gorm/clause"
|
||||
"gorm.io/gorm/schema"
|
||||
"reflect"
|
||||
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// ValuerType wraps a schema.SerializerValuerInterface value so that gorm
// runs the serializer when the surrounding clause is rendered (see
// GormValue). FucName records which builder method (Eq, Gt, In, ...)
// produced this value; it is exposed to the serializer via the context.
//
// NOTE(review): "FucName" looks like a typo of "FuncName", but it is an
// exported field — renaming it would break callers, so it is kept.
type ValuerType struct {
	Column string // database column the value belongs to
	Value schema.SerializerValuerInterface // the serializer-backed value
	FucName string // name of the builder method that created this wrapper
}
|
||||
|
||||
func (v ValuerType) GormValue(ctx context.Context, db *gorm.DB) (expr clause.Expr) {
|
||||
stmt := db.Statement.Schema
|
||||
field := stmt.LookUpField(v.Column)
|
||||
newValue, err := v.Value.Value(context.WithValue(ctx, "func_name", v.FucName), field, reflect.ValueOf(v.Value), v.Value)
|
||||
db.AddError(err)
|
||||
return clause.Expr{SQL: "?", Vars: []interface{}{newValue}}
|
||||
}
|
||||
|
||||
// Serializer is a field whose values pass through a gorm serializer
// (schema.SerializerValuerInterface) before being used in SQL expressions.
type Serializer struct{ expr }
|
||||
|
||||
// Eq judge equal
|
||||
func (field Serializer) Eq(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Eq{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Eq"}}}
|
||||
}
|
||||
|
||||
// Neq judge not equal
|
||||
func (field Serializer) Neq(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Neq{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Neq"}}}
|
||||
}
|
||||
|
||||
// In ...
|
||||
func (field Serializer) In(values ...schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.IN{Column: field.RawExpr(), Values: field.toSlice(values...)}}
|
||||
}
|
||||
|
||||
// Gt ...
|
||||
func (field Serializer) Gt(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Gt{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Gt"}}}
|
||||
}
|
||||
|
||||
// Gte ...
|
||||
func (field Serializer) Gte(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Gte{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Gte"}}}
|
||||
}
|
||||
|
||||
// Lt ...
|
||||
func (field Serializer) Lt(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Lt{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Lt"}}}
|
||||
}
|
||||
|
||||
// Lte ...
|
||||
func (field Serializer) Lte(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Lte{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Lte"}}}
|
||||
}
|
||||
|
||||
// Like ...
|
||||
func (field Serializer) Like(value schema.SerializerValuerInterface) Expr {
|
||||
return expr{e: clause.Like{Column: field.RawExpr(), Value: ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Like"}}}
|
||||
}
|
||||
|
||||
// Value ...
|
||||
func (field Serializer) Value(value schema.SerializerValuerInterface) AssignExpr {
|
||||
return field.value(ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "Value"})
|
||||
}
|
||||
|
||||
// Sum ...
|
||||
func (field Serializer) Sum() Field {
|
||||
return Field{field.sum()}
|
||||
}
|
||||
|
||||
// IfNull ...
|
||||
func (field Serializer) IfNull(value schema.SerializerValuerInterface) Expr {
|
||||
return field.ifNull(ValuerType{Column: field.ColumnName().String(), Value: value, FucName: "IfNull"})
|
||||
}
|
||||
|
||||
func (field Serializer) toSlice(values ...schema.SerializerValuerInterface) []interface{} {
|
||||
slice := make([]interface{}, len(values))
|
||||
for i, v := range values {
|
||||
slice[i] = ValuerType{Column: field.ColumnName().String(), Value: v, FucName: "In"}
|
||||
}
|
||||
return slice
|
||||
}
|
@ -0,0 +1,129 @@
|
||||
package field
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Struct-tag keys recognized when building field tags.
const (
	TagKeyGorm = "gorm"
	TagKeyJson = "json"

	// Option keys used inside the "gorm" tag value.
	TagKeyGormColumn = "column"
	TagKeyGormType = "type"
	TagKeyGormPrimaryKey = "primaryKey"
	TagKeyGormAutoIncrement = "autoIncrement"
	TagKeyGormNotNull = "not null"
	TagKeyGormUniqueIndex = "uniqueIndex"
	TagKeyGormIndex = "index"
	TagKeyGormDefault = "default"
	TagKeyGormComment = "comment"
)
|
||||
|
||||
var (
	// tagKeyPriorities orders tag keys when a tag string is built:
	// higher priority sorts first; ties break alphabetically in
	// tagKeySort. Keys not listed here default to priority 0.
	tagKeyPriorities = map[string]int16{
		TagKeyGorm: 100,
		TagKeyJson: 99,

		TagKeyGormColumn: 10,
		TagKeyGormType: 9,
		TagKeyGormPrimaryKey: 8,
		TagKeyGormAutoIncrement: 7,
		TagKeyGormNotNull: 6,
		TagKeyGormUniqueIndex: 5,
		TagKeyGormIndex: 4,
		TagKeyGormDefault: 3,
		TagKeyGormComment: 0,
	}
)
|
||||
|
||||
// TagBuilder renders a set of tag keys and values into a single tag string.
type TagBuilder interface {
	Build() string
}
|
||||
|
||||
// Tag is a set of struct-tag keys and values, e.g. {"json": "name"}.
type Tag map[string]string
|
||||
|
||||
func NewTag() Tag {
|
||||
return Tag{}
|
||||
}
|
||||
|
||||
// Set stores value under key, overwriting any previous value.
func (tag Tag) Set(key, value string) {
	tag[key] = value
}
|
||||
|
||||
// Remove deletes key from the tag set; deleting a missing key is a no-op.
func (tag Tag) Remove(key string) {
	delete(tag, key)
}
|
||||
|
||||
func (tag Tag) Build() string {
|
||||
if tag == nil || len(tag) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
tags := make([]string, 0, len(tag))
|
||||
keys := tagKeySort(tag)
|
||||
for _, k := range keys {
|
||||
v := tag[k]
|
||||
if k == "" || v == "" {
|
||||
continue
|
||||
}
|
||||
tags = append(tags, k+":\""+v+"\"")
|
||||
}
|
||||
return strings.Join(tags, " ")
|
||||
}
|
||||
|
||||
// GormTag holds key/value pairs for the "gorm" struct tag. Unlike Tag,
// its Build joins entries with ";" and allows flag-only (value-less) keys.
type GormTag Tag
|
||||
|
||||
func NewGormTag() GormTag {
|
||||
return GormTag{}
|
||||
}
|
||||
|
||||
// Set stores value under key, overwriting any previous value.
func (tag GormTag) Set(key, value string) {
	tag[key] = value
}
|
||||
|
||||
// Remove deletes key from the tag set; deleting a missing key is a no-op.
func (tag GormTag) Remove(key string) {
	delete(tag, key)
}
|
||||
|
||||
func (tag GormTag) Build() string {
|
||||
if tag == nil || len(tag) == 0 {
|
||||
return ""
|
||||
}
|
||||
tags := make([]string, 0, len(tag))
|
||||
keys := tagKeySort(Tag(tag))
|
||||
for _, k := range keys {
|
||||
v := tag[k]
|
||||
if k == "" && v == "" {
|
||||
continue
|
||||
}
|
||||
tv := make([]string, 0, 2)
|
||||
if k != "" {
|
||||
tv = append(tv, k)
|
||||
}
|
||||
if v != "" {
|
||||
tv = append(tv, v)
|
||||
}
|
||||
tags = append(tags, strings.Join(tv, ":"))
|
||||
}
|
||||
|
||||
return strings.Join(tags, ";")
|
||||
}
|
||||
|
||||
func tagKeySort(tag Tag) []string {
|
||||
keys := make([]string, 0, len(tag))
|
||||
if len(tag) == 0 {
|
||||
return keys
|
||||
}
|
||||
for k, _ := range tag {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Slice(keys, func(i, j int) bool {
|
||||
if tagKeyPriorities[keys[i]] == tagKeyPriorities[keys[j]] {
|
||||
return keys[i] <= keys[j]
|
||||
}
|
||||
return tagKeyPriorities[keys[i]] > tagKeyPriorities[keys[j]]
|
||||
})
|
||||
return keys
|
||||
}
|
Loading…
Reference in new issue