parent
b5d1bef508
commit
f3904a1605
@ -1,16 +0,0 @@
|
|||||||
package dorm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/beego/beego/v2/client/orm"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ConfigBeegoClient struct {
|
|
||||||
Dns string // 地址
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeegoClient
|
|
||||||
// https://beego.vip/
|
|
||||||
type BeegoClient struct {
|
|
||||||
Db *orm.Ormer // 驱动
|
|
||||||
config *ConfigBeegoClient // 配置
|
|
||||||
}
|
|
@ -1,29 +0,0 @@
|
|||||||
package dorm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/beego/beego/v2/client/orm"
|
|
||||||
_ "github.com/go-sql-driver/mysql"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewBeegoMysqlClient(config *ConfigBeegoClient) (*BeegoClient, error) {
|
|
||||||
|
|
||||||
var err error
|
|
||||||
c := &BeegoClient{config: config}
|
|
||||||
|
|
||||||
err = orm.RegisterDriver("mysql", orm.DRMySQL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
var db *sql.DB
|
|
||||||
o, err := orm.NewOrmWithDB("mysql", "default", db)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c.Db = &o
|
|
||||||
|
|
||||||
return c, nil
|
|
||||||
}
|
|
@ -1,29 +0,0 @@
|
|||||||
package dorm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"github.com/beego/beego/v2/client/orm"
|
|
||||||
_ "github.com/lib/pq"
|
|
||||||
)
|
|
||||||
|
|
||||||
func NewBeegoPostgresqlClient(config *ConfigBeegoClient) (*BeegoClient, error) {
|
|
||||||
|
|
||||||
var err error
|
|
||||||
c := &BeegoClient{config: config}
|
|
||||||
|
|
||||||
err = orm.RegisterDriver("pgsql", orm.DRPostgres)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
var db *sql.DB
|
|
||||||
o, err := orm.NewOrmWithDB("pgsql", "default", db)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c.Db = &o
|
|
||||||
|
|
||||||
return c, nil
|
|
||||||
}
|
|
@ -0,0 +1,16 @@
|
|||||||
|
package dorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ConfigBunClient struct {
|
||||||
|
Dns string // 地址
|
||||||
|
}
|
||||||
|
|
||||||
|
// BunClient
|
||||||
|
// https://bun.uptrace.dev/
|
||||||
|
type BunClient struct {
|
||||||
|
Db *bun.DB // 驱动
|
||||||
|
config *ConfigBunClient // 配置
|
||||||
|
}
|
@ -0,0 +1,25 @@
|
|||||||
|
package dorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
_ "github.com/go-sql-driver/mysql"
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
"github.com/uptrace/bun/dialect/mysqldialect"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewBunMysqlClient(config *ConfigBunClient) (*BunClient, error) {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
c := &BunClient{config: config}
|
||||||
|
|
||||||
|
sqlDb, err := sql.Open("mysql", c.config.Dns)
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
c.Db = bun.NewDB(sqlDb, mysqldialect.New())
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
@ -0,0 +1,19 @@
|
|||||||
|
package dorm
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql"
|
||||||
|
"github.com/uptrace/bun"
|
||||||
|
"github.com/uptrace/bun/dialect/pgdialect"
|
||||||
|
"github.com/uptrace/bun/driver/pgdriver"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewBunPgsqlClient(config *ConfigBunClient) (*BunClient, error) {
|
||||||
|
|
||||||
|
c := &BunClient{config: config}
|
||||||
|
|
||||||
|
sqlDb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(c.config.Dns)))
|
||||||
|
|
||||||
|
c.Db = bun.NewDB(sqlDb, pgdialect.New())
|
||||||
|
|
||||||
|
return c, nil
|
||||||
|
}
|
@ -1,13 +0,0 @@
|
|||||||
Copyright 2014 astaxie
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
@ -1,6 +0,0 @@
|
|||||||
package clauses
|
|
||||||
|
|
||||||
const (
|
|
||||||
ExprSep = "__"
|
|
||||||
ExprDot = "."
|
|
||||||
)
|
|
@ -1,104 +0,0 @@
|
|||||||
package order_clause
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Sort int8
|
|
||||||
|
|
||||||
const (
|
|
||||||
None Sort = 0
|
|
||||||
Ascending Sort = 1
|
|
||||||
Descending Sort = 2
|
|
||||||
)
|
|
||||||
|
|
||||||
type Option func(order *Order)
|
|
||||||
|
|
||||||
type Order struct {
|
|
||||||
column string
|
|
||||||
sort Sort
|
|
||||||
isRaw bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func Clause(options ...Option) *Order {
|
|
||||||
o := &Order{}
|
|
||||||
for _, option := range options {
|
|
||||||
option(o)
|
|
||||||
}
|
|
||||||
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) GetColumn() string {
|
|
||||||
return o.column
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) GetSort() Sort {
|
|
||||||
return o.sort
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) SortString() string {
|
|
||||||
switch o.GetSort() {
|
|
||||||
case Ascending:
|
|
||||||
return "ASC"
|
|
||||||
case Descending:
|
|
||||||
return "DESC"
|
|
||||||
}
|
|
||||||
|
|
||||||
return ``
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Order) IsRaw() bool {
|
|
||||||
return o.isRaw
|
|
||||||
}
|
|
||||||
|
|
||||||
func ParseOrder(expressions ...string) []*Order {
|
|
||||||
var orders []*Order
|
|
||||||
for _, expression := range expressions {
|
|
||||||
sort := Ascending
|
|
||||||
column := strings.ReplaceAll(expression, clauses.ExprSep, clauses.ExprDot)
|
|
||||||
if column[0] == '-' {
|
|
||||||
sort = Descending
|
|
||||||
column = column[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
orders = append(orders, &Order{
|
|
||||||
column: column,
|
|
||||||
sort: sort,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return orders
|
|
||||||
}
|
|
||||||
|
|
||||||
func Column(column string) Option {
|
|
||||||
return func(order *Order) {
|
|
||||||
order.column = strings.ReplaceAll(column, clauses.ExprSep, clauses.ExprDot)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sort(sort Sort) Option {
|
|
||||||
return func(order *Order) {
|
|
||||||
order.sort = sort
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func SortAscending() Option {
|
|
||||||
return sort(Ascending)
|
|
||||||
}
|
|
||||||
|
|
||||||
func SortDescending() Option {
|
|
||||||
return sort(Descending)
|
|
||||||
}
|
|
||||||
|
|
||||||
func SortNone() Option {
|
|
||||||
return sort(None)
|
|
||||||
}
|
|
||||||
|
|
||||||
func Raw() Option {
|
|
||||||
return func(order *Order) {
|
|
||||||
order.isRaw = true
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,299 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type commander interface {
|
|
||||||
Parse([]string)
|
|
||||||
Run() error
|
|
||||||
}
|
|
||||||
|
|
||||||
var commands = make(map[string]commander)
|
|
||||||
|
|
||||||
// print help.
|
|
||||||
func printHelp(errs ...string) {
|
|
||||||
content := `orm command usage:
|
|
||||||
|
|
||||||
syncdb - auto create tables
|
|
||||||
sqlall - print sql of create tables
|
|
||||||
help - print this help
|
|
||||||
`
|
|
||||||
|
|
||||||
if len(errs) > 0 {
|
|
||||||
fmt.Println(errs[0])
|
|
||||||
}
|
|
||||||
fmt.Println(content)
|
|
||||||
os.Exit(2)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunCommand listens for orm command and runs if command arguments have been passed.
|
|
||||||
func RunCommand() {
|
|
||||||
if len(os.Args) < 2 || os.Args[1] != "orm" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
BootStrap()
|
|
||||||
|
|
||||||
args := argString(os.Args[2:])
|
|
||||||
name := args.Get(0)
|
|
||||||
|
|
||||||
if name == "help" {
|
|
||||||
printHelp()
|
|
||||||
}
|
|
||||||
|
|
||||||
if cmd, ok := commands[name]; ok {
|
|
||||||
cmd.Parse(os.Args[3:])
|
|
||||||
cmd.Run()
|
|
||||||
os.Exit(0)
|
|
||||||
} else {
|
|
||||||
if name == "" {
|
|
||||||
printHelp()
|
|
||||||
} else {
|
|
||||||
printHelp(fmt.Sprintf("unknown command %s", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sync database struct command interface.
|
|
||||||
type commandSyncDb struct {
|
|
||||||
al *alias
|
|
||||||
force bool
|
|
||||||
verbose bool
|
|
||||||
noInfo bool
|
|
||||||
rtOnError bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse the orm command line arguments.
|
|
||||||
func (d *commandSyncDb) Parse(args []string) {
|
|
||||||
var name string
|
|
||||||
|
|
||||||
flagSet := flag.NewFlagSet("orm command: syncdb", flag.ExitOnError)
|
|
||||||
flagSet.StringVar(&name, "db", "default", "DataBase alias name")
|
|
||||||
flagSet.BoolVar(&d.force, "force", false, "drop tables before create")
|
|
||||||
flagSet.BoolVar(&d.verbose, "v", false, "verbose info")
|
|
||||||
flagSet.Parse(args)
|
|
||||||
|
|
||||||
d.al = getDbAlias(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run orm line command.
|
|
||||||
func (d *commandSyncDb) Run() error {
|
|
||||||
var drops []string
|
|
||||||
var err error
|
|
||||||
if d.force {
|
|
||||||
drops, err = defaultModelCache.getDbDropSQL(d.al)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db := d.al.DB
|
|
||||||
|
|
||||||
if d.force && len(drops) > 0 {
|
|
||||||
for i, mi := range defaultModelCache.allOrdered() {
|
|
||||||
query := drops[i]
|
|
||||||
if !d.noInfo {
|
|
||||||
fmt.Printf("drop table `%s`\n", mi.table)
|
|
||||||
}
|
|
||||||
_, err := db.Exec(query)
|
|
||||||
if d.verbose {
|
|
||||||
fmt.Printf(" %s\n\n", query)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
createQueries, indexes, err := defaultModelCache.getDbCreateSQL(d.al)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tables, err := d.al.DbBaser.GetTables(db)
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.Background()
|
|
||||||
for i, mi := range defaultModelCache.allOrdered() {
|
|
||||||
|
|
||||||
if !isApplicableTableForDB(mi.addrField, d.al.Name) {
|
|
||||||
fmt.Printf("table `%s` is not applicable to database '%s'\n", mi.table, d.al.Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if tables[mi.table] {
|
|
||||||
if !d.noInfo {
|
|
||||||
fmt.Printf("table `%s` already exists, skip\n", mi.table)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fields []*fieldInfo
|
|
||||||
columns, err := d.al.DbBaser.GetColumns(ctx, db, mi.table)
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range mi.fields.fieldsDB {
|
|
||||||
if _, ok := columns[fi.column]; !ok {
|
|
||||||
fields = append(fields, fi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range fields {
|
|
||||||
query := getColumnAddQuery(d.al, fi)
|
|
||||||
|
|
||||||
if !d.noInfo {
|
|
||||||
fmt.Printf("add column `%s` for table `%s`\n", fi.fullName, mi.table)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := db.Exec(query)
|
|
||||||
if d.verbose {
|
|
||||||
fmt.Printf(" %s\n", query)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, idx := range indexes[mi.table] {
|
|
||||||
if !d.al.DbBaser.IndexExists(ctx, db, idx.Table, idx.Name) {
|
|
||||||
if !d.noInfo {
|
|
||||||
fmt.Printf("create index `%s` for table `%s`\n", idx.Name, idx.Table)
|
|
||||||
}
|
|
||||||
|
|
||||||
query := idx.SQL
|
|
||||||
_, err := db.Exec(query)
|
|
||||||
if d.verbose {
|
|
||||||
fmt.Printf(" %s\n", query)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if !d.noInfo {
|
|
||||||
fmt.Printf("create table `%s` \n", mi.table)
|
|
||||||
}
|
|
||||||
|
|
||||||
queries := []string{createQueries[i]}
|
|
||||||
for _, idx := range indexes[mi.table] {
|
|
||||||
queries = append(queries, idx.SQL)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, query := range queries {
|
|
||||||
_, err := db.Exec(query)
|
|
||||||
if d.verbose {
|
|
||||||
query = " " + strings.Join(strings.Split(query, "\n"), "\n ")
|
|
||||||
fmt.Println(query)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
if d.rtOnError {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Printf(" %s\n", err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if d.verbose {
|
|
||||||
fmt.Println("")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// database creation commander interface implement.
|
|
||||||
type commandSQLAll struct {
|
|
||||||
al *alias
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse orm command line arguments.
|
|
||||||
func (d *commandSQLAll) Parse(args []string) {
|
|
||||||
var name string
|
|
||||||
|
|
||||||
flagSet := flag.NewFlagSet("orm command: sqlall", flag.ExitOnError)
|
|
||||||
flagSet.StringVar(&name, "db", "default", "DataBase alias name")
|
|
||||||
flagSet.Parse(args)
|
|
||||||
|
|
||||||
d.al = getDbAlias(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run orm line command.
|
|
||||||
func (d *commandSQLAll) Run() error {
|
|
||||||
createQueries, indexes, err := defaultModelCache.getDbCreateSQL(d.al)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
var all []string
|
|
||||||
for i, mi := range defaultModelCache.allOrdered() {
|
|
||||||
queries := []string{createQueries[i]}
|
|
||||||
for _, idx := range indexes[mi.table] {
|
|
||||||
queries = append(queries, idx.SQL)
|
|
||||||
}
|
|
||||||
sql := strings.Join(queries, "\n")
|
|
||||||
all = append(all, sql)
|
|
||||||
}
|
|
||||||
fmt.Println(strings.Join(all, "\n\n"))
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
commands["syncdb"] = new(commandSyncDb)
|
|
||||||
commands["sqlall"] = new(commandSQLAll)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RunSyncdb run syncdb command line.
|
|
||||||
// name: Table's alias name (default is "default")
|
|
||||||
// force: Run the next sql command even if the current gave an error
|
|
||||||
// verbose: Print all information, useful for debugging
|
|
||||||
func RunSyncdb(name string, force bool, verbose bool) error {
|
|
||||||
BootStrap()
|
|
||||||
|
|
||||||
al := getDbAlias(name)
|
|
||||||
cmd := new(commandSyncDb)
|
|
||||||
cmd.al = al
|
|
||||||
cmd.force = force
|
|
||||||
cmd.noInfo = !verbose
|
|
||||||
cmd.verbose = verbose
|
|
||||||
cmd.rtOnError = true
|
|
||||||
return cmd.Run()
|
|
||||||
}
|
|
@ -1,169 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type dbIndex struct {
|
|
||||||
Table string
|
|
||||||
Name string
|
|
||||||
SQL string
|
|
||||||
}
|
|
||||||
|
|
||||||
// get database column type string.
|
|
||||||
func getColumnTyp(al *alias, fi *fieldInfo) (col string) {
|
|
||||||
T := al.DbBaser.DbTypes()
|
|
||||||
fieldType := fi.fieldType
|
|
||||||
fieldSize := fi.size
|
|
||||||
|
|
||||||
checkColumn:
|
|
||||||
switch fieldType {
|
|
||||||
case TypeBooleanField:
|
|
||||||
col = T["bool"]
|
|
||||||
case TypeVarCharField:
|
|
||||||
if al.Driver == DRPostgres && fi.toText {
|
|
||||||
col = T["string-text"]
|
|
||||||
} else {
|
|
||||||
col = fmt.Sprintf(T["string"], fieldSize)
|
|
||||||
}
|
|
||||||
case TypeCharField:
|
|
||||||
col = fmt.Sprintf(T["string-char"], fieldSize)
|
|
||||||
case TypeTextField:
|
|
||||||
col = T["string-text"]
|
|
||||||
case TypeTimeField:
|
|
||||||
col = T["time.Time-clock"]
|
|
||||||
case TypeDateField:
|
|
||||||
col = T["time.Time-date"]
|
|
||||||
case TypeDateTimeField:
|
|
||||||
// the precision of sqlite is not implemented
|
|
||||||
if al.Driver == 2 || fi.timePrecision == nil {
|
|
||||||
col = T["time.Time"]
|
|
||||||
} else {
|
|
||||||
s := T["time.Time-precision"]
|
|
||||||
col = fmt.Sprintf(s, *fi.timePrecision)
|
|
||||||
}
|
|
||||||
|
|
||||||
case TypeBitField:
|
|
||||||
col = T["int8"]
|
|
||||||
case TypeSmallIntegerField:
|
|
||||||
col = T["int16"]
|
|
||||||
case TypeIntegerField:
|
|
||||||
col = T["int32"]
|
|
||||||
case TypeBigIntegerField:
|
|
||||||
if al.Driver == DRSqlite {
|
|
||||||
fieldType = TypeIntegerField
|
|
||||||
goto checkColumn
|
|
||||||
}
|
|
||||||
col = T["int64"]
|
|
||||||
case TypePositiveBitField:
|
|
||||||
col = T["uint8"]
|
|
||||||
case TypePositiveSmallIntegerField:
|
|
||||||
col = T["uint16"]
|
|
||||||
case TypePositiveIntegerField:
|
|
||||||
col = T["uint32"]
|
|
||||||
case TypePositiveBigIntegerField:
|
|
||||||
col = T["uint64"]
|
|
||||||
case TypeFloatField:
|
|
||||||
col = T["float64"]
|
|
||||||
case TypeDecimalField:
|
|
||||||
s := T["float64-decimal"]
|
|
||||||
if !strings.Contains(s, "%d") {
|
|
||||||
col = s
|
|
||||||
} else {
|
|
||||||
col = fmt.Sprintf(s, fi.digits, fi.decimals)
|
|
||||||
}
|
|
||||||
case TypeJSONField:
|
|
||||||
if al.Driver != DRPostgres {
|
|
||||||
fieldType = TypeVarCharField
|
|
||||||
goto checkColumn
|
|
||||||
}
|
|
||||||
col = T["json"]
|
|
||||||
case TypeJsonbField:
|
|
||||||
if al.Driver != DRPostgres {
|
|
||||||
fieldType = TypeVarCharField
|
|
||||||
goto checkColumn
|
|
||||||
}
|
|
||||||
col = T["jsonb"]
|
|
||||||
case RelForeignKey, RelOneToOne:
|
|
||||||
fieldType = fi.relModelInfo.fields.pk.fieldType
|
|
||||||
fieldSize = fi.relModelInfo.fields.pk.size
|
|
||||||
goto checkColumn
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// create alter sql string.
|
|
||||||
func getColumnAddQuery(al *alias, fi *fieldInfo) string {
|
|
||||||
Q := al.DbBaser.TableQuote()
|
|
||||||
typ := getColumnTyp(al, fi)
|
|
||||||
|
|
||||||
if !fi.null {
|
|
||||||
typ += " " + "NOT NULL"
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("ALTER TABLE %s%s%s ADD COLUMN %s%s%s %s %s",
|
|
||||||
Q, fi.mi.table, Q,
|
|
||||||
Q, fi.column, Q,
|
|
||||||
typ, getColumnDefault(fi),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get string value for the attribute "DEFAULT" for the CREATE, ALTER commands
|
|
||||||
func getColumnDefault(fi *fieldInfo) string {
|
|
||||||
var v, t, d string
|
|
||||||
|
|
||||||
// Skip default attribute if field is in relations
|
|
||||||
if fi.rel || fi.reverse {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
t = " DEFAULT '%s' "
|
|
||||||
|
|
||||||
// These defaults will be useful if there no config value orm:"default" and NOT NULL is on
|
|
||||||
switch fi.fieldType {
|
|
||||||
case TypeTimeField, TypeDateField, TypeDateTimeField, TypeTextField:
|
|
||||||
return v
|
|
||||||
|
|
||||||
case TypeBitField, TypeSmallIntegerField, TypeIntegerField,
|
|
||||||
TypeBigIntegerField, TypePositiveBitField, TypePositiveSmallIntegerField,
|
|
||||||
TypePositiveIntegerField, TypePositiveBigIntegerField, TypeFloatField,
|
|
||||||
TypeDecimalField:
|
|
||||||
t = " DEFAULT %s "
|
|
||||||
d = "0"
|
|
||||||
case TypeBooleanField:
|
|
||||||
t = " DEFAULT %s "
|
|
||||||
d = "FALSE"
|
|
||||||
case TypeJSONField, TypeJsonbField:
|
|
||||||
d = "{}"
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.colDefault {
|
|
||||||
if !fi.initial.Exist() {
|
|
||||||
v = fmt.Sprintf(t, "")
|
|
||||||
} else {
|
|
||||||
v = fmt.Sprintf(t, fi.initial.String())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if !fi.null {
|
|
||||||
v = fmt.Sprintf(t, d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return v
|
|
||||||
}
|
|
File diff suppressed because it is too large
Load Diff
@ -1,599 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DriverType database driver constant int.
|
|
||||||
type DriverType int
|
|
||||||
|
|
||||||
// Enum the Database driver
|
|
||||||
const (
|
|
||||||
_ DriverType = iota // int enum type
|
|
||||||
DRMySQL // mysql
|
|
||||||
DRSqlite // sqlite
|
|
||||||
DROracle // oracle
|
|
||||||
DRPostgres // pgsql
|
|
||||||
DRTiDB // TiDB
|
|
||||||
)
|
|
||||||
|
|
||||||
// database driver string.
|
|
||||||
type driver string
|
|
||||||
|
|
||||||
// get type constant int of current driver..
|
|
||||||
func (d driver) Type() DriverType {
|
|
||||||
a, _ := dataBaseCache.get(string(d))
|
|
||||||
return a.Driver
|
|
||||||
}
|
|
||||||
|
|
||||||
// get name of current driver
|
|
||||||
func (d driver) Name() string {
|
|
||||||
return string(d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check driver iis implemented Driver interface or not.
|
|
||||||
var _ Driver = new(driver)
|
|
||||||
|
|
||||||
var (
|
|
||||||
dataBaseCache = &_dbCache{cache: make(map[string]*alias)}
|
|
||||||
drivers = map[string]DriverType{
|
|
||||||
"mysql": DRMySQL,
|
|
||||||
"postgres": DRPostgres,
|
|
||||||
"sqlite3": DRSqlite,
|
|
||||||
"tidb": DRTiDB,
|
|
||||||
"oracle": DROracle,
|
|
||||||
"oci8": DROracle, // github.com/mattn/go-oci8
|
|
||||||
"ora": DROracle, // https://github.com/rana/ora
|
|
||||||
}
|
|
||||||
dbBasers = map[DriverType]dbBaser{
|
|
||||||
DRMySQL: newdbBaseMysql(),
|
|
||||||
DRSqlite: newdbBaseSqlite(),
|
|
||||||
DROracle: newdbBaseOracle(),
|
|
||||||
DRPostgres: newdbBasePostgres(),
|
|
||||||
DRTiDB: newdbBaseTidb(),
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// database alias cacher.
|
|
||||||
type _dbCache struct {
|
|
||||||
mux sync.RWMutex
|
|
||||||
cache map[string]*alias
|
|
||||||
}
|
|
||||||
|
|
||||||
// add database alias with original name.
|
|
||||||
func (ac *_dbCache) add(name string, al *alias) (added bool) {
|
|
||||||
ac.mux.Lock()
|
|
||||||
defer ac.mux.Unlock()
|
|
||||||
if _, ok := ac.cache[name]; !ok {
|
|
||||||
ac.cache[name] = al
|
|
||||||
added = true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// get database alias if cached.
|
|
||||||
func (ac *_dbCache) get(name string) (al *alias, ok bool) {
|
|
||||||
ac.mux.RLock()
|
|
||||||
defer ac.mux.RUnlock()
|
|
||||||
al, ok = ac.cache[name]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// get default alias.
|
|
||||||
func (ac *_dbCache) getDefault() (al *alias) {
|
|
||||||
al, _ = ac.get("default")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type DB struct {
|
|
||||||
*sync.RWMutex
|
|
||||||
DB *sql.DB
|
|
||||||
stmtDecorators *lru.Cache
|
|
||||||
stmtDecoratorsLimit int
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ dbQuerier = new(DB)
|
|
||||||
_ txer = new(DB)
|
|
||||||
)
|
|
||||||
|
|
||||||
func (d *DB) Begin() (*sql.Tx, error) {
|
|
||||||
return d.DB.Begin()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
|
|
||||||
return d.DB.BeginTx(ctx, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// su must call release to release *sql.Stmt after using
|
|
||||||
func (d *DB) getStmtDecorator(query string) (*stmtDecorator, error) {
|
|
||||||
d.RLock()
|
|
||||||
c, ok := d.stmtDecorators.Get(query)
|
|
||||||
if ok {
|
|
||||||
c.(*stmtDecorator).acquire()
|
|
||||||
d.RUnlock()
|
|
||||||
return c.(*stmtDecorator), nil
|
|
||||||
}
|
|
||||||
d.RUnlock()
|
|
||||||
|
|
||||||
d.Lock()
|
|
||||||
c, ok = d.stmtDecorators.Get(query)
|
|
||||||
if ok {
|
|
||||||
c.(*stmtDecorator).acquire()
|
|
||||||
d.Unlock()
|
|
||||||
return c.(*stmtDecorator), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
stmt, err := d.Prepare(query)
|
|
||||||
if err != nil {
|
|
||||||
d.Unlock()
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sd := newStmtDecorator(stmt)
|
|
||||||
sd.acquire()
|
|
||||||
d.stmtDecorators.Add(query, sd)
|
|
||||||
d.Unlock()
|
|
||||||
|
|
||||||
return sd, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) Prepare(query string) (*sql.Stmt, error) {
|
|
||||||
return d.DB.Prepare(query)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
|
|
||||||
return d.DB.PrepareContext(ctx, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) Exec(query string, args ...interface{}) (sql.Result, error) {
|
|
||||||
return d.ExecContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
|
|
||||||
if d.stmtDecorators == nil {
|
|
||||||
return d.DB.ExecContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
sd, err := d.getStmtDecorator(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
stmt := sd.getStmt()
|
|
||||||
defer sd.release()
|
|
||||||
return stmt.ExecContext(ctx, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
|
||||||
return d.QueryContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
|
|
||||||
if d.stmtDecorators == nil {
|
|
||||||
return d.DB.QueryContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
sd, err := d.getStmtDecorator(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
stmt := sd.getStmt()
|
|
||||||
defer sd.release()
|
|
||||||
return stmt.QueryContext(ctx, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) QueryRow(query string, args ...interface{}) *sql.Row {
|
|
||||||
return d.QueryRowContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
|
|
||||||
if d.stmtDecorators == nil {
|
|
||||||
return d.DB.QueryRowContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
sd, err := d.getStmtDecorator(query)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
stmt := sd.getStmt()
|
|
||||||
defer sd.release()
|
|
||||||
return stmt.QueryRowContext(ctx, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
type TxDB struct {
|
|
||||||
tx *sql.Tx
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ dbQuerier = new(TxDB)
|
|
||||||
_ txEnder = new(TxDB)
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t *TxDB) Commit() error {
|
|
||||||
return t.tx.Commit()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) Rollback() error {
|
|
||||||
return t.tx.Rollback()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) RollbackUnlessCommit() error {
|
|
||||||
err := t.tx.Rollback()
|
|
||||||
if err != sql.ErrTxDone {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
_ dbQuerier = new(TxDB)
|
|
||||||
_ txEnder = new(TxDB)
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t *TxDB) Prepare(query string) (*sql.Stmt, error) {
|
|
||||||
return t.PrepareContext(context.Background(), query)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
|
|
||||||
return t.tx.PrepareContext(ctx, query)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) Exec(query string, args ...interface{}) (sql.Result, error) {
|
|
||||||
return t.ExecContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
|
|
||||||
return t.tx.ExecContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
|
||||||
return t.QueryContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
|
|
||||||
return t.tx.QueryContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) QueryRow(query string, args ...interface{}) *sql.Row {
|
|
||||||
return t.QueryRowContext(context.Background(), query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t *TxDB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
|
|
||||||
return t.tx.QueryRowContext(ctx, query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
type alias struct {
|
|
||||||
Name string
|
|
||||||
Driver DriverType
|
|
||||||
DriverName string
|
|
||||||
DataSource string
|
|
||||||
MaxIdleConns int
|
|
||||||
MaxOpenConns int
|
|
||||||
ConnMaxLifetime time.Duration
|
|
||||||
StmtCacheSize int
|
|
||||||
DB *DB
|
|
||||||
DbBaser dbBaser
|
|
||||||
TZ *time.Location
|
|
||||||
Engine string
|
|
||||||
}
|
|
||||||
|
|
||||||
func detectTZ(al *alias) {
|
|
||||||
// orm timezone system match database
|
|
||||||
// default use Local
|
|
||||||
al.TZ = DefaultTimeLoc
|
|
||||||
|
|
||||||
if al.DriverName == "sphinx" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
switch al.Driver {
|
|
||||||
case DRMySQL:
|
|
||||||
row := al.DB.QueryRow("SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP)")
|
|
||||||
var tz string
|
|
||||||
row.Scan(&tz)
|
|
||||||
if len(tz) >= 8 {
|
|
||||||
if tz[0] != '-' {
|
|
||||||
tz = "+" + tz
|
|
||||||
}
|
|
||||||
t, err := time.Parse("-07:00:00", tz)
|
|
||||||
if err == nil {
|
|
||||||
if t.Location().String() != "" {
|
|
||||||
al.TZ = t.Location()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// get default engine from current database
|
|
||||||
row = al.DB.QueryRow("SELECT ENGINE, TRANSACTIONS FROM information_schema.engines WHERE SUPPORT = 'DEFAULT'")
|
|
||||||
var engine string
|
|
||||||
var tx bool
|
|
||||||
row.Scan(&engine, &tx)
|
|
||||||
|
|
||||||
if engine != "" {
|
|
||||||
al.Engine = engine
|
|
||||||
} else {
|
|
||||||
al.Engine = "INNODB"
|
|
||||||
}
|
|
||||||
|
|
||||||
case DRSqlite, DROracle:
|
|
||||||
al.TZ = time.UTC
|
|
||||||
|
|
||||||
case DRPostgres:
|
|
||||||
row := al.DB.QueryRow("SELECT current_setting('TIMEZONE')")
|
|
||||||
var tz string
|
|
||||||
row.Scan(&tz)
|
|
||||||
loc, err := time.LoadLocation(tz)
|
|
||||||
if err == nil {
|
|
||||||
al.TZ = loc
|
|
||||||
} else {
|
|
||||||
DebugLog.Printf("Detect DB timezone: %s %s\n", tz, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addAliasWthDB(aliasName, driverName string, db *sql.DB, params ...DBOption) (*alias, error) {
|
|
||||||
existErr := fmt.Errorf("DataBase alias name `%s` already registered, cannot reuse", aliasName)
|
|
||||||
if _, ok := dataBaseCache.get(aliasName); ok {
|
|
||||||
return nil, existErr
|
|
||||||
}
|
|
||||||
|
|
||||||
al, err := newAliasWithDb(aliasName, driverName, db, params...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !dataBaseCache.add(aliasName, al) {
|
|
||||||
return nil, existErr
|
|
||||||
}
|
|
||||||
|
|
||||||
return al, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newAliasWithDb(aliasName, driverName string, db *sql.DB, params ...DBOption) (*alias, error) {
|
|
||||||
al := &alias{}
|
|
||||||
al.DB = &DB{
|
|
||||||
RWMutex: new(sync.RWMutex),
|
|
||||||
DB: db,
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, p := range params {
|
|
||||||
p(al)
|
|
||||||
}
|
|
||||||
|
|
||||||
var stmtCache *lru.Cache
|
|
||||||
var stmtCacheSize int
|
|
||||||
|
|
||||||
if al.StmtCacheSize > 0 {
|
|
||||||
_stmtCache, errC := newStmtDecoratorLruWithEvict(al.StmtCacheSize)
|
|
||||||
if errC != nil {
|
|
||||||
return nil, errC
|
|
||||||
} else {
|
|
||||||
stmtCache = _stmtCache
|
|
||||||
stmtCacheSize = al.StmtCacheSize
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
al.Name = aliasName
|
|
||||||
al.DriverName = driverName
|
|
||||||
al.DB.stmtDecorators = stmtCache
|
|
||||||
al.DB.stmtDecoratorsLimit = stmtCacheSize
|
|
||||||
|
|
||||||
if dr, ok := drivers[driverName]; ok {
|
|
||||||
al.DbBaser = dbBasers[dr]
|
|
||||||
al.Driver = dr
|
|
||||||
} else {
|
|
||||||
return nil, fmt.Errorf("driver name `%s` have not registered", driverName)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := db.Ping()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("register db Ping `%s`, %s", aliasName, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
detectTZ(al)
|
|
||||||
|
|
||||||
return al, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxIdleConns Change the max idle conns for *sql.DB, use specify database alias name
|
|
||||||
// Deprecated you should not use this, we will remove it in the future
|
|
||||||
func SetMaxIdleConns(aliasName string, maxIdleConns int) {
|
|
||||||
al := getDbAlias(aliasName)
|
|
||||||
al.SetMaxIdleConns(maxIdleConns)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxOpenConns Change the max open conns for *sql.DB, use specify database alias name
|
|
||||||
// Deprecated you should not use this, we will remove it in the future
|
|
||||||
func SetMaxOpenConns(aliasName string, maxOpenConns int) {
|
|
||||||
al := getDbAlias(aliasName)
|
|
||||||
al.SetMaxOpenConns(maxOpenConns)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxIdleConns Change the max idle conns for *sql.DB, use specify database alias name
|
|
||||||
func (al *alias) SetMaxIdleConns(maxIdleConns int) {
|
|
||||||
al.MaxIdleConns = maxIdleConns
|
|
||||||
al.DB.DB.SetMaxIdleConns(maxIdleConns)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMaxOpenConns Change the max open conns for *sql.DB, use specify database alias name
|
|
||||||
func (al *alias) SetMaxOpenConns(maxOpenConns int) {
|
|
||||||
al.MaxOpenConns = maxOpenConns
|
|
||||||
al.DB.DB.SetMaxOpenConns(maxOpenConns)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (al *alias) SetConnMaxLifetime(lifeTime time.Duration) {
|
|
||||||
al.ConnMaxLifetime = lifeTime
|
|
||||||
al.DB.DB.SetConnMaxLifetime(lifeTime)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddAliasWthDB add a aliasName for the drivename
|
|
||||||
func AddAliasWthDB(aliasName, driverName string, db *sql.DB, params ...DBOption) error {
|
|
||||||
_, err := addAliasWthDB(aliasName, driverName, db, params...)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterDataBase Setting the database connect params. Use the database driver self dataSource args.
|
|
||||||
func RegisterDataBase(aliasName, driverName, dataSource string, params ...DBOption) error {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
db *sql.DB
|
|
||||||
al *alias
|
|
||||||
)
|
|
||||||
|
|
||||||
db, err = sql.Open(driverName, dataSource)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("register db `%s`, %s", aliasName, err.Error())
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
|
|
||||||
al, err = addAliasWthDB(aliasName, driverName, db, params...)
|
|
||||||
if err != nil {
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
|
|
||||||
al.DataSource = dataSource
|
|
||||||
|
|
||||||
end:
|
|
||||||
if err != nil {
|
|
||||||
if db != nil {
|
|
||||||
db.Close()
|
|
||||||
}
|
|
||||||
DebugLog.Println(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterDriver Register a database driver use specify driver name, this can be definition the driver is which database type.
|
|
||||||
func RegisterDriver(driverName string, typ DriverType) error {
|
|
||||||
if t, ok := drivers[driverName]; !ok {
|
|
||||||
drivers[driverName] = typ
|
|
||||||
} else {
|
|
||||||
if t != typ {
|
|
||||||
return fmt.Errorf("driverName `%s` db driver already registered and is other type", driverName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetDataBaseTZ Change the database default used timezone
|
|
||||||
func SetDataBaseTZ(aliasName string, tz *time.Location) error {
|
|
||||||
if al, ok := dataBaseCache.get(aliasName); ok {
|
|
||||||
al.TZ = tz
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("DataBase alias name `%s` not registered", aliasName)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDB Get *sql.DB from registered database by db alias name.
|
|
||||||
// Use "default" as alias name if you not set.
|
|
||||||
func GetDB(aliasNames ...string) (*sql.DB, error) {
|
|
||||||
var name string
|
|
||||||
if len(aliasNames) > 0 {
|
|
||||||
name = aliasNames[0]
|
|
||||||
} else {
|
|
||||||
name = "default"
|
|
||||||
}
|
|
||||||
al, ok := dataBaseCache.get(name)
|
|
||||||
if ok {
|
|
||||||
return al.DB.DB, nil
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("DataBase of alias name `%s` not found", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
type stmtDecorator struct {
|
|
||||||
wg sync.WaitGroup
|
|
||||||
stmt *sql.Stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stmtDecorator) getStmt() *sql.Stmt {
|
|
||||||
return s.stmt
|
|
||||||
}
|
|
||||||
|
|
||||||
// acquire will add one
|
|
||||||
// since this method will be used inside read lock scope,
|
|
||||||
// so we can not do more things here
|
|
||||||
// we should think about refactor this
|
|
||||||
func (s *stmtDecorator) acquire() {
|
|
||||||
s.wg.Add(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stmtDecorator) release() {
|
|
||||||
s.wg.Done()
|
|
||||||
}
|
|
||||||
|
|
||||||
// garbage recycle for stmt
|
|
||||||
func (s *stmtDecorator) destroy() {
|
|
||||||
go func() {
|
|
||||||
s.wg.Wait()
|
|
||||||
_ = s.stmt.Close()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStmtDecorator(sqlStmt *sql.Stmt) *stmtDecorator {
|
|
||||||
return &stmtDecorator{
|
|
||||||
stmt: sqlStmt,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStmtDecoratorLruWithEvict(cacheSize int) (*lru.Cache, error) {
|
|
||||||
cache, err := lru.NewWithEvict(cacheSize, func(key interface{}, value interface{}) {
|
|
||||||
value.(*stmtDecorator).destroy()
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return cache, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type DBOption func(al *alias)
|
|
||||||
|
|
||||||
// MaxIdleConnections return a hint about MaxIdleConnections
|
|
||||||
func MaxIdleConnections(maxIdleConn int) DBOption {
|
|
||||||
return func(al *alias) {
|
|
||||||
al.SetMaxIdleConns(maxIdleConn)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxOpenConnections return a hint about MaxOpenConnections
|
|
||||||
func MaxOpenConnections(maxOpenConn int) DBOption {
|
|
||||||
return func(al *alias) {
|
|
||||||
al.SetMaxOpenConns(maxOpenConn)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnMaxLifetime return a hint about ConnMaxLifetime
|
|
||||||
func ConnMaxLifetime(v time.Duration) DBOption {
|
|
||||||
return func(al *alias) {
|
|
||||||
al.SetConnMaxLifetime(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// MaxStmtCacheSize return a hint about MaxStmtCacheSize
|
|
||||||
func MaxStmtCacheSize(v int) DBOption {
|
|
||||||
return func(al *alias) {
|
|
||||||
al.StmtCacheSize = v
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,192 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// mysql operators.
|
|
||||||
var mysqlOperators = map[string]string{
|
|
||||||
"exact": "= ?",
|
|
||||||
"iexact": "LIKE ?",
|
|
||||||
"strictexact": "= BINARY ?",
|
|
||||||
"contains": "LIKE BINARY ?",
|
|
||||||
"icontains": "LIKE ?",
|
|
||||||
// "regex": "REGEXP BINARY ?",
|
|
||||||
// "iregex": "REGEXP ?",
|
|
||||||
"gt": "> ?",
|
|
||||||
"gte": ">= ?",
|
|
||||||
"lt": "< ?",
|
|
||||||
"lte": "<= ?",
|
|
||||||
"eq": "= ?",
|
|
||||||
"ne": "!= ?",
|
|
||||||
"startswith": "LIKE BINARY ?",
|
|
||||||
"endswith": "LIKE BINARY ?",
|
|
||||||
"istartswith": "LIKE ?",
|
|
||||||
"iendswith": "LIKE ?",
|
|
||||||
}
|
|
||||||
|
|
||||||
// mysql column field types.
|
|
||||||
var mysqlTypes = map[string]string{
|
|
||||||
"auto": "AUTO_INCREMENT NOT NULL PRIMARY KEY",
|
|
||||||
"pk": "NOT NULL PRIMARY KEY",
|
|
||||||
"bool": "bool",
|
|
||||||
"string": "varchar(%d)",
|
|
||||||
"string-char": "char(%d)",
|
|
||||||
"string-text": "longtext",
|
|
||||||
"time.Time-date": "date",
|
|
||||||
"time.Time": "datetime",
|
|
||||||
"int8": "tinyint",
|
|
||||||
"int16": "smallint",
|
|
||||||
"int32": "integer",
|
|
||||||
"int64": "bigint",
|
|
||||||
"uint8": "tinyint unsigned",
|
|
||||||
"uint16": "smallint unsigned",
|
|
||||||
"uint32": "integer unsigned",
|
|
||||||
"uint64": "bigint unsigned",
|
|
||||||
"float64": "double precision",
|
|
||||||
"float64-decimal": "numeric(%d, %d)",
|
|
||||||
"time.Time-precision": "datetime(%d)",
|
|
||||||
}
|
|
||||||
|
|
||||||
// mysql dbBaser implementation.
|
|
||||||
type dbBaseMysql struct {
|
|
||||||
dbBase
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ dbBaser = new(dbBaseMysql)
|
|
||||||
|
|
||||||
// get mysql operator.
|
|
||||||
func (d *dbBaseMysql) OperatorSQL(operator string) string {
|
|
||||||
return mysqlOperators[operator]
|
|
||||||
}
|
|
||||||
|
|
||||||
// get mysql table field types.
|
|
||||||
func (d *dbBaseMysql) DbTypes() map[string]string {
|
|
||||||
return mysqlTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// show table sql for mysql.
|
|
||||||
func (d *dbBaseMysql) ShowTablesQuery() string {
|
|
||||||
return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()"
|
|
||||||
}
|
|
||||||
|
|
||||||
// show columns sql of table for mysql.
|
|
||||||
func (d *dbBaseMysql) ShowColumnsQuery(table string) string {
|
|
||||||
return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+
|
|
||||||
"WHERE table_schema = DATABASE() AND table_name = '%s'", table)
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute sql to check index exist.
|
|
||||||
func (d *dbBaseMysql) IndexExists(ctx context.Context, db dbQuerier, table string, name string) bool {
|
|
||||||
row := db.QueryRowContext(ctx, "SELECT count(*) FROM information_schema.statistics "+
|
|
||||||
"WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name)
|
|
||||||
var cnt int
|
|
||||||
row.Scan(&cnt)
|
|
||||||
return cnt > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertOrUpdate a row
|
|
||||||
// If your primary key or unique column conflict will update
|
|
||||||
// If no will insert
|
|
||||||
// Add "`" for mysql sql building
|
|
||||||
func (d *dbBaseMysql) InsertOrUpdate(ctx context.Context, q dbQuerier, mi *modelInfo, ind reflect.Value, a *alias, args ...string) (int64, error) {
|
|
||||||
var iouStr string
|
|
||||||
argsMap := map[string]string{}
|
|
||||||
|
|
||||||
iouStr = "ON DUPLICATE KEY UPDATE"
|
|
||||||
|
|
||||||
// Get on the key-value pairs
|
|
||||||
for _, v := range args {
|
|
||||||
kv := strings.Split(v, "=")
|
|
||||||
if len(kv) == 2 {
|
|
||||||
argsMap[strings.ToLower(kv[0])] = kv[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
isMulti := false
|
|
||||||
names := make([]string, 0, len(mi.fields.dbcols)-1)
|
|
||||||
Q := d.ins.TableQuote()
|
|
||||||
values, _, err := d.collectValues(mi, ind, mi.fields.dbcols, true, true, &names, a.TZ)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
marks := make([]string, len(names))
|
|
||||||
updateValues := make([]interface{}, 0)
|
|
||||||
updates := make([]string, len(names))
|
|
||||||
|
|
||||||
for i, v := range names {
|
|
||||||
marks[i] = "?"
|
|
||||||
valueStr := argsMap[strings.ToLower(v)]
|
|
||||||
if valueStr != "" {
|
|
||||||
updates[i] = "`" + v + "`" + "=" + valueStr
|
|
||||||
} else {
|
|
||||||
updates[i] = "`" + v + "`" + "=?"
|
|
||||||
updateValues = append(updateValues, values[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
values = append(values, updateValues...)
|
|
||||||
|
|
||||||
sep := fmt.Sprintf("%s, %s", Q, Q)
|
|
||||||
qmarks := strings.Join(marks, ", ")
|
|
||||||
qupdates := strings.Join(updates, ", ")
|
|
||||||
columns := strings.Join(names, sep)
|
|
||||||
|
|
||||||
multi := len(values) / len(names)
|
|
||||||
|
|
||||||
if isMulti {
|
|
||||||
qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
|
|
||||||
}
|
|
||||||
// conflitValue maybe is an int,can`t use fmt.Sprintf
|
|
||||||
query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s) %s "+qupdates, Q, mi.table, Q, Q, columns, Q, qmarks, iouStr)
|
|
||||||
|
|
||||||
d.ins.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
if isMulti || !d.ins.HasReturningID(mi, &query) {
|
|
||||||
res, err := q.ExecContext(ctx, query, values...)
|
|
||||||
if err == nil {
|
|
||||||
if isMulti {
|
|
||||||
return res.RowsAffected()
|
|
||||||
}
|
|
||||||
|
|
||||||
lastInsertId, err := res.LastInsertId()
|
|
||||||
if err != nil {
|
|
||||||
DebugLog.Println(ErrLastInsertIdUnavailable, ':', err)
|
|
||||||
return lastInsertId, ErrLastInsertIdUnavailable
|
|
||||||
} else {
|
|
||||||
return lastInsertId, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
row := q.QueryRowContext(ctx, query, values...)
|
|
||||||
var id int64
|
|
||||||
err = row.Scan(&id)
|
|
||||||
return id, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new mysql dbBaser.
|
|
||||||
func newdbBaseMysql() dbBaser {
|
|
||||||
b := new(dbBaseMysql)
|
|
||||||
b.ins = b
|
|
||||||
return b
|
|
||||||
}
|
|
@ -1,171 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/hints"
|
|
||||||
)
|
|
||||||
|
|
||||||
// oracle operators.
|
|
||||||
var oracleOperators = map[string]string{
|
|
||||||
"exact": "= ?",
|
|
||||||
"gt": "> ?",
|
|
||||||
"gte": ">= ?",
|
|
||||||
"lt": "< ?",
|
|
||||||
"lte": "<= ?",
|
|
||||||
"//iendswith": "LIKE ?",
|
|
||||||
}
|
|
||||||
|
|
||||||
// oracle column field types.
|
|
||||||
var oracleTypes = map[string]string{
|
|
||||||
"pk": "NOT NULL PRIMARY KEY",
|
|
||||||
"bool": "bool",
|
|
||||||
"string": "VARCHAR2(%d)",
|
|
||||||
"string-char": "CHAR(%d)",
|
|
||||||
"string-text": "VARCHAR2(%d)",
|
|
||||||
"time.Time-date": "DATE",
|
|
||||||
"time.Time": "TIMESTAMP",
|
|
||||||
"int8": "INTEGER",
|
|
||||||
"int16": "INTEGER",
|
|
||||||
"int32": "INTEGER",
|
|
||||||
"int64": "INTEGER",
|
|
||||||
"uint8": "INTEGER",
|
|
||||||
"uint16": "INTEGER",
|
|
||||||
"uint32": "INTEGER",
|
|
||||||
"uint64": "INTEGER",
|
|
||||||
"float64": "NUMBER",
|
|
||||||
"float64-decimal": "NUMBER(%d, %d)",
|
|
||||||
"time.Time-precision": "TIMESTAMP(%d)",
|
|
||||||
}
|
|
||||||
|
|
||||||
// oracle dbBaser
|
|
||||||
type dbBaseOracle struct {
|
|
||||||
dbBase
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ dbBaser = new(dbBaseOracle)
|
|
||||||
|
|
||||||
// create oracle dbBaser.
|
|
||||||
func newdbBaseOracle() dbBaser {
|
|
||||||
b := new(dbBaseOracle)
|
|
||||||
b.ins = b
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
|
|
||||||
// OperatorSQL get oracle operator.
|
|
||||||
func (d *dbBaseOracle) OperatorSQL(operator string) string {
|
|
||||||
return oracleOperators[operator]
|
|
||||||
}
|
|
||||||
|
|
||||||
// DbTypes get oracle table field types.
|
|
||||||
func (d *dbBaseOracle) DbTypes() map[string]string {
|
|
||||||
return oracleTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShowTablesQuery show all the tables in database
|
|
||||||
func (d *dbBaseOracle) ShowTablesQuery() string {
|
|
||||||
return "SELECT TABLE_NAME FROM USER_TABLES"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Oracle
|
|
||||||
func (d *dbBaseOracle) ShowColumnsQuery(table string) string {
|
|
||||||
return fmt.Sprintf("SELECT COLUMN_NAME FROM ALL_TAB_COLUMNS "+
|
|
||||||
"WHERE TABLE_NAME ='%s'", strings.ToUpper(table))
|
|
||||||
}
|
|
||||||
|
|
||||||
// check index is exist
|
|
||||||
func (d *dbBaseOracle) IndexExists(ctx context.Context, db dbQuerier, table string, name string) bool {
|
|
||||||
row := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM USER_IND_COLUMNS, USER_INDEXES "+
|
|
||||||
"WHERE USER_IND_COLUMNS.INDEX_NAME = USER_INDEXES.INDEX_NAME "+
|
|
||||||
"AND USER_IND_COLUMNS.TABLE_NAME = ? AND USER_IND_COLUMNS.INDEX_NAME = ?", strings.ToUpper(table), strings.ToUpper(name))
|
|
||||||
|
|
||||||
var cnt int
|
|
||||||
row.Scan(&cnt)
|
|
||||||
return cnt > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dbBaseOracle) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {
|
|
||||||
var s []string
|
|
||||||
Q := d.TableQuote()
|
|
||||||
for _, index := range indexes {
|
|
||||||
tmp := fmt.Sprintf(`%s%s%s`, Q, index, Q)
|
|
||||||
s = append(s, tmp)
|
|
||||||
}
|
|
||||||
|
|
||||||
var hint string
|
|
||||||
|
|
||||||
switch useIndex {
|
|
||||||
case hints.KeyUseIndex, hints.KeyForceIndex:
|
|
||||||
hint = `INDEX`
|
|
||||||
case hints.KeyIgnoreIndex:
|
|
||||||
hint = `NO_INDEX`
|
|
||||||
default:
|
|
||||||
DebugLog.Println("[WARN] Not a valid specifying action, so that action is ignored")
|
|
||||||
return ``
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf(` /*+ %s(%s %s)*/ `, hint, tableName, strings.Join(s, `,`))
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute insert sql with given struct and given values.
|
|
||||||
// insert the given values, not the field values in struct.
|
|
||||||
func (d *dbBaseOracle) InsertValue(ctx context.Context, q dbQuerier, mi *modelInfo, isMulti bool, names []string, values []interface{}) (int64, error) {
|
|
||||||
Q := d.ins.TableQuote()
|
|
||||||
|
|
||||||
marks := make([]string, len(names))
|
|
||||||
for i := range marks {
|
|
||||||
marks[i] = ":" + names[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
sep := fmt.Sprintf("%s, %s", Q, Q)
|
|
||||||
qmarks := strings.Join(marks, ", ")
|
|
||||||
columns := strings.Join(names, sep)
|
|
||||||
|
|
||||||
multi := len(values) / len(names)
|
|
||||||
|
|
||||||
if isMulti {
|
|
||||||
qmarks = strings.Repeat(qmarks+"), (", multi-1) + qmarks
|
|
||||||
}
|
|
||||||
|
|
||||||
query := fmt.Sprintf("INSERT INTO %s%s%s (%s%s%s) VALUES (%s)", Q, mi.table, Q, Q, columns, Q, qmarks)
|
|
||||||
|
|
||||||
d.ins.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
if isMulti || !d.ins.HasReturningID(mi, &query) {
|
|
||||||
res, err := q.ExecContext(ctx, query, values...)
|
|
||||||
if err == nil {
|
|
||||||
if isMulti {
|
|
||||||
return res.RowsAffected()
|
|
||||||
}
|
|
||||||
|
|
||||||
lastInsertId, err := res.LastInsertId()
|
|
||||||
if err != nil {
|
|
||||||
DebugLog.Println(ErrLastInsertIdUnavailable, ':', err)
|
|
||||||
return lastInsertId, ErrLastInsertIdUnavailable
|
|
||||||
} else {
|
|
||||||
return lastInsertId, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
row := q.QueryRowContext(ctx, query, values...)
|
|
||||||
var id int64
|
|
||||||
err := row.Scan(&id)
|
|
||||||
return id, err
|
|
||||||
}
|
|
@ -1,197 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// postgresql operators.
|
|
||||||
var postgresOperators = map[string]string{
|
|
||||||
"exact": "= ?",
|
|
||||||
"iexact": "= UPPER(?)",
|
|
||||||
"contains": "LIKE ?",
|
|
||||||
"icontains": "LIKE UPPER(?)",
|
|
||||||
"gt": "> ?",
|
|
||||||
"gte": ">= ?",
|
|
||||||
"lt": "< ?",
|
|
||||||
"lte": "<= ?",
|
|
||||||
"eq": "= ?",
|
|
||||||
"ne": "!= ?",
|
|
||||||
"startswith": "LIKE ?",
|
|
||||||
"endswith": "LIKE ?",
|
|
||||||
"istartswith": "LIKE UPPER(?)",
|
|
||||||
"iendswith": "LIKE UPPER(?)",
|
|
||||||
}
|
|
||||||
|
|
||||||
// postgresql column field types.
|
|
||||||
var postgresTypes = map[string]string{
|
|
||||||
"auto": "serial NOT NULL PRIMARY KEY",
|
|
||||||
"pk": "NOT NULL PRIMARY KEY",
|
|
||||||
"bool": "bool",
|
|
||||||
"string": "varchar(%d)",
|
|
||||||
"string-char": "char(%d)",
|
|
||||||
"string-text": "text",
|
|
||||||
"time.Time-date": "date",
|
|
||||||
"time.Time": "timestamp with time zone",
|
|
||||||
"int8": `smallint CHECK("%COL%" >= -127 AND "%COL%" <= 128)`,
|
|
||||||
"int16": "smallint",
|
|
||||||
"int32": "integer",
|
|
||||||
"int64": "bigint",
|
|
||||||
"uint8": `smallint CHECK("%COL%" >= 0 AND "%COL%" <= 255)`,
|
|
||||||
"uint16": `integer CHECK("%COL%" >= 0)`,
|
|
||||||
"uint32": `bigint CHECK("%COL%" >= 0)`,
|
|
||||||
"uint64": `bigint CHECK("%COL%" >= 0)`,
|
|
||||||
"float64": "double precision",
|
|
||||||
"float64-decimal": "numeric(%d, %d)",
|
|
||||||
"json": "json",
|
|
||||||
"jsonb": "jsonb",
|
|
||||||
"time.Time-precision": "timestamp(%d) with time zone",
|
|
||||||
}
|
|
||||||
|
|
||||||
// postgresql dbBaser.
|
|
||||||
type dbBasePostgres struct {
|
|
||||||
dbBase
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ dbBaser = new(dbBasePostgres)
|
|
||||||
|
|
||||||
// get postgresql operator.
|
|
||||||
func (d *dbBasePostgres) OperatorSQL(operator string) string {
|
|
||||||
return postgresOperators[operator]
|
|
||||||
}
|
|
||||||
|
|
||||||
// generate functioned sql string, such as contains(text).
|
|
||||||
func (d *dbBasePostgres) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {
|
|
||||||
switch operator {
|
|
||||||
case "contains", "startswith", "endswith":
|
|
||||||
*leftCol = fmt.Sprintf("%s::text", *leftCol)
|
|
||||||
case "iexact", "icontains", "istartswith", "iendswith":
|
|
||||||
*leftCol = fmt.Sprintf("UPPER(%s::text)", *leftCol)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// postgresql unsupports updating joined record.
|
|
||||||
func (d *dbBasePostgres) SupportUpdateJoin() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *dbBasePostgres) MaxLimit() uint64 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// postgresql quote is ".
|
|
||||||
func (d *dbBasePostgres) TableQuote() string {
|
|
||||||
return `"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// postgresql value placeholder is $n.
|
|
||||||
// replace default ? to $n.
|
|
||||||
func (d *dbBasePostgres) ReplaceMarks(query *string) {
|
|
||||||
q := *query
|
|
||||||
num := 0
|
|
||||||
for _, c := range q {
|
|
||||||
if c == '?' {
|
|
||||||
num++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if num == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
data := make([]byte, 0, len(q)+num)
|
|
||||||
num = 1
|
|
||||||
for i := 0; i < len(q); i++ {
|
|
||||||
c := q[i]
|
|
||||||
if c == '?' {
|
|
||||||
data = append(data, '$')
|
|
||||||
data = append(data, []byte(strconv.Itoa(num))...)
|
|
||||||
num++
|
|
||||||
} else {
|
|
||||||
data = append(data, c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
*query = string(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// make returning sql support for postgresql.
|
|
||||||
func (d *dbBasePostgres) HasReturningID(mi *modelInfo, query *string) bool {
|
|
||||||
fi := mi.fields.pk
|
|
||||||
if fi.fieldType&IsPositiveIntegerField == 0 && fi.fieldType&IsIntegerField == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if query != nil {
|
|
||||||
*query = fmt.Sprintf(`%s RETURNING "%s"`, *query, fi.column)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// sync auto key
|
|
||||||
func (d *dbBasePostgres) setval(ctx context.Context, db dbQuerier, mi *modelInfo, autoFields []string) error {
|
|
||||||
if len(autoFields) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
Q := d.ins.TableQuote()
|
|
||||||
for _, name := range autoFields {
|
|
||||||
query := fmt.Sprintf("SELECT setval(pg_get_serial_sequence('%s', '%s'), (SELECT MAX(%s%s%s) FROM %s%s%s));",
|
|
||||||
mi.table, name,
|
|
||||||
Q, name, Q,
|
|
||||||
Q, mi.table, Q)
|
|
||||||
if _, err := db.ExecContext(ctx, query); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// show table sql for postgresql.
|
|
||||||
func (d *dbBasePostgres) ShowTablesQuery() string {
|
|
||||||
return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema NOT IN ('pg_catalog', 'information_schema')"
|
|
||||||
}
|
|
||||||
|
|
||||||
// show table columns sql for postgresql.
|
|
||||||
func (d *dbBasePostgres) ShowColumnsQuery(table string) string {
|
|
||||||
return fmt.Sprintf("SELECT column_name, data_type, is_nullable FROM information_schema.columns where table_schema NOT IN ('pg_catalog', 'information_schema') and table_name = '%s'", table)
|
|
||||||
}
|
|
||||||
|
|
||||||
// get column types of postgresql.
|
|
||||||
func (d *dbBasePostgres) DbTypes() map[string]string {
|
|
||||||
return postgresTypes
|
|
||||||
}
|
|
||||||
|
|
||||||
// check index exist in postgresql.
|
|
||||||
func (d *dbBasePostgres) IndexExists(ctx context.Context, db dbQuerier, table string, name string) bool {
|
|
||||||
query := fmt.Sprintf("SELECT COUNT(*) FROM pg_indexes WHERE tablename = '%s' AND indexname = '%s'", table, name)
|
|
||||||
row := db.QueryRowContext(ctx, query)
|
|
||||||
var cnt int
|
|
||||||
row.Scan(&cnt)
|
|
||||||
return cnt > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateSpecifyIndex return a specifying index clause
|
|
||||||
func (d *dbBasePostgres) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {
|
|
||||||
DebugLog.Println("[WARN] Not support any specifying index action, so that action is ignored")
|
|
||||||
return ``
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new postgresql dbBaser.
|
|
||||||
func newdbBasePostgres() dbBaser {
|
|
||||||
b := new(dbBasePostgres)
|
|
||||||
b.ins = b
|
|
||||||
return b
|
|
||||||
}
|
|
@ -1,184 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/hints"
|
|
||||||
)
|
|
||||||
|
|
||||||
// sqliteOperators maps orm lookup names to their SQLite SQL fragments.
// LIKE-based operators declare `\` as the escape character so that literal
// `%`/`_` in user patterns can be escaped.
var sqliteOperators = map[string]string{
	"exact":       "= ?",
	"iexact":      "LIKE ? ESCAPE '\\'",
	"contains":    "LIKE ? ESCAPE '\\'",
	"icontains":   "LIKE ? ESCAPE '\\'",
	"gt":          "> ?",
	"gte":         ">= ?",
	"lt":          "< ?",
	"lte":         "<= ?",
	"eq":          "= ?",
	"ne":          "!= ?",
	"startswith":  "LIKE ? ESCAPE '\\'",
	"endswith":    "LIKE ? ESCAPE '\\'",
	"istartswith": "LIKE ? ESCAPE '\\'",
	"iendswith":   "LIKE ? ESCAPE '\\'",
}

// sqliteTypes maps orm field kinds to SQLite column type templates;
// %d placeholders are filled in with size/precision when the DDL is built.
var sqliteTypes = map[string]string{
	"auto":                "integer NOT NULL PRIMARY KEY AUTOINCREMENT",
	"pk":                  "NOT NULL PRIMARY KEY",
	"bool":                "bool",
	"string":              "varchar(%d)",
	"string-char":         "character(%d)",
	"string-text":         "text",
	"time.Time-date":      "date",
	"time.Time":           "datetime",
	"time.Time-precision": "datetime(%d)",
	"int8":                "tinyint",
	"int16":               "smallint",
	"int32":               "integer",
	"int64":               "bigint",
	"uint8":               "tinyint unsigned",
	"uint16":              "smallint unsigned",
	"uint32":              "integer unsigned",
	"uint64":              "bigint unsigned",
	"float64":             "real",
	"float64-decimal":     "decimal",
}

// dbBaseSqlite is the SQLite dialect. It embeds the generic dbBase and
// overrides only behavior that differs from the default implementation.
type dbBaseSqlite struct {
	dbBase
}

// Compile-time check that dbBaseSqlite satisfies dbBaser.
var _ dbBaser = new(dbBaseSqlite)
|
|
||||||
// Read overrides dbBase.Read because SQLite has no SELECT ... FOR UPDATE
// syntax; the isForUpdate flag is logged as a warning and forced to false.
func (d *dbBaseSqlite) Read(ctx context.Context, q dbQuerier, mi *modelInfo, ind reflect.Value, tz *time.Location, cols []string, isForUpdate bool) error {
	if isForUpdate {
		DebugLog.Println("[WARN] SQLite does not support SELECT FOR UPDATE query, isForUpdate param is ignored and always as false to do the work")
	}
	// Delegate to the generic implementation with the flag cleared.
	return d.dbBase.Read(ctx, q, mi, ind, tz, cols, false)
}
|
|
||||||
|
|
||||||
// OperatorSQL returns the SQLite SQL fragment for an orm lookup operator.
func (d *dbBaseSqlite) OperatorSQL(operator string) string {
	return sqliteOperators[operator]
}

// GenerateOperatorLeftCol rewrites the left-hand column for date fields,
// wrapping it in DATE(...) — the only functioned form SQLite needs here.
func (d *dbBaseSqlite) GenerateOperatorLeftCol(fi *fieldInfo, operator string, leftCol *string) {
	if fi.fieldType == TypeDateField {
		*leftCol = fmt.Sprintf("DATE(%s)", *leftCol)
	}
}

// SupportUpdateJoin reports that SQLite cannot update joined records.
func (d *dbBaseSqlite) SupportUpdateJoin() bool {
	return false
}

// MaxLimit returns the largest usable LIMIT value on SQLite (math.MaxInt64).
func (d *dbBaseSqlite) MaxLimit() uint64 {
	return 9223372036854775807
}

// DbTypes returns the SQLite column type templates.
func (d *dbBaseSqlite) DbTypes() map[string]string {
	return sqliteTypes
}

// ShowTablesQuery returns the SQL listing all tables via sqlite_master.
func (d *dbBaseSqlite) ShowTablesQuery() string {
	return "SELECT name FROM sqlite_master WHERE type = 'table'"
}
|
|
||||||
|
|
||||||
// get columns in sqlite.
|
|
||||||
func (d *dbBaseSqlite) GetColumns(ctx context.Context, db dbQuerier, table string) (map[string][3]string, error) {
|
|
||||||
query := d.ins.ShowColumnsQuery(table)
|
|
||||||
rows, err := db.QueryContext(ctx, query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
columns := make(map[string][3]string)
|
|
||||||
for rows.Next() {
|
|
||||||
var tmp, name, typ, null sql.NullString
|
|
||||||
err := rows.Scan(&tmp, &name, &typ, &null, &tmp, &tmp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
columns[name.String] = [3]string{name.String, typ.String, null.String}
|
|
||||||
}
|
|
||||||
|
|
||||||
return columns, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ShowColumnsQuery returns the SQLite pragma that lists a table's columns.
func (d *dbBaseSqlite) ShowColumnsQuery(table string) string {
	return fmt.Sprintf("pragma table_info('%s')", table)
}
|
|
||||||
|
|
||||||
// check index exist in sqlite.
|
|
||||||
func (d *dbBaseSqlite) IndexExists(ctx context.Context, db dbQuerier, table string, name string) bool {
|
|
||||||
query := fmt.Sprintf("PRAGMA index_list('%s')", table)
|
|
||||||
rows, err := db.QueryContext(ctx, query)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
defer rows.Close()
|
|
||||||
for rows.Next() {
|
|
||||||
var tmp, index sql.NullString
|
|
||||||
rows.Scan(&tmp, &index, &tmp, &tmp, &tmp)
|
|
||||||
if name == index.String {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GenerateSpecifyIndex return a specifying index clause
|
|
||||||
func (d *dbBaseSqlite) GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string {
|
|
||||||
var s []string
|
|
||||||
Q := d.TableQuote()
|
|
||||||
for _, index := range indexes {
|
|
||||||
tmp := fmt.Sprintf(`%s%s%s`, Q, index, Q)
|
|
||||||
s = append(s, tmp)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch useIndex {
|
|
||||||
case hints.KeyUseIndex, hints.KeyForceIndex:
|
|
||||||
return fmt.Sprintf(` INDEXED BY %s `, strings.Join(s, `,`))
|
|
||||||
default:
|
|
||||||
DebugLog.Println("[WARN] Not a valid specifying action, so that action is ignored")
|
|
||||||
return ``
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new sqlite dbBaser.
|
|
||||||
func newdbBaseSqlite() dbBaser {
|
|
||||||
b := new(dbBaseSqlite)
|
|
||||||
b.ins = b
|
|
||||||
return b
|
|
||||||
}
|
|
@ -1,499 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses"
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses/order_clause"
|
|
||||||
)
|
|
||||||
|
|
||||||
// dbTable describes one joined table in a query: its SQL alias, the relation
// path that produced it, and the join metadata needed to render it.
type dbTable struct {
	id    int        // 1-based position in the join list
	index string     // SQL alias, e.g. "T1"
	name  string     // relation path segments joined with ExprSep
	names []string   // relation path segments
	sel   bool       // whether this table's columns are selected
	inner bool       // INNER JOIN when true, LEFT OUTER JOIN otherwise
	mi    *modelInfo // model this table maps to
	fi    *fieldInfo // relation field linking to this table
	jtl   *dbTable   // parent join table; nil means joined to the root T0
}

// dbTables collects all join tables needed by a single query.
type dbTables struct {
	tablesM map[string]*dbTable // relation path -> table lookup
	tables  []*dbTable          // tables in registration order
	mi      *modelInfo          // root model (alias T0)
	base    dbBaser             // dialect used for quoting / SQL generation
	skipEnd bool
}
|
|
||||||
|
|
||||||
// set table info to collection.
|
|
||||||
// if not exist, create new.
|
|
||||||
func (t *dbTables) set(names []string, mi *modelInfo, fi *fieldInfo, inner bool) *dbTable {
|
|
||||||
name := strings.Join(names, ExprSep)
|
|
||||||
if j, ok := t.tablesM[name]; ok {
|
|
||||||
j.name = name
|
|
||||||
j.mi = mi
|
|
||||||
j.fi = fi
|
|
||||||
j.inner = inner
|
|
||||||
} else {
|
|
||||||
i := len(t.tables) + 1
|
|
||||||
jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
|
|
||||||
t.tablesM[name] = jt
|
|
||||||
t.tables = append(t.tables, jt)
|
|
||||||
}
|
|
||||||
return t.tablesM[name]
|
|
||||||
}
|
|
||||||
|
|
||||||
// add table info to collection.
|
|
||||||
func (t *dbTables) add(names []string, mi *modelInfo, fi *fieldInfo, inner bool) (*dbTable, bool) {
|
|
||||||
name := strings.Join(names, ExprSep)
|
|
||||||
if _, ok := t.tablesM[name]; !ok {
|
|
||||||
i := len(t.tables) + 1
|
|
||||||
jt := &dbTable{i, fmt.Sprintf("T%d", i), name, names, false, inner, mi, fi, nil}
|
|
||||||
t.tablesM[name] = jt
|
|
||||||
t.tables = append(t.tables, jt)
|
|
||||||
return jt, true
|
|
||||||
}
|
|
||||||
return t.tablesM[name], false
|
|
||||||
}
|
|
||||||
|
|
||||||
// get looks up a previously registered join table by its relation path.
func (t *dbTables) get(name string) (*dbTable, bool) {
	j, ok := t.tablesM[name]
	return j, ok
}
|
|
||||||
|
|
||||||
// get related fields info in recursive depth loop.
|
|
||||||
// loop once, depth decreases one.
|
|
||||||
func (t *dbTables) loopDepth(depth int, prefix string, fi *fieldInfo, related []string) []string {
|
|
||||||
if depth < 0 || fi.fieldType == RelManyToMany {
|
|
||||||
return related
|
|
||||||
}
|
|
||||||
|
|
||||||
if prefix == "" {
|
|
||||||
prefix = fi.name
|
|
||||||
} else {
|
|
||||||
prefix = prefix + ExprSep + fi.name
|
|
||||||
}
|
|
||||||
related = append(related, prefix)
|
|
||||||
|
|
||||||
depth--
|
|
||||||
for _, fi := range fi.relModelInfo.fields.fieldsRel {
|
|
||||||
related = t.loopDepth(depth, prefix, fi, related)
|
|
||||||
}
|
|
||||||
|
|
||||||
return related
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseRelated registers join tables for the explicitly requested related
// paths (rels) plus every relation reachable within depth levels when no
// explicit paths are given.
func (t *dbTables) parseRelated(rels []string, depth int) {
	relsNum := len(rels)
	related := make([]string, relsNum)
	copy(related, rels)

	relDepth := depth

	// Explicit relations suppress automatic depth expansion.
	if relsNum != 0 {
		relDepth = 0
	}

	relDepth--
	for _, fi := range t.mi.fields.fieldsRel {
		related = t.loopDepth(relDepth, "", fi, related)
	}

	for i, s := range related {
		var (
			exs    = strings.Split(s, ExprSep)
			names  = make([]string, 0, len(exs))
			mmi    = t.mi
			cancel = true // selection is cancelled once a reverse relation is seen
			jtl    *dbTable
		)

		inner := true

		// Walk the path segment by segment, registering one join table per hop.
		for _, ex := range exs {
			if fi, ok := mmi.fields.GetByAny(ex); ok && fi.rel && fi.fieldType != RelManyToMany {
				names = append(names, fi.name)
				mmi = fi.relModelInfo

				// Nullable links (or a forced skip) demote to LEFT OUTER JOIN.
				if fi.null || t.skipEnd {
					inner = false
				}

				jt := t.set(names, mmi, fi, inner)
				jt.jtl = jtl

				if fi.reverse {
					cancel = false
				}

				if cancel {
					jt.sel = depth > 0

					// Explicitly requested paths are always selected.
					if i < relsNum {
						jt.sel = true
					}
				}

				jtl = jt

			} else {
				panic(fmt.Errorf("unknown model/table name `%s`", ex))
			}
		}
	}
}
|
|
||||||
|
|
||||||
// generate join string.
|
|
||||||
func (t *dbTables) getJoinSQL() (join string) {
|
|
||||||
Q := t.base.TableQuote()
|
|
||||||
|
|
||||||
for _, jt := range t.tables {
|
|
||||||
if jt.inner {
|
|
||||||
join += "INNER JOIN "
|
|
||||||
} else {
|
|
||||||
join += "LEFT OUTER JOIN "
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
table string
|
|
||||||
t1, t2 string
|
|
||||||
c1, c2 string
|
|
||||||
)
|
|
||||||
t1 = "T0"
|
|
||||||
if jt.jtl != nil {
|
|
||||||
t1 = jt.jtl.index
|
|
||||||
}
|
|
||||||
t2 = jt.index
|
|
||||||
table = jt.mi.table
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case jt.fi.fieldType == RelManyToMany || jt.fi.fieldType == RelReverseMany || jt.fi.reverse && jt.fi.reverseFieldInfo.fieldType == RelManyToMany:
|
|
||||||
c1 = jt.fi.mi.fields.pk.column
|
|
||||||
for _, ffi := range jt.mi.fields.fieldsRel {
|
|
||||||
if jt.fi.mi == ffi.relModelInfo {
|
|
||||||
c2 = ffi.column
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
c1 = jt.fi.column
|
|
||||||
c2 = jt.fi.relModelInfo.fields.pk.column
|
|
||||||
|
|
||||||
if jt.fi.reverse {
|
|
||||||
c1 = jt.mi.fields.pk.column
|
|
||||||
c2 = jt.fi.reverseFieldInfo.column
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
join += fmt.Sprintf("%s%s%s %s ON %s.%s%s%s = %s.%s%s%s ", Q, table, Q, t2,
|
|
||||||
t2, Q, c2, Q, t1, Q, c1, Q)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseExprs resolves a field-tag expression path (e.g. ["user", "profile",
// "age"]) against mi, registering any join tables needed along the way.
// It returns the table alias, the full ExprSep-joined name, the resolved
// field info, and whether resolution succeeded.
func (t *dbTables) parseExprs(mi *modelInfo, exprs []string) (index, name string, info *fieldInfo, success bool) {
	var (
		jtl *dbTable    // last registered join table
		fi  *fieldInfo  // field resolved for the current segment
		fiN *fieldInfo  // look-ahead: field for the next segment
		mmi = mi        // model the current segment is resolved against
	)

	num := len(exprs) - 1
	var names []string

	inner := true

loopFor:
	for i, ex := range exprs {

		var ok, okN bool

		// Consume the look-ahead resolved on the previous iteration.
		if fiN != nil {
			fi = fiN
			ok = true
			fiN = nil
		}

		// First segment has no look-ahead; resolve it directly.
		if i == 0 {
			fi, ok = mmi.fields.GetByAny(ex)
		}

		// okN is only meaningful inside the relation branch below.
		_ = okN

		if ok {

			isRel := fi.rel || fi.reverse

			names = append(names, fi.name)

			// Advance mmi to the model the next segment resolves against.
			switch {
			case fi.rel:
				mmi = fi.relModelInfo
				if fi.fieldType == RelManyToMany {
					mmi = fi.relThroughModelInfo
				}
			case fi.reverse:
				mmi = fi.reverseFieldInfo.mi
			}

			// Pre-resolve the next segment (look-ahead for the skipEnd logic).
			if i < num {
				fiN, okN = mmi.fields.GetByAny(exprs[i+1])
			}

			if isRel && (!fi.mi.isThrough || num != i) {
				// Nullable links (or a forced skip) demote to LEFT OUTER JOIN.
				if fi.null || t.skipEnd {
					inner = false
				}

				if t.skipEnd && okN || !t.skipEnd {
					// skipEnd + next segment is the PK: no join table needed,
					// jump straight to result assembly.
					if t.skipEnd && okN && fiN.pk {
						goto loopEnd
					}

					jt, _ := t.add(names, mmi, fi, inner)
					jt.jtl = jtl
					jtl = jt
				}

			}

			if num != i {
				continue
			}

			// Last segment: assemble the result.
		loopEnd:

			if i == 0 || jtl == nil {
				index = "T0"
			} else {
				index = jtl.index
			}

			info = fi

			if jtl == nil {
				name = fi.name
			} else {
				name = jtl.name + ExprSep + fi.name
			}

			switch {
			case fi.rel:

			case fi.reverse:
				// A reverse one-to-one / FK resolves to the related model's PK.
				switch fi.reverseFieldInfo.fieldType {
				case RelOneToOne, RelForeignKey:
					index = jtl.index
					info = fi.reverseFieldInfo.mi.fields.pk
					name = info.name
				}
			}

			break loopFor

		} else {
			// Unknown segment: reset outputs and fail.
			index = ""
			name = ""
			info = nil
			success = false
			return
		}
	}

	success = index != "" && info != nil
	return
}
|
|
||||||
|
|
||||||
// getCondSQL renders a Condition tree into a WHERE clause and its bind
// parameters. sub marks a nested (parenthesized) condition, so the "WHERE "
// prefix is only added at the top level.
func (t *dbTables) getCondSQL(cond *Condition, sub bool, tz *time.Location) (where string, params []interface{}) {
	if cond == nil || cond.IsEmpty() {
		return
	}

	Q := t.base.TableQuote()

	mi := t.mi

	for i, p := range cond.params {
		// Connector between terms; the first term has none.
		if i > 0 {
			if p.isOr {
				where += "OR "
			} else {
				where += "AND "
			}
		}
		if p.isNot {
			where += "NOT "
		}
		if p.isCond {
			// Nested condition: recurse and wrap in parentheses.
			w, ps := t.getCondSQL(p.cond, true, tz)
			if w != "" {
				w = fmt.Sprintf("( %s) ", w)
			}
			where += w
			params = append(params, ps...)
		} else {
			exprs := p.exprs

			// A trailing known operator selects the comparison;
			// the default is "exact".
			num := len(exprs) - 1
			operator := ""
			if operators[exprs[num]] {
				operator = exprs[num]
				exprs = exprs[:num]
			}

			index, _, fi, suc := t.parseExprs(mi, exprs)
			if !suc {
				panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(p.exprs, ExprSep)))
			}

			if operator == "" {
				operator = "exact"
			}

			var operSQL string
			var args []interface{}
			if p.isRaw {
				operSQL = p.sql
			} else {
				operSQL, args = t.base.GenerateOperatorSQL(mi, fi, operator, p.args, tz)
			}

			leftCol := fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q)
			// Dialects may wrap the column (e.g. DATE(col) on SQLite).
			t.base.GenerateOperatorLeftCol(fi, operator, &leftCol)

			where += fmt.Sprintf("%s %s ", leftCol, operSQL)
			params = append(params, args...)

		}
	}

	if !sub && where != "" {
		where = "WHERE " + where
	}

	return
}
|
|
||||||
|
|
||||||
// generate group sql.
|
|
||||||
func (t *dbTables) getGroupSQL(groups []string) (groupSQL string) {
|
|
||||||
if len(groups) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
Q := t.base.TableQuote()
|
|
||||||
|
|
||||||
groupSqls := make([]string, 0, len(groups))
|
|
||||||
for _, group := range groups {
|
|
||||||
exprs := strings.Split(group, ExprSep)
|
|
||||||
|
|
||||||
index, _, fi, suc := t.parseExprs(t.mi, exprs)
|
|
||||||
if !suc {
|
|
||||||
panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(exprs, ExprSep)))
|
|
||||||
}
|
|
||||||
|
|
||||||
groupSqls = append(groupSqls, fmt.Sprintf("%s.%s%s%s", index, Q, fi.column, Q))
|
|
||||||
}
|
|
||||||
|
|
||||||
groupSQL = fmt.Sprintf("GROUP BY %s ", strings.Join(groupSqls, ", "))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getOrderSQL renders the ORDER BY clause. Raw orders are emitted as given
// (either table-qualified "alias.col" or a bare column); non-raw orders are
// resolved through parseExprs to their joined table alias.
func (t *dbTables) getOrderSQL(orders []*order_clause.Order) (orderSQL string) {
	if len(orders) == 0 {
		return
	}

	Q := t.base.TableQuote()

	orderSqls := make([]string, 0, len(orders))
	for _, order := range orders {
		column := order.GetColumn()
		clause := strings.Split(column, clauses.ExprDot)

		if order.IsRaw() {
			if len(clause) == 2 {
				// "alias.column" form: quote only the column part.
				orderSqls = append(orderSqls, fmt.Sprintf("%s.%s%s%s %s", clause[0], Q, clause[1], Q, order.SortString()))
			} else if len(clause) == 1 {
				orderSqls = append(orderSqls, fmt.Sprintf("%s%s%s %s", Q, clause[0], Q, order.SortString()))
			} else {
				panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(clause, ExprSep)))
			}
		} else {
			index, _, fi, suc := t.parseExprs(t.mi, clause)
			if !suc {
				panic(fmt.Errorf("unknown field/column name `%s`", strings.Join(clause, ExprSep)))
			}

			orderSqls = append(orderSqls, fmt.Sprintf("%s.%s%s%s %s", index, Q, fi.column, Q, order.SortString()))
		}
	}

	orderSQL = fmt.Sprintf("ORDER BY %s ", strings.Join(orderSqls, ", "))
	return
}
|
|
||||||
|
|
||||||
// generate limit sql.
|
|
||||||
func (t *dbTables) getLimitSQL(mi *modelInfo, offset int64, limit int64) (limits string) {
|
|
||||||
if limit == 0 {
|
|
||||||
limit = int64(DefaultRowsLimit)
|
|
||||||
}
|
|
||||||
if limit < 0 {
|
|
||||||
// no limit
|
|
||||||
if offset > 0 {
|
|
||||||
maxLimit := t.base.MaxLimit()
|
|
||||||
if maxLimit == 0 {
|
|
||||||
limits = fmt.Sprintf("OFFSET %d", offset)
|
|
||||||
} else {
|
|
||||||
limits = fmt.Sprintf("LIMIT %d OFFSET %d", maxLimit, offset)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if offset <= 0 {
|
|
||||||
limits = fmt.Sprintf("LIMIT %d", limit)
|
|
||||||
} else {
|
|
||||||
limits = fmt.Sprintf("LIMIT %d OFFSET %d", limit, offset)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getIndexSql generate index sql.
|
|
||||||
func (t *dbTables) getIndexSql(tableName string, useIndex int, indexes []string) (clause string) {
|
|
||||||
if len(indexes) == 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return t.base.GenerateSpecifyIndex(tableName, useIndex, indexes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// crete new tables collection.
|
|
||||||
func newDbTables(mi *modelInfo, base dbBaser) *dbTables {
|
|
||||||
tables := &dbTables{}
|
|
||||||
tables.tablesM = make(map[string]*dbTable)
|
|
||||||
tables.mi = mi
|
|
||||||
tables.base = base
|
|
||||||
return tables
|
|
||||||
}
|
|
@ -1,64 +0,0 @@
|
|||||||
// Copyright 2015 TiDB Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
// dbBaseTidb is the TiDB dialect. TiDB speaks the MySQL protocol, so this
// type reuses the MySQL operator and column type tables.
type dbBaseTidb struct {
	dbBase
}

// Compile-time check that dbBaseTidb satisfies dbBaser.
var _ dbBaser = new(dbBaseTidb)
|
|
||||||
// OperatorSQL returns the MySQL-compatible SQL fragment for an orm operator.
func (d *dbBaseTidb) OperatorSQL(operator string) string {
	return mysqlOperators[operator]
}

// DbTypes returns the MySQL-compatible column type templates.
func (d *dbBaseTidb) DbTypes() map[string]string {
	return mysqlTypes
}

// ShowTablesQuery returns the SQL listing base tables of the current schema.
func (d *dbBaseTidb) ShowTablesQuery() string {
	return "SELECT table_name FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND table_schema = DATABASE()"
}

// ShowColumnsQuery returns the SQL describing the columns of table.
func (d *dbBaseTidb) ShowColumnsQuery(table string) string {
	return fmt.Sprintf("SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE FROM information_schema.columns "+
		"WHERE table_schema = DATABASE() AND table_name = '%s'", table)
}
|
|
||||||
|
|
||||||
// execute sql to check index exist.
|
|
||||||
func (d *dbBaseTidb) IndexExists(ctx context.Context, db dbQuerier, table string, name string) bool {
|
|
||||||
row := db.QueryRowContext(ctx, "SELECT count(*) FROM information_schema.statistics "+
|
|
||||||
"WHERE table_schema = DATABASE() AND table_name = ? AND index_name = ?", table, name)
|
|
||||||
var cnt int
|
|
||||||
row.Scan(&cnt)
|
|
||||||
return cnt > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new mysql dbBaser.
|
|
||||||
func newdbBaseTidb() dbBaser {
|
|
||||||
b := new(dbBaseTidb)
|
|
||||||
b.ins = b
|
|
||||||
return b
|
|
||||||
}
|
|
@ -1,175 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// get table alias.
|
|
||||||
func getDbAlias(name string) *alias {
|
|
||||||
if al, ok := dataBaseCache.get(name); ok {
|
|
||||||
return al
|
|
||||||
}
|
|
||||||
panic(fmt.Errorf("unknown DataBase alias name %s", name))
|
|
||||||
}
|
|
||||||
|
|
||||||
// get pk column info.
|
|
||||||
func getExistPk(mi *modelInfo, ind reflect.Value) (column string, value interface{}, exist bool) {
|
|
||||||
fi := mi.fields.pk
|
|
||||||
|
|
||||||
v := ind.FieldByIndex(fi.fieldIndex)
|
|
||||||
if fi.fieldType&IsPositiveIntegerField > 0 {
|
|
||||||
vu := v.Uint()
|
|
||||||
exist = vu > 0
|
|
||||||
value = vu
|
|
||||||
} else if fi.fieldType&IsIntegerField > 0 {
|
|
||||||
vu := v.Int()
|
|
||||||
exist = true
|
|
||||||
value = vu
|
|
||||||
} else if fi.fieldType&IsRelField > 0 {
|
|
||||||
_, value, exist = getExistPk(fi.relModelInfo, reflect.Indirect(v))
|
|
||||||
} else {
|
|
||||||
vu := v.String()
|
|
||||||
exist = vu != ""
|
|
||||||
value = vu
|
|
||||||
}
|
|
||||||
|
|
||||||
column = fi.column
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFlatParams normalizes query arguments into flat driver-ready values:
// pointers are dereferenced, slices/arrays are expanded recursively, time
// values and date/time-shaped strings are reformatted for tz, and registered
// model structs are replaced by their primary key value.
func getFlatParams(fi *fieldInfo, args []interface{}, tz *time.Location) (params []interface{}) {
outFor:
	for _, arg := range args {
		if arg == nil {
			params = append(params, arg)
			continue
		}

		val := reflect.ValueOf(arg)
		kind := val.Kind()
		// Dereference one level of pointer.
		if kind == reflect.Ptr {
			val = val.Elem()
			kind = val.Kind()
			arg = val.Interface()
		}

		switch kind {
		case reflect.String:
			v := val.String()
			if fi != nil {
				if fi.fieldType == TypeTimeField || fi.fieldType == TypeDateField || fi.fieldType == TypeDateTimeField {
					var t time.Time
					var err error
					// Choose the parse layout by string length:
					// >=19 datetime, >=10 date, otherwise time-of-day.
					if len(v) >= 19 {
						s := v[:19]
						t, err = time.ParseInLocation(formatDateTime, s, DefaultTimeLoc)
					} else if len(v) >= 10 {
						s := v
						if len(v) > 10 {
							s = v[:10]
						}
						t, err = time.ParseInLocation(formatDate, s, tz)
					} else {
						s := v
						if len(s) > 8 {
							s = v[:8]
						}
						t, err = time.ParseInLocation(formatTime, s, tz)
					}
					// On parse failure the string passes through unchanged.
					if err == nil {
						if fi.fieldType == TypeDateField {
							v = t.In(tz).Format(formatDate)
						} else if fi.fieldType == TypeDateTimeField {
							v = t.In(tz).Format(formatDateTime)
						} else {
							v = t.In(tz).Format(formatTime)
						}
					}
				}
			}
			arg = v
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			arg = val.Int()
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
			arg = val.Uint()
		case reflect.Float32:
			// Round-trip through a string to avoid float32->float64 noise.
			arg, _ = StrTo(ToStr(arg)).Float64()
		case reflect.Float64:
			arg = val.Float()
		case reflect.Bool:
			arg = val.Bool()
		case reflect.Slice, reflect.Array:
			// []byte is a blob value, not a list to expand.
			if _, ok := arg.([]byte); ok {
				continue outFor
			}

			// Collect non-nil elements and flatten them recursively.
			var args []interface{}
			for i := 0; i < val.Len(); i++ {
				v := val.Index(i)

				var vu interface{}
				if v.CanInterface() {
					vu = v.Interface()
				}

				if vu == nil {
					continue
				}

				args = append(args, vu)
			}

			if len(args) > 0 {
				p := getFlatParams(fi, args, tz)
				params = append(params, p...)
			}
			continue outFor
		case reflect.Struct:
			if v, ok := arg.(time.Time); ok {
				// Format time.Time according to the target field type
				// (datetime when the field type is unknown).
				if fi != nil && fi.fieldType == TypeDateField {
					arg = v.In(tz).Format(formatDate)
				} else if fi != nil && fi.fieldType == TypeDateTimeField {
					arg = v.In(tz).Format(formatDateTime)
				} else if fi != nil && fi.fieldType == TypeTimeField {
					arg = v.In(tz).Format(formatTime)
				} else {
					arg = v.In(tz).Format(formatDateTime)
				}
			} else {
				// A registered model struct stands in for its primary key.
				typ := val.Type()
				name := getFullName(typ)
				var value interface{}
				if mmi, ok := defaultModelCache.getByFullName(name); ok {
					if _, vu, exist := getExistPk(mmi, val); exist {
						value = vu
					}
				}
				arg = value

				if arg == nil {
					panic(fmt.Errorf("need a valid args value, unknown table or value `%s`", name))
				}
			}
		}

		params = append(params, arg)
	}
	return
}
|
|
@ -1,181 +0,0 @@
|
|||||||
// Copyright 2020 beego
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/core/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DoNothingOrm won't do anything; usually you embed it to build a custom mock
// Ormer implementation, overriding only the methods your test cares about.
// Every method is a no-op returning zero values and nil errors.

// Compile-time check that DoNothingOrm satisfies Ormer.
var _ Ormer = new(DoNothingOrm)

type DoNothingOrm struct{}

// Read is a no-op; it always reports success.
func (d *DoNothingOrm) Read(md interface{}, cols ...string) error {
	return nil
}

// ReadWithCtx is a no-op; it always reports success.
func (d *DoNothingOrm) ReadWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	return nil
}

// ReadForUpdate is a no-op; it always reports success.
func (d *DoNothingOrm) ReadForUpdate(md interface{}, cols ...string) error {
	return nil
}

// ReadForUpdateWithCtx is a no-op; it always reports success.
func (d *DoNothingOrm) ReadForUpdateWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	return nil
}

// ReadOrCreate is a no-op; it reports "not created, id 0, no error".
func (d *DoNothingOrm) ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error) {
	return false, 0, nil
}

// ReadOrCreateWithCtx is a no-op; it reports "not created, id 0, no error".
func (d *DoNothingOrm) ReadOrCreateWithCtx(ctx context.Context, md interface{}, col1 string, cols ...string) (bool, int64, error) {
	return false, 0, nil
}

// LoadRelated is a no-op; it reports zero rows loaded.
func (d *DoNothingOrm) LoadRelated(md interface{}, name string, args ...utils.KV) (int64, error) {
	return 0, nil
}

// LoadRelatedWithCtx is a no-op; it reports zero rows loaded.
func (d *DoNothingOrm) LoadRelatedWithCtx(ctx context.Context, md interface{}, name string, args ...utils.KV) (int64, error) {
	return 0, nil
}

// QueryM2M is a no-op; it returns a nil QueryM2Mer.
func (d *DoNothingOrm) QueryM2M(md interface{}, name string) QueryM2Mer {
	return nil
}

// QueryM2MWithCtx is a no-op; it returns a nil QueryM2Mer.
//
// Deprecated: the context parameter will not take effect.
func (d *DoNothingOrm) QueryM2MWithCtx(ctx context.Context, md interface{}, name string) QueryM2Mer {
	return nil
}

// QueryTable is a no-op; it returns a nil QuerySeter.
func (d *DoNothingOrm) QueryTable(ptrStructOrTableName interface{}) QuerySeter {
	return nil
}

// QueryTableWithCtx is a no-op; it returns a nil QuerySeter.
//
// Deprecated: the context parameter will not take effect.
func (d *DoNothingOrm) QueryTableWithCtx(ctx context.Context, ptrStructOrTableName interface{}) QuerySeter {
	return nil
}

// DBStats is a no-op; it returns nil statistics.
func (d *DoNothingOrm) DBStats() *sql.DBStats {
	return nil
}

// Insert is a no-op; it reports id 0 and no error.
func (d *DoNothingOrm) Insert(md interface{}) (int64, error) {
	return 0, nil
}

// InsertWithCtx is a no-op; it reports id 0 and no error.
func (d *DoNothingOrm) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) {
	return 0, nil
}

// InsertOrUpdate is a no-op; it reports id 0 and no error.
func (d *DoNothingOrm) InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64, error) {
	return 0, nil
}

// InsertOrUpdateWithCtx is a no-op; it reports id 0 and no error.
func (d *DoNothingOrm) InsertOrUpdateWithCtx(ctx context.Context, md interface{}, colConflitAndArgs ...string) (int64, error) {
	return 0, nil
}

// InsertMulti is a no-op; it reports zero rows inserted.
func (d *DoNothingOrm) InsertMulti(bulk int, mds interface{}) (int64, error) {
	return 0, nil
}

// InsertMultiWithCtx is a no-op; it reports zero rows inserted.
func (d *DoNothingOrm) InsertMultiWithCtx(ctx context.Context, bulk int, mds interface{}) (int64, error) {
	return 0, nil
}

// Update is a no-op; it reports zero rows updated.
func (d *DoNothingOrm) Update(md interface{}, cols ...string) (int64, error) {
	return 0, nil
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) UpdateWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) Delete(md interface{}, cols ...string) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) DeleteWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) Raw(query string, args ...interface{}) RawSeter {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) RawWithCtx(ctx context.Context, query string, args ...interface{}) RawSeter {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) Driver() Driver {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) Begin() (TxOrmer, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) BeginWithCtx(ctx context.Context) (TxOrmer, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) BeginWithOpts(opts *sql.TxOptions) (TxOrmer, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) BeginWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions) (TxOrmer, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) DoTx(task func(ctx context.Context, txOrm TxOrmer) error) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) DoTxWithCtx(ctx context.Context, task func(ctx context.Context, txOrm TxOrmer) error) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) DoTxWithOpts(opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingOrm) DoTxWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoNothingTxOrm is similar with DoNothingOrm, usually you use it to test
|
|
||||||
type DoNothingTxOrm struct {
|
|
||||||
DoNothingOrm
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingTxOrm) Commit() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DoNothingTxOrm) Rollback() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
@ -1,40 +0,0 @@
|
|||||||
// Copyright 2020 beego
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FilterChain is used to build a Filter.
// A FilterChain wraps the next Filter in the chain; don't forget to call
// next(...) inside your Filter, otherwise the rest of the chain (and the
// real Orm operation at its end) never runs.
type FilterChain func(next Filter) Filter

// Filter's behavior is a little bit strange.
// It is only called when users invoke methods of Ormer.
// The return value is an array; each element corresponds to one return
// value of the intercepted method. For example, Ormer's Read method only
// returns an error, so the filter processing this method should return an
// array whose first element is that error; Ormer's ReadOrCreateWithCtx
// returns three values, so the Filter's result should contain three values.
type Filter func(ctx context.Context, inv *Invocation) []interface{}

// globalFilterChains holds chains that will be applied to every orm
// instance built after registration.
var globalFilterChains = make([]FilterChain, 0, 4)

// AddGlobalFilterChain adds a new FilterChain.
// All orm instances built after this invocation will use this filterChain,
// but instances built before this invocation will not be affected.
func AddGlobalFilterChain(filterChain ...FilterChain) {
	globalFilterChains = append(globalFilterChains, filterChain...)
}
|
|
@ -1,534 +0,0 @@
|
|||||||
// Copyright 2020 beego
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/core/logs"
|
|
||||||
"github.com/beego/beego/v2/core/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// TxNameKey is the context key under which callers may store a
	// transaction name; getTxNameFromCtx reads it when a tx begins.
	// NOTE(review): a plain string context key can collide with keys from
	// other packages; changing it to an unexported type would break callers
	// that set the value with the literal "TxName", so it is left as-is.
	TxNameKey = "TxName"
)

// compile-time assertions: the decorator serves as both Ormer and TxOrmer.
var (
	_ Ormer   = new(filterOrmDecorator)
	_ TxOrmer = new(filterOrmDecorator)
)

// filterOrmDecorator routes every Ormer/TxOrmer call through the Filter
// chain rooted at root before delegating to the embedded implementation.
// The same type serves both roles: NewFilterOrmDecorator fills TxBeginner,
// while NewFilterTxOrmDecorator fills TxCommitter plus the tx metadata.
type filterOrmDecorator struct {
	ormer
	TxBeginner
	TxCommitter

	// root is the outermost filter; invoking it runs the whole chain.
	root Filter

	insideTx    bool      // true only for decorators built around a TxOrmer
	txStartTime time.Time // set when the transaction decorator is created
	txName      string    // optional name taken from the Begin context
}
|
|
||||||
|
|
||||||
func NewFilterOrmDecorator(delegate Ormer, filterChains ...FilterChain) Ormer {
|
|
||||||
res := &filterOrmDecorator{
|
|
||||||
ormer: delegate,
|
|
||||||
TxBeginner: delegate,
|
|
||||||
root: func(ctx context.Context, inv *Invocation) []interface{} {
|
|
||||||
return inv.execute(ctx)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for i := len(filterChains) - 1; i >= 0; i-- {
|
|
||||||
node := filterChains[i]
|
|
||||||
res.root = node(res.root)
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewFilterTxOrmDecorator(delegate TxOrmer, root Filter, txName string) TxOrmer {
|
|
||||||
res := &filterOrmDecorator{
|
|
||||||
ormer: delegate,
|
|
||||||
TxCommitter: delegate,
|
|
||||||
root: root,
|
|
||||||
insideTx: true,
|
|
||||||
txStartTime: time.Now(),
|
|
||||||
txName: txName,
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read delegates to ReadWithCtx with a background context.
func (f *filterOrmDecorator) Read(md interface{}, cols ...string) error {
	return f.ReadWithCtx(context.Background(), md, cols...)
}

// ReadWithCtx routes the Read operation through the filter chain before
// delegating to the wrapped ormer.
func (f *filterOrmDecorator) ReadWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	// mi may be nil if md's model is not registered; filters must cope.
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "ReadWithCtx",
		Args:        []interface{}{md, cols},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			err := f.ormer.ReadWithCtx(c, md, cols...)
			return []interface{}{err}
		},
	}
	res := f.root(ctx, inv)
	return f.convertError(res[0])
}

// ReadForUpdate delegates to ReadForUpdateWithCtx with a background context.
func (f *filterOrmDecorator) ReadForUpdate(md interface{}, cols ...string) error {
	return f.ReadForUpdateWithCtx(context.Background(), md, cols...)
}

// ReadForUpdateWithCtx routes ReadForUpdate through the filter chain.
func (f *filterOrmDecorator) ReadForUpdateWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "ReadForUpdateWithCtx",
		Args:        []interface{}{md, cols},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			err := f.ormer.ReadForUpdateWithCtx(c, md, cols...)
			return []interface{}{err}
		},
	}
	res := f.root(ctx, inv)
	return f.convertError(res[0])
}

// ReadOrCreate delegates to ReadOrCreateWithCtx with a background context.
func (f *filterOrmDecorator) ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error) {
	return f.ReadOrCreateWithCtx(context.Background(), md, col1, cols...)
}

// ReadOrCreateWithCtx routes ReadOrCreate through the filter chain.
// The filter result carries three slots: created-flag, id, error.
func (f *filterOrmDecorator) ReadOrCreateWithCtx(ctx context.Context, md interface{}, col1 string, cols ...string) (bool, int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "ReadOrCreateWithCtx",
		Args:        []interface{}{md, col1, cols},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			ok, res, err := f.ormer.ReadOrCreateWithCtx(c, md, col1, cols...)
			return []interface{}{ok, res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(bool), res[1].(int64), f.convertError(res[2])
}

// LoadRelated delegates to LoadRelatedWithCtx with a background context.
func (f *filterOrmDecorator) LoadRelated(md interface{}, name string, args ...utils.KV) (int64, error) {
	return f.LoadRelatedWithCtx(context.Background(), md, name, args...)
}

// LoadRelatedWithCtx routes LoadRelated through the filter chain.
func (f *filterOrmDecorator) LoadRelatedWithCtx(ctx context.Context, md interface{}, name string, args ...utils.KV) (int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "LoadRelatedWithCtx",
		Args:        []interface{}{md, name, args},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.LoadRelatedWithCtx(c, md, name, args...)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}
|
|
||||||
|
|
||||||
// QueryM2M routes QueryM2M through the filter chain with a background
// context. The inner closure ignores the filter-supplied context because the
// delegate's QueryM2M takes none.
func (f *filterOrmDecorator) QueryM2M(md interface{}, name string) QueryM2Mer {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "QueryM2M",
		Args:        []interface{}{md, name},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res := f.ormer.QueryM2M(md, name)
			return []interface{}{res}
		},
	}
	res := f.root(context.Background(), inv)
	// explicit nil check: asserting a nil slot to QueryM2Mer would panic
	if res[0] == nil {
		return nil
	}
	return res[0].(QueryM2Mer)
}

// NOTE: this method is deprecated, context parameter will not take effect.
func (f *filterOrmDecorator) QueryM2MWithCtx(_ context.Context, md interface{}, name string) QueryM2Mer {
	logs.Warn("QueryM2MWithCtx is DEPRECATED. Use methods with `WithCtx` on QueryM2Mer suffix as replacement.")
	return f.QueryM2M(md, name)
}

// QueryTable routes QueryTable through the filter chain. The argument may be
// either a table name string or a pointer to a registered model struct; in
// the struct case Md/mi are populated for filters, otherwise mi is looked up
// by full name and may remain nil.
func (f *filterOrmDecorator) QueryTable(ptrStructOrTableName interface{}) QuerySeter {
	var (
		name string
		md   interface{}
		mi   *modelInfo
	)

	if table, ok := ptrStructOrTableName.(string); ok {
		name = table
	} else {
		name = getFullName(indirectType(reflect.TypeOf(ptrStructOrTableName)))
		md = ptrStructOrTableName
	}

	if m, ok := defaultModelCache.getByFullName(name); ok {
		mi = m
	}

	inv := &Invocation{
		Method:      "QueryTable",
		Args:        []interface{}{ptrStructOrTableName},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		Md:          md,
		mi:          mi,
		f: func(c context.Context) []interface{} {
			res := f.ormer.QueryTable(ptrStructOrTableName)
			return []interface{}{res}
		},
	}
	res := f.root(context.Background(), inv)

	if res[0] == nil {
		return nil
	}
	return res[0].(QuerySeter)
}

// NOTE: this method is deprecated, context parameter will not take effect.
func (f *filterOrmDecorator) QueryTableWithCtx(_ context.Context, ptrStructOrTableName interface{}) QuerySeter {
	// NOTE(review): the log message below is missing a space after
	// `WithCtx`; kept byte-identical here because it is runtime output.
	logs.Warn("QueryTableWithCtx is DEPRECATED. Use methods with `WithCtx`on QuerySeter suffix as replacement.")
	return f.QueryTable(ptrStructOrTableName)
}

// DBStats routes DBStats through the filter chain; the invocation carries no
// model info because the operation is connection-level, not model-level.
func (f *filterOrmDecorator) DBStats() *sql.DBStats {
	inv := &Invocation{
		Method:      "DBStats",
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res := f.ormer.DBStats()
			return []interface{}{res}
		},
	}
	res := f.root(context.Background(), inv)

	if res[0] == nil {
		return nil
	}

	return res[0].(*sql.DBStats)
}
|
|
||||||
|
|
||||||
// Insert delegates to InsertWithCtx with a background context.
func (f *filterOrmDecorator) Insert(md interface{}) (int64, error) {
	return f.InsertWithCtx(context.Background(), md)
}

// InsertWithCtx routes Insert through the filter chain.
func (f *filterOrmDecorator) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "InsertWithCtx",
		Args:        []interface{}{md},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.InsertWithCtx(c, md)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}

// InsertOrUpdate delegates to InsertOrUpdateWithCtx with a background context.
func (f *filterOrmDecorator) InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64, error) {
	return f.InsertOrUpdateWithCtx(context.Background(), md, colConflitAndArgs...)
}

// InsertOrUpdateWithCtx routes InsertOrUpdate through the filter chain.
func (f *filterOrmDecorator) InsertOrUpdateWithCtx(ctx context.Context, md interface{}, colConflitAndArgs ...string) (int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "InsertOrUpdateWithCtx",
		Args:        []interface{}{md, colConflitAndArgs},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.InsertOrUpdateWithCtx(c, md, colConflitAndArgs...)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}

// InsertMulti delegates to InsertMultiWithCtx with a background context.
func (f *filterOrmDecorator) InsertMulti(bulk int, mds interface{}) (int64, error) {
	return f.InsertMultiWithCtx(context.Background(), bulk, mds)
}

// InsertMultiWithCtx uses the first element's model info
func (f *filterOrmDecorator) InsertMultiWithCtx(ctx context.Context, bulk int, mds interface{}) (int64, error) {
	var (
		md interface{}
		mi *modelInfo
	)

	sind := reflect.Indirect(reflect.ValueOf(mds))

	// Md/mi stay nil when mds is not a non-empty slice/array; filters must
	// tolerate that, and the delegate reports the real error.
	if (sind.Kind() == reflect.Array || sind.Kind() == reflect.Slice) && sind.Len() > 0 {
		ind := reflect.Indirect(sind.Index(0))
		md = ind.Interface()
		mi, _ = defaultModelCache.getByMd(md)
	}

	inv := &Invocation{
		Method:      "InsertMultiWithCtx",
		Args:        []interface{}{bulk, mds},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.InsertMultiWithCtx(c, bulk, mds)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}

// Update delegates to UpdateWithCtx with a background context.
func (f *filterOrmDecorator) Update(md interface{}, cols ...string) (int64, error) {
	return f.UpdateWithCtx(context.Background(), md, cols...)
}

// UpdateWithCtx routes Update through the filter chain.
func (f *filterOrmDecorator) UpdateWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "UpdateWithCtx",
		Args:        []interface{}{md, cols},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.UpdateWithCtx(c, md, cols...)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}

// Delete delegates to DeleteWithCtx with a background context.
func (f *filterOrmDecorator) Delete(md interface{}, cols ...string) (int64, error) {
	return f.DeleteWithCtx(context.Background(), md, cols...)
}

// DeleteWithCtx routes Delete through the filter chain.
func (f *filterOrmDecorator) DeleteWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
	mi, _ := defaultModelCache.getByMd(md)
	inv := &Invocation{
		Method:      "DeleteWithCtx",
		Args:        []interface{}{md, cols},
		Md:          md,
		mi:          mi,
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.ormer.DeleteWithCtx(c, md, cols...)
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(int64), f.convertError(res[1])
}
|
|
||||||
|
|
||||||
// Raw delegates to RawWithCtx with a background context.
func (f *filterOrmDecorator) Raw(query string, args ...interface{}) RawSeter {
	return f.RawWithCtx(context.Background(), query, args...)
}

// RawWithCtx routes a raw SQL invocation through the filter chain. No model
// info is attached because a raw query is not tied to a registered model.
func (f *filterOrmDecorator) RawWithCtx(ctx context.Context, query string, args ...interface{}) RawSeter {
	inv := &Invocation{
		Method:      "RawWithCtx",
		Args:        []interface{}{query, args},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res := f.ormer.RawWithCtx(c, query, args...)
			return []interface{}{res}
		},
	}
	res := f.root(ctx, inv)

	// explicit nil check: asserting a nil slot to RawSeter would panic
	if res[0] == nil {
		return nil
	}
	return res[0].(RawSeter)
}

// Driver routes the Driver accessor through the filter chain with a
// background context.
func (f *filterOrmDecorator) Driver() Driver {
	inv := &Invocation{
		Method:      "Driver",
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res := f.ormer.Driver()
			return []interface{}{res}
		},
	}
	res := f.root(context.Background(), inv)
	if res[0] == nil {
		return nil
	}
	return res[0].(Driver)
}
|
|
||||||
|
|
||||||
// Begin delegates to BeginWithCtxAndOpts with a background context and no
// transaction options.
func (f *filterOrmDecorator) Begin() (TxOrmer, error) {
	return f.BeginWithCtxAndOpts(context.Background(), nil)
}

// BeginWithCtx delegates to BeginWithCtxAndOpts with no transaction options.
func (f *filterOrmDecorator) BeginWithCtx(ctx context.Context) (TxOrmer, error) {
	return f.BeginWithCtxAndOpts(ctx, nil)
}

// BeginWithOpts delegates to BeginWithCtxAndOpts with a background context.
func (f *filterOrmDecorator) BeginWithOpts(opts *sql.TxOptions) (TxOrmer, error) {
	return f.BeginWithCtxAndOpts(context.Background(), opts)
}

// BeginWithCtxAndOpts starts a transaction through the filter chain and
// wraps the resulting TxOrmer in a new filter decorator that shares this
// decorator's root, so calls inside the tx are filtered too.
func (f *filterOrmDecorator) BeginWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions) (TxOrmer, error) {
	inv := &Invocation{
		Method:      "BeginWithCtxAndOpts",
		Args:        []interface{}{opts},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		f: func(c context.Context) []interface{} {
			res, err := f.TxBeginner.BeginWithCtxAndOpts(c, opts)
			// NOTE(review): the delegate's result is wrapped even when err
			// is non-nil, so on failure the caller receives a decorator
			// around a nil TxOrmer together with the error.
			res = NewFilterTxOrmDecorator(res, f.root, getTxNameFromCtx(c))
			return []interface{}{res, err}
		},
	}
	res := f.root(ctx, inv)
	return res[0].(TxOrmer), f.convertError(res[1])
}

// DoTx delegates to DoTxWithCtxAndOpts with a background context and no
// transaction options.
func (f *filterOrmDecorator) DoTx(task func(ctx context.Context, txOrm TxOrmer) error) error {
	return f.DoTxWithCtxAndOpts(context.Background(), nil, task)
}

// DoTxWithCtx delegates to DoTxWithCtxAndOpts with no transaction options.
func (f *filterOrmDecorator) DoTxWithCtx(ctx context.Context, task func(ctx context.Context, txOrm TxOrmer) error) error {
	return f.DoTxWithCtxAndOpts(ctx, nil, task)
}

// DoTxWithOpts delegates to DoTxWithCtxAndOpts with a background context.
func (f *filterOrmDecorator) DoTxWithOpts(opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
	return f.DoTxWithCtxAndOpts(context.Background(), opts, task)
}

// DoTxWithCtxAndOpts runs task inside a transaction (via doTxTemplate),
// routing the whole begin/run/commit-or-rollback sequence through the
// filter chain as a single invocation.
func (f *filterOrmDecorator) DoTxWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
	inv := &Invocation{
		Method:      "DoTxWithCtxAndOpts",
		Args:        []interface{}{opts, task},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		TxName:      getTxNameFromCtx(ctx),
		f: func(c context.Context) []interface{} {
			err := doTxTemplate(c, f, opts, task)
			return []interface{}{err}
		},
	}
	res := f.root(ctx, inv)
	return f.convertError(res[0])
}
|
|
||||||
|
|
||||||
// Commit routes the transaction commit through the filter chain with a
// background context. Valid only on decorators built by
// NewFilterTxOrmDecorator (TxCommitter is nil otherwise).
func (f *filterOrmDecorator) Commit() error {
	inv := &Invocation{
		Method:      "Commit",
		Args:        []interface{}{},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		TxName:      f.txName,
		f: func(c context.Context) []interface{} {
			err := f.TxCommitter.Commit()
			return []interface{}{err}
		},
	}
	res := f.root(context.Background(), inv)
	return f.convertError(res[0])
}

// Rollback routes the transaction rollback through the filter chain with a
// background context. Valid only on tx decorators.
func (f *filterOrmDecorator) Rollback() error {
	inv := &Invocation{
		Method:      "Rollback",
		Args:        []interface{}{},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		TxName:      f.txName,
		f: func(c context.Context) []interface{} {
			err := f.TxCommitter.Rollback()
			return []interface{}{err}
		},
	}
	res := f.root(context.Background(), inv)
	return f.convertError(res[0])
}

// RollbackUnlessCommit routes the conditional rollback through the filter
// chain with a background context. Valid only on tx decorators.
func (f *filterOrmDecorator) RollbackUnlessCommit() error {
	inv := &Invocation{
		Method:      "RollbackUnlessCommit",
		Args:        []interface{}{},
		InsideTx:    f.insideTx,
		TxStartTime: f.txStartTime,
		TxName:      f.txName,
		f: func(c context.Context) []interface{} {
			err := f.TxCommitter.RollbackUnlessCommit()
			return []interface{}{err}
		},
	}
	res := f.root(context.Background(), inv)
	return f.convertError(res[0])
}
|
|
||||||
|
|
||||||
func (*filterOrmDecorator) convertError(v interface{}) error {
|
|
||||||
if v == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return v.(error)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTxNameFromCtx(ctx context.Context) string {
|
|
||||||
txName := ""
|
|
||||||
if n, ok := ctx.Value(TxNameKey).(string); ok {
|
|
||||||
txName = n
|
|
||||||
}
|
|
||||||
return txName
|
|
||||||
}
|
|
@ -1,103 +0,0 @@
|
|||||||
// Copyright 2020 beego-dev
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package hints
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/beego/beego/v2/core/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// query level
|
|
||||||
KeyForceIndex = iota
|
|
||||||
KeyUseIndex
|
|
||||||
KeyIgnoreIndex
|
|
||||||
KeyForUpdate
|
|
||||||
KeyLimit
|
|
||||||
KeyOffset
|
|
||||||
KeyOrderBy
|
|
||||||
KeyRelDepth
|
|
||||||
)
|
|
||||||
|
|
||||||
type Hint struct {
|
|
||||||
key interface{}
|
|
||||||
value interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ utils.KV = new(Hint)
|
|
||||||
|
|
||||||
// GetKey return key
|
|
||||||
func (s *Hint) GetKey() interface{} {
|
|
||||||
return s.key
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetValue return value
|
|
||||||
func (s *Hint) GetValue() interface{} {
|
|
||||||
return s.value
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ utils.KV = new(Hint)
|
|
||||||
|
|
||||||
// ForceIndex return a hint about ForceIndex
|
|
||||||
func ForceIndex(indexes ...string) *Hint {
|
|
||||||
return NewHint(KeyForceIndex, indexes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseIndex return a hint about UseIndex
|
|
||||||
func UseIndex(indexes ...string) *Hint {
|
|
||||||
return NewHint(KeyUseIndex, indexes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IgnoreIndex return a hint about IgnoreIndex
|
|
||||||
func IgnoreIndex(indexes ...string) *Hint {
|
|
||||||
return NewHint(KeyIgnoreIndex, indexes)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForUpdate return a hint about ForUpdate
|
|
||||||
func ForUpdate() *Hint {
|
|
||||||
return NewHint(KeyForUpdate, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultRelDepth return a hint about DefaultRelDepth
|
|
||||||
func DefaultRelDepth() *Hint {
|
|
||||||
return NewHint(KeyRelDepth, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RelDepth return a hint about RelDepth
|
|
||||||
func RelDepth(d int) *Hint {
|
|
||||||
return NewHint(KeyRelDepth, d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit return a hint about Limit
|
|
||||||
func Limit(d int64) *Hint {
|
|
||||||
return NewHint(KeyLimit, d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Offset return a hint about Offset
|
|
||||||
func Offset(d int64) *Hint {
|
|
||||||
return NewHint(KeyOffset, d)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrderBy return a hint about OrderBy
|
|
||||||
func OrderBy(s string) *Hint {
|
|
||||||
return NewHint(KeyOrderBy, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHint return a hint
|
|
||||||
func NewHint(key interface{}, value interface{}) *Hint {
|
|
||||||
return &Hint{
|
|
||||||
key: key,
|
|
||||||
value: value,
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,58 +0,0 @@
|
|||||||
// Copyright 2020 beego
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Invocation represents an "Orm" invocation
|
|
||||||
type Invocation struct {
|
|
||||||
Method string
|
|
||||||
// Md may be nil in some cases. It depends on method
|
|
||||||
Md interface{}
|
|
||||||
// the args are all arguments except context.Context
|
|
||||||
Args []interface{}
|
|
||||||
|
|
||||||
mi *modelInfo
|
|
||||||
// f is the Orm operation
|
|
||||||
f func(ctx context.Context) []interface{}
|
|
||||||
|
|
||||||
// insideTx indicates whether this is inside a transaction
|
|
||||||
InsideTx bool
|
|
||||||
TxStartTime time.Time
|
|
||||||
TxName string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (inv *Invocation) GetTableName() string {
|
|
||||||
if inv.mi != nil {
|
|
||||||
return inv.mi.table
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (inv *Invocation) execute(ctx context.Context) []interface{} {
|
|
||||||
return inv.f(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetPkFieldName return the primary key of this table
|
|
||||||
// if not found, "" is returned
|
|
||||||
func (inv *Invocation) GetPkFieldName() string {
|
|
||||||
if inv.mi.fields.pk != nil {
|
|
||||||
return inv.mi.fields.pk.name
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
@ -1,573 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"runtime/debug"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
odCascade = "cascade"
|
|
||||||
odSetNULL = "set_null"
|
|
||||||
odSetDefault = "set_default"
|
|
||||||
odDoNothing = "do_nothing"
|
|
||||||
defaultStructTagName = "orm"
|
|
||||||
defaultStructTagDelim = ";"
|
|
||||||
)
|
|
||||||
|
|
||||||
var defaultModelCache = NewModelCacheHandler()
|
|
||||||
|
|
||||||
// model info collection
|
|
||||||
type modelCache struct {
|
|
||||||
sync.RWMutex // only used outsite for bootStrap
|
|
||||||
orders []string
|
|
||||||
cache map[string]*modelInfo
|
|
||||||
cacheByFullName map[string]*modelInfo
|
|
||||||
done bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewModelCacheHandler generator of modelCache
|
|
||||||
func NewModelCacheHandler() *modelCache {
|
|
||||||
return &modelCache{
|
|
||||||
cache: make(map[string]*modelInfo),
|
|
||||||
cacheByFullName: make(map[string]*modelInfo),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// get all model info
|
|
||||||
func (mc *modelCache) all() map[string]*modelInfo {
|
|
||||||
m := make(map[string]*modelInfo, len(mc.cache))
|
|
||||||
for k, v := range mc.cache {
|
|
||||||
m[k] = v
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// get ordered model info
|
|
||||||
func (mc *modelCache) allOrdered() []*modelInfo {
|
|
||||||
m := make([]*modelInfo, 0, len(mc.orders))
|
|
||||||
for _, table := range mc.orders {
|
|
||||||
m = append(m, mc.cache[table])
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
|
|
||||||
// get model info by table name
|
|
||||||
func (mc *modelCache) get(table string) (mi *modelInfo, ok bool) {
|
|
||||||
mi, ok = mc.cache[table]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// get model info by full name
|
|
||||||
func (mc *modelCache) getByFullName(name string) (mi *modelInfo, ok bool) {
|
|
||||||
mi, ok = mc.cacheByFullName[name]
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mc *modelCache) getByMd(md interface{}) (*modelInfo, bool) {
|
|
||||||
val := reflect.ValueOf(md)
|
|
||||||
ind := reflect.Indirect(val)
|
|
||||||
typ := ind.Type()
|
|
||||||
name := getFullName(typ)
|
|
||||||
return mc.getByFullName(name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set model info to collection
|
|
||||||
func (mc *modelCache) set(table string, mi *modelInfo) *modelInfo {
|
|
||||||
mii := mc.cache[table]
|
|
||||||
mc.cache[table] = mi
|
|
||||||
mc.cacheByFullName[mi.fullName] = mi
|
|
||||||
if mii == nil {
|
|
||||||
mc.orders = append(mc.orders, table)
|
|
||||||
}
|
|
||||||
return mii
|
|
||||||
}
|
|
||||||
|
|
||||||
// clean all model info.
|
|
||||||
func (mc *modelCache) clean() {
|
|
||||||
mc.Lock()
|
|
||||||
defer mc.Unlock()
|
|
||||||
|
|
||||||
mc.orders = make([]string, 0)
|
|
||||||
mc.cache = make(map[string]*modelInfo)
|
|
||||||
mc.cacheByFullName = make(map[string]*modelInfo)
|
|
||||||
mc.done = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// bootstrap bootstrap for models
|
|
||||||
func (mc *modelCache) bootstrap() {
|
|
||||||
mc.Lock()
|
|
||||||
defer mc.Unlock()
|
|
||||||
if mc.done {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
models map[string]*modelInfo
|
|
||||||
)
|
|
||||||
if dataBaseCache.getDefault() == nil {
|
|
||||||
err = fmt.Errorf("must have one register DataBase alias named `default`")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
|
|
||||||
// set rel and reverse model
|
|
||||||
// RelManyToMany set the relTable
|
|
||||||
models = mc.all()
|
|
||||||
for _, mi := range models {
|
|
||||||
for _, fi := range mi.fields.columns {
|
|
||||||
if fi.rel || fi.reverse {
|
|
||||||
elm := fi.addrValue.Type().Elem()
|
|
||||||
if fi.fieldType == RelReverseMany || fi.fieldType == RelManyToMany {
|
|
||||||
elm = elm.Elem()
|
|
||||||
}
|
|
||||||
// check the rel or reverse model already register
|
|
||||||
name := getFullName(elm)
|
|
||||||
mii, ok := mc.getByFullName(name)
|
|
||||||
if !ok || mii.pkg != elm.PkgPath() {
|
|
||||||
err = fmt.Errorf("can not find rel in field `%s`, `%s` may be miss register", fi.fullName, elm.String())
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fi.relModelInfo = mii
|
|
||||||
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelManyToMany:
|
|
||||||
if fi.relThrough != "" {
|
|
||||||
if i := strings.LastIndex(fi.relThrough, "."); i != -1 && len(fi.relThrough) > (i+1) {
|
|
||||||
pn := fi.relThrough[:i]
|
|
||||||
rmi, ok := mc.getByFullName(fi.relThrough)
|
|
||||||
if !ok || pn != rmi.pkg {
|
|
||||||
err = fmt.Errorf("field `%s` wrong rel_through value `%s` cannot find table", fi.fullName, fi.relThrough)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fi.relThroughModelInfo = rmi
|
|
||||||
fi.relTable = rmi.table
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("field `%s` wrong rel_through value `%s`", fi.fullName, fi.relThrough)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
i := newM2MModelInfo(mi, mii)
|
|
||||||
if fi.relTable != "" {
|
|
||||||
i.table = fi.relTable
|
|
||||||
}
|
|
||||||
if v := mc.set(i.table, i); v != nil {
|
|
||||||
err = fmt.Errorf("the rel table name `%s` already registered, cannot be use, please change one", fi.relTable)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fi.relTable = i.table
|
|
||||||
fi.relThroughModelInfo = i
|
|
||||||
}
|
|
||||||
|
|
||||||
fi.relThroughModelInfo.isThrough = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check the rel filed while the relModelInfo also has filed point to current model
|
|
||||||
// if not exist, add a new field to the relModelInfo
|
|
||||||
models = mc.all()
|
|
||||||
for _, mi := range models {
|
|
||||||
for _, fi := range mi.fields.fieldsRel {
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelForeignKey, RelOneToOne, RelManyToMany:
|
|
||||||
inModel := false
|
|
||||||
for _, ffi := range fi.relModelInfo.fields.fieldsReverse {
|
|
||||||
if ffi.relModelInfo == mi {
|
|
||||||
inModel = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !inModel {
|
|
||||||
rmi := fi.relModelInfo
|
|
||||||
ffi := new(fieldInfo)
|
|
||||||
ffi.name = mi.name
|
|
||||||
ffi.column = ffi.name
|
|
||||||
ffi.fullName = rmi.fullName + "." + ffi.name
|
|
||||||
ffi.reverse = true
|
|
||||||
ffi.relModelInfo = mi
|
|
||||||
ffi.mi = rmi
|
|
||||||
if fi.fieldType == RelOneToOne {
|
|
||||||
ffi.fieldType = RelReverseOne
|
|
||||||
} else {
|
|
||||||
ffi.fieldType = RelReverseMany
|
|
||||||
}
|
|
||||||
if !rmi.fields.Add(ffi) {
|
|
||||||
added := false
|
|
||||||
for cnt := 0; cnt < 5; cnt++ {
|
|
||||||
ffi.name = fmt.Sprintf("%s%d", mi.name, cnt)
|
|
||||||
ffi.column = ffi.name
|
|
||||||
ffi.fullName = rmi.fullName + "." + ffi.name
|
|
||||||
if added = rmi.fields.Add(ffi); added {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !added {
|
|
||||||
panic(fmt.Errorf("cannot generate auto reverse field info `%s` to `%s`", fi.fullName, ffi.fullName))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
models = mc.all()
|
|
||||||
for _, mi := range models {
|
|
||||||
for _, fi := range mi.fields.fieldsRel {
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelManyToMany:
|
|
||||||
for _, ffi := range fi.relThroughModelInfo.fields.fieldsRel {
|
|
||||||
switch ffi.fieldType {
|
|
||||||
case RelOneToOne, RelForeignKey:
|
|
||||||
if ffi.relModelInfo == fi.relModelInfo {
|
|
||||||
fi.reverseFieldInfoTwo = ffi
|
|
||||||
}
|
|
||||||
if ffi.relModelInfo == mi {
|
|
||||||
fi.reverseField = ffi.name
|
|
||||||
fi.reverseFieldInfo = ffi
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if fi.reverseFieldInfoTwo == nil {
|
|
||||||
err = fmt.Errorf("can not find m2m field for m2m model `%s`, ensure your m2m model defined correct",
|
|
||||||
fi.relThroughModelInfo.fullName)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
models = mc.all()
|
|
||||||
for _, mi := range models {
|
|
||||||
for _, fi := range mi.fields.fieldsReverse {
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelReverseOne:
|
|
||||||
found := false
|
|
||||||
mForA:
|
|
||||||
for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelOneToOne] {
|
|
||||||
if ffi.relModelInfo == mi {
|
|
||||||
found = true
|
|
||||||
fi.reverseField = ffi.name
|
|
||||||
fi.reverseFieldInfo = ffi
|
|
||||||
|
|
||||||
ffi.reverseField = fi.name
|
|
||||||
ffi.reverseFieldInfo = fi
|
|
||||||
break mForA
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
err = fmt.Errorf("reverse field `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
case RelReverseMany:
|
|
||||||
found := false
|
|
||||||
mForB:
|
|
||||||
for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelForeignKey] {
|
|
||||||
if ffi.relModelInfo == mi {
|
|
||||||
found = true
|
|
||||||
fi.reverseField = ffi.name
|
|
||||||
fi.reverseFieldInfo = ffi
|
|
||||||
|
|
||||||
ffi.reverseField = fi.name
|
|
||||||
ffi.reverseFieldInfo = fi
|
|
||||||
|
|
||||||
break mForB
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
mForC:
|
|
||||||
for _, ffi := range fi.relModelInfo.fields.fieldsByType[RelManyToMany] {
|
|
||||||
conditions := fi.relThrough != "" && fi.relThrough == ffi.relThrough ||
|
|
||||||
fi.relTable != "" && fi.relTable == ffi.relTable ||
|
|
||||||
fi.relThrough == "" && fi.relTable == ""
|
|
||||||
if ffi.relModelInfo == mi && conditions {
|
|
||||||
found = true
|
|
||||||
|
|
||||||
fi.reverseField = ffi.reverseFieldInfoTwo.name
|
|
||||||
fi.reverseFieldInfo = ffi.reverseFieldInfoTwo
|
|
||||||
fi.relThroughModelInfo = ffi.relThroughModelInfo
|
|
||||||
fi.reverseFieldInfoTwo = ffi.reverseFieldInfo
|
|
||||||
fi.reverseFieldInfoM2M = ffi
|
|
||||||
ffi.reverseFieldInfoM2M = fi
|
|
||||||
|
|
||||||
break mForC
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
err = fmt.Errorf("reverse field for `%s` not found in model `%s`", fi.fullName, fi.relModelInfo.fullName)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
end:
|
|
||||||
if err != nil {
|
|
||||||
fmt.Println(err)
|
|
||||||
debug.PrintStack()
|
|
||||||
}
|
|
||||||
mc.done = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// register register models to model cache
|
|
||||||
func (mc *modelCache) register(prefixOrSuffixStr string, prefixOrSuffix bool, models ...interface{}) (err error) {
|
|
||||||
for _, model := range models {
|
|
||||||
val := reflect.ValueOf(model)
|
|
||||||
typ := reflect.Indirect(val).Type()
|
|
||||||
|
|
||||||
if val.Kind() != reflect.Ptr {
|
|
||||||
err = fmt.Errorf("<orm.RegisterModel> cannot use non-ptr model struct `%s`", getFullName(typ))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// For this case:
|
|
||||||
// u := &User{}
|
|
||||||
// registerModel(&u)
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
err = fmt.Errorf("<orm.RegisterModel> only allow ptr model struct, it looks you use two reference to the struct `%s`", typ)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if val.Elem().Kind() == reflect.Slice {
|
|
||||||
val = reflect.New(val.Elem().Type().Elem())
|
|
||||||
}
|
|
||||||
table := getTableName(val)
|
|
||||||
|
|
||||||
if prefixOrSuffixStr != "" {
|
|
||||||
if prefixOrSuffix {
|
|
||||||
table = prefixOrSuffixStr + table
|
|
||||||
} else {
|
|
||||||
table = table + prefixOrSuffixStr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// models's fullname is pkgpath + struct name
|
|
||||||
name := getFullName(typ)
|
|
||||||
if _, ok := mc.getByFullName(name); ok {
|
|
||||||
err = fmt.Errorf("<orm.RegisterModel> model `%s` repeat register, must be unique\n", name)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := mc.get(table); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
mi := newModelInfo(val)
|
|
||||||
if mi.fields.pk == nil {
|
|
||||||
outFor:
|
|
||||||
for _, fi := range mi.fields.fieldsDB {
|
|
||||||
if strings.ToLower(fi.name) == "id" {
|
|
||||||
switch fi.addrValue.Elem().Kind() {
|
|
||||||
case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
|
|
||||||
fi.auto = true
|
|
||||||
fi.pk = true
|
|
||||||
mi.fields.pk = fi
|
|
||||||
break outFor
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mi.table = table
|
|
||||||
mi.pkg = typ.PkgPath()
|
|
||||||
mi.model = model
|
|
||||||
mi.manual = true
|
|
||||||
|
|
||||||
mc.set(table, mi)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDbDropSQL get database scheme drop sql queries
|
|
||||||
func (mc *modelCache) getDbDropSQL(al *alias) (queries []string, err error) {
|
|
||||||
if len(mc.cache) == 0 {
|
|
||||||
err = errors.New("no Model found, need register your model")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
Q := al.DbBaser.TableQuote()
|
|
||||||
|
|
||||||
for _, mi := range mc.allOrdered() {
|
|
||||||
queries = append(queries, fmt.Sprintf(`DROP TABLE IF EXISTS %s%s%s`, Q, mi.table, Q))
|
|
||||||
}
|
|
||||||
return queries, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDbCreateSQL get database scheme creation sql queries
|
|
||||||
func (mc *modelCache) getDbCreateSQL(al *alias) (queries []string, tableIndexes map[string][]dbIndex, err error) {
|
|
||||||
if len(mc.cache) == 0 {
|
|
||||||
err = errors.New("no Model found, need register your model")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
Q := al.DbBaser.TableQuote()
|
|
||||||
T := al.DbBaser.DbTypes()
|
|
||||||
sep := fmt.Sprintf("%s, %s", Q, Q)
|
|
||||||
|
|
||||||
tableIndexes = make(map[string][]dbIndex)
|
|
||||||
|
|
||||||
for _, mi := range mc.allOrdered() {
|
|
||||||
sql := fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
|
|
||||||
sql += fmt.Sprintf("-- Table Structure for `%s`\n", mi.fullName)
|
|
||||||
sql += fmt.Sprintf("-- %s\n", strings.Repeat("-", 50))
|
|
||||||
|
|
||||||
sql += fmt.Sprintf("CREATE TABLE IF NOT EXISTS %s%s%s (\n", Q, mi.table, Q)
|
|
||||||
|
|
||||||
columns := make([]string, 0, len(mi.fields.fieldsDB))
|
|
||||||
|
|
||||||
sqlIndexes := [][]string{}
|
|
||||||
var commentIndexes []int // store comment indexes for postgres
|
|
||||||
|
|
||||||
for i, fi := range mi.fields.fieldsDB {
|
|
||||||
column := fmt.Sprintf(" %s%s%s ", Q, fi.column, Q)
|
|
||||||
col := getColumnTyp(al, fi)
|
|
||||||
|
|
||||||
if fi.auto {
|
|
||||||
switch al.Driver {
|
|
||||||
case DRSqlite, DRPostgres:
|
|
||||||
column += T["auto"]
|
|
||||||
default:
|
|
||||||
column += col + " " + T["auto"]
|
|
||||||
}
|
|
||||||
} else if fi.pk {
|
|
||||||
column += col + " " + T["pk"]
|
|
||||||
} else {
|
|
||||||
column += col
|
|
||||||
|
|
||||||
if !fi.null {
|
|
||||||
column += " " + "NOT NULL"
|
|
||||||
}
|
|
||||||
|
|
||||||
// if fi.initial.String() != "" {
|
|
||||||
// column += " DEFAULT " + fi.initial.String()
|
|
||||||
// }
|
|
||||||
|
|
||||||
// Append attribute DEFAULT
|
|
||||||
column += getColumnDefault(fi)
|
|
||||||
|
|
||||||
if fi.unique {
|
|
||||||
column += " " + "UNIQUE"
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.index {
|
|
||||||
sqlIndexes = append(sqlIndexes, []string{fi.column})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.Contains(column, "%COL%") {
|
|
||||||
column = strings.Replace(column, "%COL%", fi.column, -1)
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.description != "" && al.Driver != DRSqlite {
|
|
||||||
if al.Driver == DRPostgres {
|
|
||||||
commentIndexes = append(commentIndexes, i)
|
|
||||||
} else {
|
|
||||||
column += " " + fmt.Sprintf("COMMENT '%s'", fi.description)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
columns = append(columns, column)
|
|
||||||
}
|
|
||||||
|
|
||||||
if mi.model != nil {
|
|
||||||
allnames := getTableUnique(mi.addrField)
|
|
||||||
if !mi.manual && len(mi.uniques) > 0 {
|
|
||||||
allnames = append(allnames, mi.uniques)
|
|
||||||
}
|
|
||||||
for _, names := range allnames {
|
|
||||||
cols := make([]string, 0, len(names))
|
|
||||||
for _, name := range names {
|
|
||||||
if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
|
|
||||||
cols = append(cols, fi.column)
|
|
||||||
} else {
|
|
||||||
panic(fmt.Errorf("cannot found column `%s` when parse UNIQUE in `%s.TableUnique`", name, mi.fullName))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
column := fmt.Sprintf(" UNIQUE (%s%s%s)", Q, strings.Join(cols, sep), Q)
|
|
||||||
columns = append(columns, column)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sql += strings.Join(columns, ",\n")
|
|
||||||
sql += "\n)"
|
|
||||||
|
|
||||||
if al.Driver == DRMySQL {
|
|
||||||
var engine string
|
|
||||||
if mi.model != nil {
|
|
||||||
engine = getTableEngine(mi.addrField)
|
|
||||||
}
|
|
||||||
if engine == "" {
|
|
||||||
engine = al.Engine
|
|
||||||
}
|
|
||||||
sql += " ENGINE=" + engine
|
|
||||||
}
|
|
||||||
|
|
||||||
sql += ";"
|
|
||||||
if al.Driver == DRPostgres && len(commentIndexes) > 0 {
|
|
||||||
// append comments for postgres only
|
|
||||||
for _, index := range commentIndexes {
|
|
||||||
sql += fmt.Sprintf("\nCOMMENT ON COLUMN %s%s%s.%s%s%s is '%s';",
|
|
||||||
Q,
|
|
||||||
mi.table,
|
|
||||||
Q,
|
|
||||||
Q,
|
|
||||||
mi.fields.fieldsDB[index].column,
|
|
||||||
Q,
|
|
||||||
mi.fields.fieldsDB[index].description)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
queries = append(queries, sql)
|
|
||||||
|
|
||||||
if mi.model != nil {
|
|
||||||
for _, names := range getTableIndex(mi.addrField) {
|
|
||||||
cols := make([]string, 0, len(names))
|
|
||||||
for _, name := range names {
|
|
||||||
if fi, ok := mi.fields.GetByAny(name); ok && fi.dbcol {
|
|
||||||
cols = append(cols, fi.column)
|
|
||||||
} else {
|
|
||||||
panic(fmt.Errorf("cannot found column `%s` when parse INDEX in `%s.TableIndex`", name, mi.fullName))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sqlIndexes = append(sqlIndexes, cols)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, names := range sqlIndexes {
|
|
||||||
name := mi.table + "_" + strings.Join(names, "_")
|
|
||||||
cols := strings.Join(names, sep)
|
|
||||||
sql := fmt.Sprintf("CREATE INDEX %s%s%s ON %s%s%s (%s%s%s);", Q, name, Q, Q, mi.table, Q, Q, cols, Q)
|
|
||||||
|
|
||||||
index := dbIndex{}
|
|
||||||
index.Table = mi.table
|
|
||||||
index.Name = name
|
|
||||||
index.SQL = sql
|
|
||||||
|
|
||||||
tableIndexes[mi.table] = append(tableIndexes[mi.table], index)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetModelCache Clean model cache. Then you can re-RegisterModel.
|
|
||||||
// Common use this api for test case.
|
|
||||||
func ResetModelCache() {
|
|
||||||
defaultModelCache.clean()
|
|
||||||
}
|
|
@ -1,40 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
// RegisterModel register models
|
|
||||||
func RegisterModel(models ...interface{}) {
|
|
||||||
RegisterModelWithPrefix("", models...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterModelWithPrefix register models with a prefix
|
|
||||||
func RegisterModelWithPrefix(prefix string, models ...interface{}) {
|
|
||||||
if err := defaultModelCache.register(prefix, true, models...); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterModelWithSuffix register models with a suffix
|
|
||||||
func RegisterModelWithSuffix(suffix string, models ...interface{}) {
|
|
||||||
if err := defaultModelCache.register(suffix, false, models...); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BootStrap bootstrap models.
|
|
||||||
// make all model parsed and can not add more models
|
|
||||||
func BootStrap() {
|
|
||||||
defaultModelCache.bootstrap()
|
|
||||||
}
|
|
@ -1,485 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var errSkipField = errors.New("skip field")
|
|
||||||
|
|
||||||
// field info collection
|
|
||||||
type fields struct {
|
|
||||||
pk *fieldInfo
|
|
||||||
columns map[string]*fieldInfo
|
|
||||||
fields map[string]*fieldInfo
|
|
||||||
fieldsLow map[string]*fieldInfo
|
|
||||||
fieldsByType map[int][]*fieldInfo
|
|
||||||
fieldsRel []*fieldInfo
|
|
||||||
fieldsReverse []*fieldInfo
|
|
||||||
fieldsDB []*fieldInfo
|
|
||||||
rels []*fieldInfo
|
|
||||||
orders []string
|
|
||||||
dbcols []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// add field info
|
|
||||||
func (f *fields) Add(fi *fieldInfo) (added bool) {
|
|
||||||
if f.fields[fi.name] == nil && f.columns[fi.column] == nil {
|
|
||||||
f.columns[fi.column] = fi
|
|
||||||
f.fields[fi.name] = fi
|
|
||||||
f.fieldsLow[strings.ToLower(fi.name)] = fi
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if _, ok := f.fieldsByType[fi.fieldType]; !ok {
|
|
||||||
f.fieldsByType[fi.fieldType] = make([]*fieldInfo, 0)
|
|
||||||
}
|
|
||||||
f.fieldsByType[fi.fieldType] = append(f.fieldsByType[fi.fieldType], fi)
|
|
||||||
f.orders = append(f.orders, fi.column)
|
|
||||||
if fi.dbcol {
|
|
||||||
f.dbcols = append(f.dbcols, fi.column)
|
|
||||||
f.fieldsDB = append(f.fieldsDB, fi)
|
|
||||||
}
|
|
||||||
if fi.rel {
|
|
||||||
f.fieldsRel = append(f.fieldsRel, fi)
|
|
||||||
}
|
|
||||||
if fi.reverse {
|
|
||||||
f.fieldsReverse = append(f.fieldsReverse, fi)
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// get field info by name
|
|
||||||
func (f *fields) GetByName(name string) *fieldInfo {
|
|
||||||
return f.fields[name]
|
|
||||||
}
|
|
||||||
|
|
||||||
// get field info by column name
|
|
||||||
func (f *fields) GetByColumn(column string) *fieldInfo {
|
|
||||||
return f.columns[column]
|
|
||||||
}
|
|
||||||
|
|
||||||
// get field info by string, name is prior
|
|
||||||
func (f *fields) GetByAny(name string) (*fieldInfo, bool) {
|
|
||||||
if fi, ok := f.fields[name]; ok {
|
|
||||||
return fi, ok
|
|
||||||
}
|
|
||||||
if fi, ok := f.fieldsLow[strings.ToLower(name)]; ok {
|
|
||||||
return fi, ok
|
|
||||||
}
|
|
||||||
if fi, ok := f.columns[name]; ok {
|
|
||||||
return fi, ok
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new field info collection
|
|
||||||
func newFields() *fields {
|
|
||||||
f := new(fields)
|
|
||||||
f.fields = make(map[string]*fieldInfo)
|
|
||||||
f.fieldsLow = make(map[string]*fieldInfo)
|
|
||||||
f.columns = make(map[string]*fieldInfo)
|
|
||||||
f.fieldsByType = make(map[int][]*fieldInfo)
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// single field info
|
|
||||||
type fieldInfo struct {
|
|
||||||
dbcol bool // table column fk and onetoone
|
|
||||||
inModel bool
|
|
||||||
auto bool
|
|
||||||
pk bool
|
|
||||||
null bool
|
|
||||||
index bool
|
|
||||||
unique bool
|
|
||||||
colDefault bool // whether has default tag
|
|
||||||
toText bool
|
|
||||||
autoNow bool
|
|
||||||
autoNowAdd bool
|
|
||||||
rel bool // if type equal to RelForeignKey, RelOneToOne, RelManyToMany then true
|
|
||||||
reverse bool
|
|
||||||
isFielder bool // implement Fielder interface
|
|
||||||
mi *modelInfo
|
|
||||||
fieldIndex []int
|
|
||||||
fieldType int
|
|
||||||
name string
|
|
||||||
fullName string
|
|
||||||
column string
|
|
||||||
addrValue reflect.Value
|
|
||||||
sf reflect.StructField
|
|
||||||
initial StrTo // store the default value
|
|
||||||
size int
|
|
||||||
reverseField string
|
|
||||||
reverseFieldInfo *fieldInfo
|
|
||||||
reverseFieldInfoTwo *fieldInfo
|
|
||||||
reverseFieldInfoM2M *fieldInfo
|
|
||||||
relTable string
|
|
||||||
relThrough string
|
|
||||||
relThroughModelInfo *modelInfo
|
|
||||||
relModelInfo *modelInfo
|
|
||||||
digits int
|
|
||||||
decimals int
|
|
||||||
onDelete string
|
|
||||||
description string
|
|
||||||
timePrecision *int
|
|
||||||
}
|
|
||||||
|
|
||||||
// new field info
|
|
||||||
func newFieldInfo(mi *modelInfo, field reflect.Value, sf reflect.StructField, mName string) (fi *fieldInfo, err error) {
|
|
||||||
var (
|
|
||||||
tag string
|
|
||||||
tagValue string
|
|
||||||
initial StrTo // store the default value
|
|
||||||
fieldType int
|
|
||||||
attrs map[string]bool
|
|
||||||
tags map[string]string
|
|
||||||
addrField reflect.Value
|
|
||||||
)
|
|
||||||
|
|
||||||
fi = new(fieldInfo)
|
|
||||||
|
|
||||||
// if field which CanAddr is the follow type
|
|
||||||
// A value is addressable if it is an element of a slice,
|
|
||||||
// an element of an addressable array, a field of an
|
|
||||||
// addressable struct, or the result of dereferencing a pointer.
|
|
||||||
addrField = field
|
|
||||||
if field.CanAddr() && field.Kind() != reflect.Ptr {
|
|
||||||
addrField = field.Addr()
|
|
||||||
if _, ok := addrField.Interface().(Fielder); !ok {
|
|
||||||
if field.Kind() == reflect.Slice {
|
|
||||||
addrField = field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs, tags = parseStructTag(sf.Tag.Get(defaultStructTagName))
|
|
||||||
|
|
||||||
if _, ok := attrs["-"]; ok {
|
|
||||||
return nil, errSkipField
|
|
||||||
}
|
|
||||||
|
|
||||||
digits := tags["digits"]
|
|
||||||
decimals := tags["decimals"]
|
|
||||||
size := tags["size"]
|
|
||||||
onDelete := tags["on_delete"]
|
|
||||||
precision := tags["precision"]
|
|
||||||
initial.Clear()
|
|
||||||
if v, ok := tags["default"]; ok {
|
|
||||||
initial.Set(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
checkType:
|
|
||||||
switch f := addrField.Interface().(type) {
|
|
||||||
case Fielder:
|
|
||||||
fi.isFielder = true
|
|
||||||
if field.Kind() == reflect.Ptr {
|
|
||||||
err = fmt.Errorf("the model Fielder can not be use ptr")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fieldType = f.FieldType()
|
|
||||||
if fieldType&IsRelField > 0 {
|
|
||||||
err = fmt.Errorf("unsupport type custom field, please refer to https://github.com/beego/beego/v2/blob/master/orm/models_fields.go#L24-L42")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
tag = "rel"
|
|
||||||
tagValue = tags[tag]
|
|
||||||
if tagValue != "" {
|
|
||||||
switch tagValue {
|
|
||||||
case "fk":
|
|
||||||
fieldType = RelForeignKey
|
|
||||||
break checkType
|
|
||||||
case "one":
|
|
||||||
fieldType = RelOneToOne
|
|
||||||
break checkType
|
|
||||||
case "m2m":
|
|
||||||
fieldType = RelManyToMany
|
|
||||||
if tv := tags["rel_table"]; tv != "" {
|
|
||||||
fi.relTable = tv
|
|
||||||
} else if tv := tags["rel_through"]; tv != "" {
|
|
||||||
fi.relThrough = tv
|
|
||||||
}
|
|
||||||
break checkType
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("rel only allow these value: fk, one, m2m")
|
|
||||||
goto wrongTag
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tag = "reverse"
|
|
||||||
tagValue = tags[tag]
|
|
||||||
if tagValue != "" {
|
|
||||||
switch tagValue {
|
|
||||||
case "one":
|
|
||||||
fieldType = RelReverseOne
|
|
||||||
break checkType
|
|
||||||
case "many":
|
|
||||||
fieldType = RelReverseMany
|
|
||||||
if tv := tags["rel_table"]; tv != "" {
|
|
||||||
fi.relTable = tv
|
|
||||||
} else if tv := tags["rel_through"]; tv != "" {
|
|
||||||
fi.relThrough = tv
|
|
||||||
}
|
|
||||||
break checkType
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("reverse only allow these value: one, many")
|
|
||||||
goto wrongTag
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldType, err = getFieldType(addrField)
|
|
||||||
if err != nil {
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
if fieldType == TypeVarCharField {
|
|
||||||
switch tags["type"] {
|
|
||||||
case "char":
|
|
||||||
fieldType = TypeCharField
|
|
||||||
case "text":
|
|
||||||
fieldType = TypeTextField
|
|
||||||
case "json":
|
|
||||||
fieldType = TypeJSONField
|
|
||||||
case "jsonb":
|
|
||||||
fieldType = TypeJsonbField
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if fieldType == TypeFloatField && (digits != "" || decimals != "") {
|
|
||||||
fieldType = TypeDecimalField
|
|
||||||
}
|
|
||||||
if fieldType == TypeDateTimeField && tags["type"] == "date" {
|
|
||||||
fieldType = TypeDateField
|
|
||||||
}
|
|
||||||
if fieldType == TypeTimeField && tags["type"] == "time" {
|
|
||||||
fieldType = TypeTimeField
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// check the rel and reverse type
|
|
||||||
// rel should Ptr
|
|
||||||
// reverse should slice []*struct
|
|
||||||
switch fieldType {
|
|
||||||
case RelForeignKey, RelOneToOne, RelReverseOne:
|
|
||||||
if field.Kind() != reflect.Ptr {
|
|
||||||
err = fmt.Errorf("rel/reverse:one field must be *%s", field.Type().Name())
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
case RelManyToMany, RelReverseMany:
|
|
||||||
if field.Kind() != reflect.Slice {
|
|
||||||
err = fmt.Errorf("rel/reverse:many field must be slice")
|
|
||||||
goto end
|
|
||||||
} else {
|
|
||||||
if field.Type().Elem().Kind() != reflect.Ptr {
|
|
||||||
err = fmt.Errorf("rel/reverse:many slice must be []*%s", field.Type().Elem().Name())
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fieldType&IsFieldType == 0 {
|
|
||||||
err = fmt.Errorf("wrong field type")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
|
|
||||||
fi.fieldType = fieldType
|
|
||||||
fi.name = sf.Name
|
|
||||||
fi.column = getColumnName(fieldType, addrField, sf, tags["column"])
|
|
||||||
fi.addrValue = addrField
|
|
||||||
fi.sf = sf
|
|
||||||
fi.fullName = mi.fullName + mName + "." + sf.Name
|
|
||||||
|
|
||||||
fi.description = tags["description"]
|
|
||||||
fi.null = attrs["null"]
|
|
||||||
fi.index = attrs["index"]
|
|
||||||
fi.auto = attrs["auto"]
|
|
||||||
fi.pk = attrs["pk"]
|
|
||||||
fi.unique = attrs["unique"]
|
|
||||||
|
|
||||||
// Mark object property if there is attribute "default" in the orm configuration
|
|
||||||
if _, ok := tags["default"]; ok {
|
|
||||||
fi.colDefault = true
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fieldType {
|
|
||||||
case RelManyToMany, RelReverseMany, RelReverseOne:
|
|
||||||
fi.null = false
|
|
||||||
fi.index = false
|
|
||||||
fi.auto = false
|
|
||||||
fi.pk = false
|
|
||||||
fi.unique = false
|
|
||||||
default:
|
|
||||||
fi.dbcol = true
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fieldType {
|
|
||||||
case RelForeignKey, RelOneToOne, RelManyToMany:
|
|
||||||
fi.rel = true
|
|
||||||
if fieldType == RelOneToOne {
|
|
||||||
fi.unique = true
|
|
||||||
}
|
|
||||||
case RelReverseMany, RelReverseOne:
|
|
||||||
fi.reverse = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.rel && fi.dbcol {
|
|
||||||
switch onDelete {
|
|
||||||
case odCascade, odDoNothing:
|
|
||||||
case odSetDefault:
|
|
||||||
if !initial.Exist() {
|
|
||||||
err = errors.New("on_delete: set_default need set field a default value")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
case odSetNULL:
|
|
||||||
if !fi.null {
|
|
||||||
err = errors.New("on_delete: set_null need set field null")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if onDelete == "" {
|
|
||||||
onDelete = odCascade
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("on_delete value expected choice in `cascade,set_null,set_default,do_nothing`, unknown `%s`", onDelete)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fi.onDelete = onDelete
|
|
||||||
}
|
|
||||||
|
|
||||||
switch fieldType {
|
|
||||||
case TypeBooleanField:
|
|
||||||
case TypeVarCharField, TypeCharField, TypeJSONField, TypeJsonbField:
|
|
||||||
if size != "" {
|
|
||||||
v, e := StrTo(size).Int32()
|
|
||||||
if e != nil {
|
|
||||||
err = fmt.Errorf("wrong size value `%s`", size)
|
|
||||||
} else {
|
|
||||||
fi.size = int(v)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fi.size = 255
|
|
||||||
fi.toText = true
|
|
||||||
}
|
|
||||||
case TypeTextField:
|
|
||||||
fi.index = false
|
|
||||||
fi.unique = false
|
|
||||||
case TypeTimeField, TypeDateField, TypeDateTimeField:
|
|
||||||
if fieldType == TypeDateTimeField {
|
|
||||||
if precision != "" {
|
|
||||||
v, e := StrTo(precision).Int()
|
|
||||||
if e != nil {
|
|
||||||
err = fmt.Errorf("convert %s to int error:%v", precision, e)
|
|
||||||
} else {
|
|
||||||
fi.timePrecision = &v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if attrs["auto_now"] {
|
|
||||||
fi.autoNow = true
|
|
||||||
} else if attrs["auto_now_add"] {
|
|
||||||
fi.autoNowAdd = true
|
|
||||||
}
|
|
||||||
case TypeFloatField:
|
|
||||||
case TypeDecimalField:
|
|
||||||
d1 := digits
|
|
||||||
d2 := decimals
|
|
||||||
v1, er1 := StrTo(d1).Int8()
|
|
||||||
v2, er2 := StrTo(d2).Int8()
|
|
||||||
if er1 != nil || er2 != nil {
|
|
||||||
err = fmt.Errorf("wrong digits/decimals value %s/%s", d2, d1)
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fi.digits = int(v1)
|
|
||||||
fi.decimals = int(v2)
|
|
||||||
default:
|
|
||||||
switch {
|
|
||||||
case fieldType&IsIntegerField > 0:
|
|
||||||
case fieldType&IsRelField > 0:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fieldType&IsIntegerField == 0 {
|
|
||||||
if fi.auto {
|
|
||||||
err = fmt.Errorf("non-integer type cannot set auto")
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.auto || fi.pk {
|
|
||||||
if fi.auto {
|
|
||||||
switch addrField.Elem().Kind() {
|
|
||||||
case reflect.Int, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint32, reflect.Uint64:
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("auto primary key only support int, int32, int64, uint, uint32, uint64 but found `%s`", addrField.Elem().Kind())
|
|
||||||
goto end
|
|
||||||
}
|
|
||||||
fi.pk = true
|
|
||||||
}
|
|
||||||
fi.null = false
|
|
||||||
fi.index = false
|
|
||||||
fi.unique = false
|
|
||||||
}
|
|
||||||
|
|
||||||
if fi.unique {
|
|
||||||
fi.index = false
|
|
||||||
}
|
|
||||||
|
|
||||||
// can not set default for these type
|
|
||||||
if fi.auto || fi.pk || fi.unique || fieldType == TypeTimeField || fieldType == TypeDateField || fieldType == TypeDateTimeField {
|
|
||||||
initial.Clear()
|
|
||||||
}
|
|
||||||
|
|
||||||
if initial.Exist() {
|
|
||||||
v := initial
|
|
||||||
switch fieldType {
|
|
||||||
case TypeBooleanField:
|
|
||||||
_, err = v.Bool()
|
|
||||||
case TypeFloatField, TypeDecimalField:
|
|
||||||
_, err = v.Float64()
|
|
||||||
case TypeBitField:
|
|
||||||
_, err = v.Int8()
|
|
||||||
case TypeSmallIntegerField:
|
|
||||||
_, err = v.Int16()
|
|
||||||
case TypeIntegerField:
|
|
||||||
_, err = v.Int32()
|
|
||||||
case TypeBigIntegerField:
|
|
||||||
_, err = v.Int64()
|
|
||||||
case TypePositiveBitField:
|
|
||||||
_, err = v.Uint8()
|
|
||||||
case TypePositiveSmallIntegerField:
|
|
||||||
_, err = v.Uint16()
|
|
||||||
case TypePositiveIntegerField:
|
|
||||||
_, err = v.Uint32()
|
|
||||||
case TypePositiveBigIntegerField:
|
|
||||||
_, err = v.Uint64()
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
tag, tagValue = "default", tags["default"]
|
|
||||||
goto wrongTag
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fi.initial = initial
|
|
||||||
end:
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return
|
|
||||||
wrongTag:
|
|
||||||
return nil, fmt.Errorf("wrong tag format: `%s:\"%s\"`, %s", tag, tagValue, err)
|
|
||||||
}
|
|
@ -1,148 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// modelInfo describes a single registered model: its Go type identity,
// database table name, and the parsed set of field metadata.
type modelInfo struct {
	manual    bool          // table name was supplied manually rather than derived — TODO confirm against registration code
	isThrough bool          // model acts as the intermediate ("through") table of an m2m relation
	pkg       string        // package path of the model struct
	name      string        // struct type name
	fullName  string        // package-qualified name (pkg + "." + name)
	table     string        // database table name
	model     interface{}   // the registered model instance
	fields    *fields       // parsed per-field metadata collection
	addrField reflect.Value // store the original struct value
	uniques   []string      // column names forming a composite unique constraint
}
|
|
||||||
|
|
||||||
// new model info
|
|
||||||
func newModelInfo(val reflect.Value) (mi *modelInfo) {
|
|
||||||
mi = &modelInfo{}
|
|
||||||
mi.fields = newFields()
|
|
||||||
ind := reflect.Indirect(val)
|
|
||||||
mi.addrField = val
|
|
||||||
mi.name = ind.Type().Name()
|
|
||||||
mi.fullName = getFullName(ind.Type())
|
|
||||||
addModelFields(mi, ind, "", []int{})
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// addModelFields walks the exported fields of ind (the indirected model
// struct) and registers a fieldInfo for each on mi.
//
// index is the reflect FieldByIndex path accumulated through anonymous
// (embedded) structs, so nested fields keep a full index chain; mName is
// the dotted name prefix contributed by embedded structs.
func addModelFields(mi *modelInfo, ind reflect.Value, mName string, index []int) {
	var (
		err error
		fi  *fieldInfo
		sf  reflect.StructField
	)

	for i := 0; i < ind.NumField(); i++ {
		field := ind.Field(i)
		sf = ind.Type().Field(i)
		// if the field is unexported skip
		if sf.PkgPath != "" {
			continue
		}
		// add anonymous struct fields recursively, extending name and index path
		if sf.Anonymous {
			addModelFields(mi, field, mName+"."+sf.Name, append(index, i))
			continue
		}

		fi, err = newFieldInfo(mi, field, sf, mName)
		if err == errSkipField {
			// field explicitly excluded from the model (e.g. `orm:"-"`)
			err = nil
			continue
		} else if err != nil {
			break
		}
		// record current field index (embedded path + own position)
		fi.fieldIndex = append(fi.fieldIndex, index...)
		fi.fieldIndex = append(fi.fieldIndex, i)
		fi.mi = mi
		fi.inModel = true
		if !mi.fields.Add(fi) {
			err = fmt.Errorf("duplicate column name: %s", fi.column)
			break
		}
		if fi.pk {
			// exactly one primary key per model is allowed
			if mi.fields.pk != nil {
				err = fmt.Errorf("one model must have one pk field only")
				break
			} else {
				mi.fields.pk = fi
			}
		}
	}

	// registration errors are fatal: report the offending field and abort
	if err != nil {
		fmt.Println(fmt.Errorf("field: %s.%s, %s", ind.Type(), sf.Name, err))
		os.Exit(2)
	}
}
|
|
||||||
|
|
||||||
// newM2MModelInfo combines two related model infos into the synthetic
// modelInfo of the implicit m2m join table, preparing it for relation
// model queries.
//
// The generated table is named "<m1.table>_<m2.table>s" and carries an
// auto "id" primary key plus one foreign-key column per side, with a
// composite unique constraint over the two foreign keys.
func newM2MModelInfo(m1, m2 *modelInfo) (mi *modelInfo) {
	mi = new(modelInfo)
	mi.fields = newFields()
	mi.table = m1.table + "_" + m2.table + "s"
	mi.name = camelString(mi.table)
	mi.fullName = m1.pkg + "." + mi.name

	fa := new(fieldInfo) // pk
	f1 := new(fieldInfo) // m1 table RelForeignKey
	f2 := new(fieldInfo) // m2 table RelForeignKey
	fa.fieldType = TypeBigIntegerField
	fa.auto = true
	fa.pk = true
	fa.dbcol = true
	fa.name = "Id"
	fa.column = "id"
	fa.fullName = mi.fullName + "." + fa.name

	// both foreign keys are real database columns pointing back at the
	// participating models
	f1.dbcol = true
	f2.dbcol = true
	f1.fieldType = RelForeignKey
	f2.fieldType = RelForeignKey
	f1.name = camelString(m1.table)
	f2.name = camelString(m2.table)
	f1.fullName = mi.fullName + "." + f1.name
	f2.fullName = mi.fullName + "." + f2.name
	f1.column = m1.table + "_id"
	f2.column = m2.table + "_id"
	f1.rel = true
	f2.rel = true
	f1.relTable = m1.table
	f2.relTable = m2.table
	f1.relModelInfo = m1
	f2.relModelInfo = m2
	f1.mi = mi
	f2.mi = mi

	mi.fields.Add(fa)
	mi.fields.Add(f1)
	mi.fields.Add(f2)
	mi.fields.pk = fa

	// the pair of foreign keys must be unique together
	mi.uniques = []string{f1.column, f2.column}
	return
}
|
|
@ -1,243 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// supportTag enumerates every keyword recognized inside `orm:"..."`
// struct tags.
// Value 1 marks a boolean attribute (present/absent, e.g. "null");
// value 2 marks a tag carrying a parenthesized value, e.g. size(100).
var supportTag = map[string]int{
	"-":            1,
	"null":         1,
	"index":        1,
	"unique":       1,
	"pk":           1,
	"auto":         1,
	"auto_now":     1,
	"auto_now_add": 1,
	"size":         2,
	"column":       2,
	"default":      2,
	"rel":          2,
	"reverse":      2,
	"rel_table":    2,
	"rel_through":  2,
	"digits":       2,
	"decimals":     2,
	"on_delete":    2,
	"type":         2,
	"description":  2,
	"precision":    2,
}
|
|
||||||
|
|
||||||
// get reflect.Type name with package path.
|
|
||||||
func getFullName(typ reflect.Type) string {
|
|
||||||
return typ.PkgPath() + "." + typ.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// getTableName get struct table name.
|
|
||||||
// If the struct implement the TableName, then get the result as tablename
|
|
||||||
// else use the struct name which will apply snakeString.
|
|
||||||
func getTableName(val reflect.Value) string {
|
|
||||||
if fun := val.MethodByName("TableName"); fun.IsValid() {
|
|
||||||
vals := fun.Call([]reflect.Value{})
|
|
||||||
// has return and the first val is string
|
|
||||||
if len(vals) > 0 && vals[0].Kind() == reflect.String {
|
|
||||||
return vals[0].String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return snakeString(reflect.Indirect(val).Type().Name())
|
|
||||||
}
|
|
||||||
|
|
||||||
// get table engine, myisam or innodb.
|
|
||||||
func getTableEngine(val reflect.Value) string {
|
|
||||||
fun := val.MethodByName("TableEngine")
|
|
||||||
if fun.IsValid() {
|
|
||||||
vals := fun.Call([]reflect.Value{})
|
|
||||||
if len(vals) > 0 && vals[0].Kind() == reflect.String {
|
|
||||||
return vals[0].String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// get table index from method.
|
|
||||||
func getTableIndex(val reflect.Value) [][]string {
|
|
||||||
fun := val.MethodByName("TableIndex")
|
|
||||||
if fun.IsValid() {
|
|
||||||
vals := fun.Call([]reflect.Value{})
|
|
||||||
if len(vals) > 0 && vals[0].CanInterface() {
|
|
||||||
if d, ok := vals[0].Interface().([][]string); ok {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// get table unique from method
|
|
||||||
func getTableUnique(val reflect.Value) [][]string {
|
|
||||||
fun := val.MethodByName("TableUnique")
|
|
||||||
if fun.IsValid() {
|
|
||||||
vals := fun.Call([]reflect.Value{})
|
|
||||||
if len(vals) > 0 && vals[0].CanInterface() {
|
|
||||||
if d, ok := vals[0].Interface().([][]string); ok {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// get whether the table needs to be created for the database alias
|
|
||||||
func isApplicableTableForDB(val reflect.Value, db string) bool {
|
|
||||||
if !val.IsValid() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
fun := val.MethodByName("IsApplicableTableForDB")
|
|
||||||
if fun.IsValid() {
|
|
||||||
vals := fun.Call([]reflect.Value{reflect.ValueOf(db)})
|
|
||||||
if len(vals) > 0 && vals[0].Kind() == reflect.Bool {
|
|
||||||
return vals[0].Bool()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// getColumnName derives the database column name for a struct field.
//
// An explicit column tag is used verbatim; otherwise the configured
// naming strategy (nameStrategyMap) is applied to the field name.
// Relation fields get special treatment: fk/one-to-one columns are
// suffixed with "_id" — but only when the user gave no explicit name
// (note the check is against col, not column) — while m2m/reverse
// fields keep the raw struct field name.
func getColumnName(ft int, addrField reflect.Value, sf reflect.StructField, col string) string {
	column := col
	if col == "" {
		column = nameStrategyMap[nameStrategy](sf.Name)
	}
	switch ft {
	case RelForeignKey, RelOneToOne:
		// user-supplied names are respected as-is; only derived names
		// get the "_id" suffix
		if len(col) == 0 {
			column = column + "_id"
		}
	case RelManyToMany, RelReverseMany, RelReverseOne:
		column = sf.Name
	}
	return column
}
|
|
||||||
|
|
||||||
// getFieldType maps a struct field value to one of the orm Type* field
// constants.
//
// Resolution order: (1) exact match against pointers to supported
// primitives, (2) the indirected value's reflect.Kind, (3) a few known
// concrete types (sql.Null*, time.Time). Returns an error when no rule
// produces a valid field type.
func getFieldType(val reflect.Value) (ft int, err error) {
	switch val.Type() {
	case reflect.TypeOf(new(int8)):
		ft = TypeBitField
	case reflect.TypeOf(new(int16)):
		ft = TypeSmallIntegerField
	case reflect.TypeOf(new(int32)),
		reflect.TypeOf(new(int)):
		ft = TypeIntegerField
	case reflect.TypeOf(new(int64)):
		ft = TypeBigIntegerField
	case reflect.TypeOf(new(uint8)):
		ft = TypePositiveBitField
	case reflect.TypeOf(new(uint16)):
		ft = TypePositiveSmallIntegerField
	case reflect.TypeOf(new(uint32)),
		reflect.TypeOf(new(uint)):
		ft = TypePositiveIntegerField
	case reflect.TypeOf(new(uint64)):
		ft = TypePositiveBigIntegerField
	case reflect.TypeOf(new(float32)),
		reflect.TypeOf(new(float64)):
		ft = TypeFloatField
	case reflect.TypeOf(new(bool)):
		ft = TypeBooleanField
	case reflect.TypeOf(new(string)):
		ft = TypeVarCharField
	case reflect.TypeOf(new(time.Time)):
		ft = TypeDateTimeField
	default:
		// not a plain pointer-to-primitive: fall back to the kind of the
		// dereferenced value (covers named types like `type Age int`)
		elm := reflect.Indirect(val)
		switch elm.Kind() {
		case reflect.Int8:
			ft = TypeBitField
		case reflect.Int16:
			ft = TypeSmallIntegerField
		case reflect.Int32, reflect.Int:
			ft = TypeIntegerField
		case reflect.Int64:
			ft = TypeBigIntegerField
		case reflect.Uint8:
			ft = TypePositiveBitField
		case reflect.Uint16:
			ft = TypePositiveSmallIntegerField
		case reflect.Uint32, reflect.Uint:
			ft = TypePositiveIntegerField
		case reflect.Uint64:
			ft = TypePositiveBigIntegerField
		case reflect.Float32, reflect.Float64:
			ft = TypeFloatField
		case reflect.Bool:
			ft = TypeBooleanField
		case reflect.String:
			ft = TypeVarCharField
		default:
			if elm.Interface() == nil {
				panic(fmt.Errorf("%s is nil pointer, may be miss setting tag", val))
			}
			// last resort: recognize a handful of concrete struct types
			switch elm.Interface().(type) {
			case sql.NullInt64:
				ft = TypeBigIntegerField
			case sql.NullFloat64:
				ft = TypeFloatField
			case sql.NullBool:
				ft = TypeBooleanField
			case sql.NullString:
				ft = TypeVarCharField
			case time.Time:
				ft = TypeDateTimeField
			}
		}
	}
	if ft&IsFieldType == 0 {
		err = fmt.Errorf("unsupport field type %s, may be miss setting tag", val)
	}
	return
}
|
|
||||||
|
|
||||||
// parseStructTag splits an `orm:"..."` tag string (delimited by
// defaultStructTagDelim) into boolean attributes and valued tags, as
// declared in supportTag.
//
// attrs holds flags like "null" or "pk"; tags holds parenthesized values,
// e.g. size(100) -> tags["size"] = "100". Unknown entries are logged.
func parseStructTag(data string) (attrs map[string]bool, tags map[string]string) {
	attrs = make(map[string]bool)
	tags = make(map[string]string)
	for _, v := range strings.Split(data, defaultStructTagDelim) {
		if v == "" {
			continue
		}
		v = strings.TrimSpace(v)
		// t stays in scope for the else-if branch below (name lookup)
		if t := strings.ToLower(v); supportTag[t] == 1 {
			attrs[t] = true
		} else if i := strings.Index(v, "("); i > 0 && strings.Index(v, ")") == len(v)-1 {
			name := t[:i]
			if supportTag[name] == 2 {
				// keep the raw (non-lowercased) value between the parens
				v = v[i+1 : len(v)-1]
				tags[name] = v
			}
		} else {
			DebugLog.Println("unsupport orm tag", v)
		}
	}
	return
}
|
|
@ -1,661 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:build go1.8
|
|
||||||
// +build go1.8
|
|
||||||
|
|
||||||
// Package orm provide ORM for MySQL/PostgreSQL/sqlite
|
|
||||||
// Simple Usage
|
|
||||||
//
|
|
||||||
// package main
|
|
||||||
//
|
|
||||||
// import (
|
|
||||||
// "fmt"
|
|
||||||
// "github.com/beego/beego/v2/client/orm"
|
|
||||||
// _ "github.com/go-sql-driver/mysql" // import your used driver
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// // Model Struct
|
|
||||||
// type User struct {
|
|
||||||
// Id int `orm:"auto"`
|
|
||||||
// Name string `orm:"size(100)"`
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func init() {
|
|
||||||
// orm.RegisterDataBase("default", "mysql", "root:root@/my_db?charset=utf8", 30)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func main() {
|
|
||||||
// o := orm.NewOrm()
|
|
||||||
// user := User{Name: "slene"}
|
|
||||||
// // insert
|
|
||||||
// id, err := o.Insert(&user)
|
|
||||||
// // update
|
|
||||||
// user.Name = "astaxie"
|
|
||||||
// num, err := o.Update(&user)
|
|
||||||
// // read one
|
|
||||||
// u := User{Id: user.Id}
|
|
||||||
// err = o.Read(&u)
|
|
||||||
// // delete
|
|
||||||
// num, err = o.Delete(&u)
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// more docs: http://beego.vip/docs/mvc/model/overview.md
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses/order_clause"
|
|
||||||
"github.com/beego/beego/v2/client/orm/hints"
|
|
||||||
"github.com/beego/beego/v2/core/logs"
|
|
||||||
"github.com/beego/beego/v2/core/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DebugQueries identifies the query-debugging log mode.
const (
	DebugQueries = iota
)
|
|
||||||
|
|
||||||
// Define common vars: global debug switches, defaults, and the sentinel
// errors returned by Ormer/QuerySeter operations.
var (
	Debug            = false            // enable SQL debug logging
	DebugLog         = NewLog(os.Stdout) // destination for debug output
	DefaultRowsLimit = -1               // default row limit for query sets (-1 = no limit)
	DefaultRelsDepth = 2                // default depth when loading related models
	DefaultTimeLoc   = time.Local       // default timezone for time values
	ErrTxDone        = errors.New("<TxOrmer.Commit/Rollback> transaction already done")
	ErrMultiRows     = errors.New("<QuerySeter> return multi rows")
	ErrNoRows        = errors.New("<QuerySeter> no row found")
	ErrStmtClosed    = errors.New("<QuerySeter> stmt already closed")
	ErrArgs          = errors.New("<Ormer> args error may be empty")
	ErrNotImplement  = errors.New("have not implement")

	ErrLastInsertIdUnavailable = errors.New("<Ormer> last insert id is unavailable")
)
|
|
||||||
|
|
||||||
// Params is a map of named query parameters.
type Params map[string]interface{}

// ParamsList is an ordered list of query parameter values.
type ParamsList []interface{}

// ormBase is the shared implementation underlying the orm front-end types;
// it binds a database alias to a query executor.
type ormBase struct {
	alias *alias   // registered database alias (driver, TZ, DbBaser)
	db    dbQuerier // underlying query executor (db or tx)
}

// compile-time checks that ormBase satisfies the query and manipulation
// interfaces.
var (
	_ DQL          = new(ormBase)
	_ DML          = new(ormBase)
	_ DriverGetter = new(ormBase)
)
|
|
||||||
|
|
||||||
// get model info and model reflect value
|
|
||||||
func (*ormBase) getMi(md interface{}) (mi *modelInfo) {
|
|
||||||
val := reflect.ValueOf(md)
|
|
||||||
ind := reflect.Indirect(val)
|
|
||||||
typ := ind.Type()
|
|
||||||
mi = getTypeMi(typ)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// get need ptr model info and model reflect value
|
|
||||||
func (*ormBase) getPtrMiInd(md interface{}) (mi *modelInfo, ind reflect.Value) {
|
|
||||||
val := reflect.ValueOf(md)
|
|
||||||
ind = reflect.Indirect(val)
|
|
||||||
typ := ind.Type()
|
|
||||||
if val.Kind() != reflect.Ptr {
|
|
||||||
panic(fmt.Errorf("<Ormer> cannot use non-ptr model struct `%s`", getFullName(typ)))
|
|
||||||
}
|
|
||||||
mi = getTypeMi(typ)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getTypeMi(mdTyp reflect.Type) *modelInfo {
|
|
||||||
name := getFullName(mdTyp)
|
|
||||||
if mi, ok := defaultModelCache.getByFullName(name); ok {
|
|
||||||
return mi
|
|
||||||
}
|
|
||||||
panic(fmt.Errorf("<Ormer> table: `%s` not found, make sure it was registered with `RegisterModel()`", name))
|
|
||||||
}
|
|
||||||
|
|
||||||
// get field info from model info by given field name
|
|
||||||
func (*ormBase) getFieldInfo(mi *modelInfo, name string) *fieldInfo {
|
|
||||||
fi, ok := mi.fields.GetByAny(name)
|
|
||||||
if !ok {
|
|
||||||
panic(fmt.Errorf("<Ormer> cannot find field `%s` for model `%s`", name, mi.fullName))
|
|
||||||
}
|
|
||||||
return fi
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read loads a row into md, matching on the given columns (default: pk).
func (o *ormBase) Read(md interface{}, cols ...string) error {
	return o.ReadWithCtx(context.Background(), md, cols...)
}

// ReadWithCtx is the context-aware variant of Read.
func (o *ormBase) ReadWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	mi, ind := o.getPtrMiInd(md)
	return o.alias.DbBaser.Read(ctx, o.db, mi, ind, o.alias.TZ, cols, false)
}
|
|
||||||
|
|
||||||
// ReadForUpdate behaves like Read but issues the query in
// "SELECT ... FOR UPDATE" form to lock the row.
func (o *ormBase) ReadForUpdate(md interface{}, cols ...string) error {
	return o.ReadForUpdateWithCtx(context.Background(), md, cols...)
}

// ReadForUpdateWithCtx is the context-aware variant of ReadForUpdate.
func (o *ormBase) ReadForUpdateWithCtx(ctx context.Context, md interface{}, cols ...string) error {
	mi, ind := o.getPtrMiInd(md)
	return o.alias.DbBaser.Read(ctx, o.db, mi, ind, o.alias.TZ, cols, true)
}
|
|
||||||
|
|
||||||
// ReadOrCreate tries to read a row from the database, or inserts one if
// it doesn't exist. col1 plus optional extra cols name the lookup fields.
func (o *ormBase) ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error) {
	return o.ReadOrCreateWithCtx(context.Background(), md, col1, cols...)
}

// ReadOrCreateWithCtx is the context-aware variant of ReadOrCreate.
// It returns (created, pk id, err): when the row already exists the
// model's current primary-key value is reported; for a relation-typed pk
// the lookup recurses into the related model.
func (o *ormBase) ReadOrCreateWithCtx(ctx context.Context, md interface{}, col1 string, cols ...string) (bool, int64, error) {
	cols = append([]string{col1}, cols...)
	mi, ind := o.getPtrMiInd(md)
	err := o.alias.DbBaser.Read(ctx, o.db, mi, ind, o.alias.TZ, cols, false)
	if err == ErrNoRows {
		// Create
		id, err := o.InsertWithCtx(ctx, md)
		return err == nil, id, err
	}

	// row found (or Read failed with another error): extract the pk value
	id, vid := int64(0), ind.FieldByIndex(mi.fields.pk.fieldIndex)
	if mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
		id = int64(vid.Uint())
	} else if mi.fields.pk.rel {
		// pk is itself a relation: resolve through the related model's pk
		return o.ReadOrCreateWithCtx(ctx, vid.Interface(), mi.fields.pk.relModelInfo.fields.pk.name)
	} else {
		id = vid.Int()
	}

	return false, id, err
}
|
|
||||||
|
|
||||||
// Insert inserts md into the database and returns the new row's id.
func (o *ormBase) Insert(md interface{}) (int64, error) {
	return o.InsertWithCtx(context.Background(), md)
}

// InsertWithCtx is the context-aware variant of Insert. On success the
// generated id is written back into an auto primary-key field via setPk.
func (o *ormBase) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) {
	mi, ind := o.getPtrMiInd(md)
	id, err := o.alias.DbBaser.Insert(ctx, o.db, mi, ind, o.alias.TZ)
	if err != nil {
		return id, err
	}

	o.setPk(mi, ind, id)

	return id, nil
}
|
|
||||||
|
|
||||||
// set auto pk field
|
|
||||||
func (*ormBase) setPk(mi *modelInfo, ind reflect.Value, id int64) {
|
|
||||||
if mi.fields.pk.auto {
|
|
||||||
if mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
|
|
||||||
ind.FieldByIndex(mi.fields.pk.fieldIndex).SetUint(uint64(id))
|
|
||||||
} else {
|
|
||||||
ind.FieldByIndex(mi.fields.pk.fieldIndex).SetInt(id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertMulti inserts some models to database.
// bulk is the batch size: values <= 1 insert one row at a time (writing
// each auto pk back), larger values delegate to a multi-row INSERT.
func (o *ormBase) InsertMulti(bulk int, mds interface{}) (int64, error) {
	return o.InsertMultiWithCtx(context.Background(), bulk, mds)
}

// InsertMultiWithCtx is the context-aware variant of InsertMulti.
// mds must be a non-empty slice or array of models; the count of rows
// inserted is returned.
func (o *ormBase) InsertMultiWithCtx(ctx context.Context, bulk int, mds interface{}) (int64, error) {
	var cnt int64

	sind := reflect.Indirect(reflect.ValueOf(mds))

	switch sind.Kind() {
	case reflect.Array, reflect.Slice:
		if sind.Len() == 0 {
			return cnt, ErrArgs
		}
	default:
		return cnt, ErrArgs
	}

	if bulk <= 1 {
		// row-by-row: each element may be a different registered model,
		// and each auto pk is written back after its insert
		for i := 0; i < sind.Len(); i++ {
			ind := reflect.Indirect(sind.Index(i))
			mi := o.getMi(ind.Interface())
			id, err := o.alias.DbBaser.Insert(ctx, o.db, mi, ind, o.alias.TZ)
			if err != nil {
				return cnt, err
			}

			o.setPk(mi, ind, id)

			cnt++
		}
	} else {
		// batched: model type is taken from the first element; note that
		// auto pks are NOT written back in this mode
		mi := o.getMi(sind.Index(0).Interface())
		return o.alias.DbBaser.InsertMulti(ctx, o.db, mi, sind, bulk, o.alias.TZ)
	}
	return cnt, nil
}
|
|
||||||
|
|
||||||
// InsertOrUpdate data to database
|
|
||||||
func (o *ormBase) InsertOrUpdate(md interface{}, colConflictAndArgs ...string) (int64, error) {
|
|
||||||
return o.InsertOrUpdateWithCtx(context.Background(), md, colConflictAndArgs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *ormBase) InsertOrUpdateWithCtx(ctx context.Context, md interface{}, colConflitAndArgs ...string) (int64, error) {
|
|
||||||
mi, ind := o.getPtrMiInd(md)
|
|
||||||
id, err := o.alias.DbBaser.InsertOrUpdate(ctx, o.db, mi, ind, o.alias, colConflitAndArgs...)
|
|
||||||
if err != nil {
|
|
||||||
return id, err
|
|
||||||
}
|
|
||||||
|
|
||||||
o.setPk(mi, ind, id)
|
|
||||||
|
|
||||||
return id, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update writes md back to the database.
// cols names the columns to update; empty means all columns.
func (o *ormBase) Update(md interface{}, cols ...string) (int64, error) {
	return o.UpdateWithCtx(context.Background(), md, cols...)
}

// UpdateWithCtx is the context-aware variant of Update; returns the
// number of rows affected.
func (o *ormBase) UpdateWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
	mi, ind := o.getPtrMiInd(md)
	return o.alias.DbBaser.Update(ctx, o.db, mi, ind, o.alias.TZ, cols)
}
|
|
||||||
|
|
||||||
// delete model in database
|
|
||||||
// cols shows the delete conditions values read from. default is pk
|
|
||||||
func (o *ormBase) Delete(md interface{}, cols ...string) (int64, error) {
|
|
||||||
return o.DeleteWithCtx(context.Background(), md, cols...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *ormBase) DeleteWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error) {
|
|
||||||
mi, ind := o.getPtrMiInd(md)
|
|
||||||
num, err := o.alias.DbBaser.Delete(ctx, o.db, mi, ind, o.alias.TZ, cols)
|
|
||||||
return num, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// create a models to models queryer
|
|
||||||
func (o *ormBase) QueryM2M(md interface{}, name string) QueryM2Mer {
|
|
||||||
mi, ind := o.getPtrMiInd(md)
|
|
||||||
fi := o.getFieldInfo(mi, name)
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case fi.fieldType == RelManyToMany:
|
|
||||||
case fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough:
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("<Ormer.QueryM2M> model `%s` . name `%s` is not a m2m field", fi.name, mi.fullName))
|
|
||||||
}
|
|
||||||
|
|
||||||
return newQueryM2M(md, o, mi, fi, ind)
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryM2MWithCtx creates a models-to-models queryer.
// NOTE: this method is deprecated, context parameter will not take effect.
//
// Deprecated: use methods with the `WithCtx` suffix on QueryM2Mer instead.
func (o *ormBase) QueryM2MWithCtx(_ context.Context, md interface{}, name string) QueryM2Mer {
	logs.Warn("QueryM2MWithCtx is DEPRECATED. Use methods with `WithCtx` suffix on QueryM2M as replacement please.")
	return o.QueryM2M(md, name)
}
|
|
||||||
|
|
||||||
// LoadRelated loads related models into md's named relation field.
// args are hint KVs: limit, offset (int) and order (string).
//
// example:
//
//	orm.LoadRelated(post,"Tags")
//	for _,tag := range post.Tags{...}
//
// make sure the relation is defined in model struct tags.
func (o *ormBase) LoadRelated(md interface{}, name string, args ...utils.KV) (int64, error) {
	return o.LoadRelatedWithCtx(context.Background(), md, name, args...)
}
|
|
||||||
|
|
||||||
// LoadRelatedWithCtx loads the related models of field `name` into md and
// returns the number of rows loaded. The context parameter is accepted for
// interface symmetry but is not used here. Supported hint keys (see the
// hints package): rel depth, limit, offset and order-by.
func (o *ormBase) LoadRelatedWithCtx(_ context.Context, md interface{}, name string, args ...utils.KV) (int64, error) {
	_, fi, ind, qs := o.queryRelated(md, name)

	var relDepth int
	var limit, offset int64
	var order string

	// Decode the optional hint KVs into the local query settings.
	kvs := utils.NewKVs(args...)
	kvs.IfContains(hints.KeyRelDepth, func(value interface{}) {
		// rel depth may be given as bool (use the default depth) or int.
		if v, ok := value.(bool); ok {
			if v {
				relDepth = DefaultRelsDepth
			}
		} else if v, ok := value.(int); ok {
			relDepth = v
		}
	}).IfContains(hints.KeyLimit, func(value interface{}) {
		if v, ok := value.(int64); ok {
			limit = v
		}
	}).IfContains(hints.KeyOffset, func(value interface{}) {
		if v, ok := value.(int64); ok {
			offset = v
		}
	}).IfContains(hints.KeyOrderBy, func(value interface{}) {
		if v, ok := value.(string); ok {
			order = v
		}
	})

	// To-one relations always load exactly one row, overriding any hints.
	switch fi.fieldType {
	case RelOneToOne, RelForeignKey, RelReverseOne:
		limit = 1
		offset = 0
	}

	qs.limit = limit
	qs.offset = offset
	qs.relDepth = relDepth

	if len(order) > 0 {
		qs.orders = order_clause.ParseOrder(order)
	}

	find := ind.FieldByIndex(fi.fieldIndex)

	var nums int64
	var err error
	switch fi.fieldType {
	case RelOneToOne, RelForeignKey, RelReverseOne:
		// Allocate a fresh value and only assign it into md on success.
		val := reflect.New(find.Type().Elem())
		container := val.Interface()
		err = qs.One(container)
		if err == nil {
			find.Set(val)
			nums = 1
		}
	default:
		nums, err = qs.All(find.Addr().Interface())
	}

	return nums, err
}
|
|
||||||
|
|
||||||
// get QuerySeter for related models to md model
|
|
||||||
func (o *ormBase) queryRelated(md interface{}, name string) (*modelInfo, *fieldInfo, reflect.Value, *querySet) {
|
|
||||||
mi, ind := o.getPtrMiInd(md)
|
|
||||||
fi := o.getFieldInfo(mi, name)
|
|
||||||
|
|
||||||
_, _, exist := getExistPk(mi, ind)
|
|
||||||
if !exist {
|
|
||||||
panic(ErrMissPK)
|
|
||||||
}
|
|
||||||
|
|
||||||
var qs *querySet
|
|
||||||
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelOneToOne, RelForeignKey, RelManyToMany:
|
|
||||||
if !fi.inModel {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
qs = o.getRelQs(md, mi, fi)
|
|
||||||
case RelReverseOne, RelReverseMany:
|
|
||||||
if !fi.inModel {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
qs = o.getReverseQs(md, mi, fi)
|
|
||||||
}
|
|
||||||
|
|
||||||
if qs == nil {
|
|
||||||
panic(fmt.Errorf("<Ormer> name `%s` for model `%s` is not an available rel/reverse field", md, name))
|
|
||||||
}
|
|
||||||
|
|
||||||
return mi, fi, ind, qs
|
|
||||||
}
|
|
||||||
|
|
||||||
// get reverse relation QuerySeter
|
|
||||||
func (o *ormBase) getReverseQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelReverseOne, RelReverseMany:
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("<Ormer> name `%s` for model `%s` is not an available reverse field", fi.name, mi.fullName))
|
|
||||||
}
|
|
||||||
|
|
||||||
var q *querySet
|
|
||||||
|
|
||||||
if fi.fieldType == RelReverseMany && fi.reverseFieldInfo.mi.isThrough {
|
|
||||||
q = newQuerySet(o, fi.relModelInfo).(*querySet)
|
|
||||||
q.cond = NewCondition().And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md)
|
|
||||||
} else {
|
|
||||||
q = newQuerySet(o, fi.reverseFieldInfo.mi).(*querySet)
|
|
||||||
q.cond = NewCondition().And(fi.reverseFieldInfo.column, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
return q
|
|
||||||
}
|
|
||||||
|
|
||||||
// get relation QuerySeter
|
|
||||||
func (o *ormBase) getRelQs(md interface{}, mi *modelInfo, fi *fieldInfo) *querySet {
|
|
||||||
switch fi.fieldType {
|
|
||||||
case RelOneToOne, RelForeignKey, RelManyToMany:
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("<Ormer> name `%s` for model `%s` is not an available rel field", fi.name, mi.fullName))
|
|
||||||
}
|
|
||||||
|
|
||||||
q := newQuerySet(o, fi.relModelInfo).(*querySet)
|
|
||||||
q.cond = NewCondition()
|
|
||||||
|
|
||||||
if fi.fieldType == RelManyToMany {
|
|
||||||
q.cond = q.cond.And(fi.reverseFieldInfoM2M.column+ExprSep+fi.reverseFieldInfo.column, md)
|
|
||||||
} else {
|
|
||||||
q.cond = q.cond.And(fi.reverseFieldInfo.column, md)
|
|
||||||
}
|
|
||||||
|
|
||||||
return q
|
|
||||||
}
|
|
||||||
|
|
||||||
// return a QuerySeter for table operations.
|
|
||||||
// table name can be string or struct.
|
|
||||||
// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
|
|
||||||
func (o *ormBase) QueryTable(ptrStructOrTableName interface{}) (qs QuerySeter) {
|
|
||||||
var name string
|
|
||||||
if table, ok := ptrStructOrTableName.(string); ok {
|
|
||||||
name = nameStrategyMap[defaultNameStrategy](table)
|
|
||||||
if mi, ok := defaultModelCache.get(name); ok {
|
|
||||||
qs = newQuerySet(o, mi)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
name = getFullName(indirectType(reflect.TypeOf(ptrStructOrTableName)))
|
|
||||||
if mi, ok := defaultModelCache.getByFullName(name); ok {
|
|
||||||
qs = newQuerySet(o, mi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if qs == nil {
|
|
||||||
panic(fmt.Errorf("<Ormer.QueryTable> table name: `%s` not exists", name))
|
|
||||||
}
|
|
||||||
return qs
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryTableWithCtx is QueryTable with a context parameter.
//
// Deprecated: the context parameter is ignored; use methods with the
// `WithCtx` suffix on the returned QuerySeter instead.
func (o *ormBase) QueryTableWithCtx(_ context.Context, ptrStructOrTableName interface{}) (qs QuerySeter) {
	logs.Warn("QueryTableWithCtx is DEPRECATED. Use methods with `WithCtx` suffix on QuerySeter as replacement please.")
	return o.QueryTable(ptrStructOrTableName)
}
|
|
||||||
|
|
||||||
// Raw returns a RawSeter for executing the given raw SQL string with args.
func (o *ormBase) Raw(query string, args ...interface{}) RawSeter {
	return o.RawWithCtx(context.Background(), query, args...)
}
|
|
||||||
|
|
||||||
// RawWithCtx is Raw with a context parameter; the context is currently
// unused when constructing the raw set.
func (o *ormBase) RawWithCtx(_ context.Context, query string, args ...interface{}) RawSeter {
	return newRawSet(o, query, args)
}
|
|
||||||
|
|
||||||
// Driver returns the Driver describing the database currently in use.
func (o *ormBase) Driver() Driver {
	return driver(o.alias.Name)
}
|
|
||||||
|
|
||||||
// return sql.DBStats for current database
|
|
||||||
func (o *ormBase) DBStats() *sql.DBStats {
|
|
||||||
if o.alias != nil && o.alias.DB != nil {
|
|
||||||
stats := o.alias.DB.DB.Stats()
|
|
||||||
return &stats
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// orm is the default Ormer implementation, a thin wrapper around ormBase.
type orm struct {
	ormBase
}

// Compile-time check that *orm satisfies the Ormer interface.
var _ Ormer = new(orm)
|
|
||||||
|
|
||||||
// Begin starts a transaction with a background context and default options.
func (o *orm) Begin() (TxOrmer, error) {
	return o.BeginWithCtx(context.Background())
}
|
|
||||||
|
|
||||||
// BeginWithCtx starts a transaction with the given context and default options.
func (o *orm) BeginWithCtx(ctx context.Context) (TxOrmer, error) {
	return o.BeginWithCtxAndOpts(ctx, nil)
}
|
|
||||||
|
|
||||||
// BeginWithOpts starts a transaction with the given options and a background context.
func (o *orm) BeginWithOpts(opts *sql.TxOptions) (TxOrmer, error) {
	return o.BeginWithCtxAndOpts(context.Background(), opts)
}
|
|
||||||
|
|
||||||
// BeginWithCtxAndOpts starts a database transaction with the given context
// and options and returns a TxOrmer bound to it.
func (o *orm) BeginWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions) (TxOrmer, error) {
	tx, err := o.db.(txer).BeginTx(ctx, opts)
	if err != nil {
		return nil, err
	}

	_txOrm := &txOrm{
		ormBase: ormBase{
			alias: o.alias,
			db:    &TxDB{tx: tx},
		},
	}

	// In debug mode wrap the tx querier so every statement is logged.
	if Debug {
		_txOrm.db = newDbQueryLog(o.alias, _txOrm.db)
	}

	var taskTxOrm TxOrmer = _txOrm
	return taskTxOrm, nil
}
|
|
||||||
|
|
||||||
// DoTx runs task inside a transaction with a background context;
// see DoTxWithCtxAndOpts for the commit/rollback semantics.
func (o *orm) DoTx(task func(ctx context.Context, txOrm TxOrmer) error) error {
	return o.DoTxWithCtx(context.Background(), task)
}
|
|
||||||
|
|
||||||
// DoTxWithCtx runs task inside a transaction with the given context and
// default transaction options.
func (o *orm) DoTxWithCtx(ctx context.Context, task func(ctx context.Context, txOrm TxOrmer) error) error {
	return o.DoTxWithCtxAndOpts(ctx, nil, task)
}
|
|
||||||
|
|
||||||
// DoTxWithOpts runs task inside a transaction with the given options and a
// background context.
func (o *orm) DoTxWithOpts(opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
	return o.DoTxWithCtxAndOpts(context.Background(), opts, task)
}
|
|
||||||
|
|
||||||
// DoTxWithCtxAndOpts runs task inside a transaction started with the given
// context and options; commit/rollback is handled by doTxTemplate.
func (o *orm) DoTxWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error {
	return doTxTemplate(ctx, o, opts, task)
}
|
|
||||||
|
|
||||||
// doTxTemplate runs task inside a transaction started on o. The transaction
// is rolled back when task returns an error or panics, and committed
// otherwise; a panic inside task propagates after the rollback.
func doTxTemplate(ctx context.Context, o TxBeginner, opts *sql.TxOptions,
	task func(ctx context.Context, txOrm TxOrmer) error) error {
	_txOrm, err := o.BeginWithCtxAndOpts(ctx, opts)
	if err != nil {
		return err
	}
	// panicked stays true if task panics (the reset below is skipped),
	// which makes the deferred cleanup roll back instead of commit.
	panicked := true
	defer func() {
		if panicked || err != nil {
			e := _txOrm.Rollback()
			if e != nil {
				logs.Error("rollback transaction failed: %v,%v", e, panicked)
			}
		} else {
			e := _txOrm.Commit()
			if e != nil {
				logs.Error("commit transaction failed: %v,%v", e, panicked)
			}
		}
	}()
	taskTxOrm := _txOrm
	err = task(ctx, taskTxOrm)
	panicked = false
	return err
}
|
|
||||||
|
|
||||||
// txOrm is the TxOrmer implementation: an ormBase whose db is a transaction.
type txOrm struct {
	ormBase
}

// Compile-time check that *txOrm satisfies the TxOrmer interface.
var _ TxOrmer = new(txOrm)
|
|
||||||
|
|
||||||
// Commit commits the underlying transaction.
func (t *txOrm) Commit() error {
	return t.db.(txEnder).Commit()
}
|
|
||||||
|
|
||||||
// Rollback rolls back the underlying transaction.
func (t *txOrm) Rollback() error {
	return t.db.(txEnder).Rollback()
}
|
|
||||||
|
|
||||||
// RollbackUnlessCommit rolls back the underlying transaction unless it has
// already been committed.
func (t *txOrm) RollbackUnlessCommit() error {
	return t.db.(txEnder).RollbackUnlessCommit()
}
|
|
||||||
|
|
||||||
// NewOrm creates a new Ormer bound to the `default` database alias.
// Model bootstrap runs once on first call.
func NewOrm() Ormer {
	BootStrap() // execute only once
	return NewOrmUsingDB(`default`)
}
|
|
||||||
|
|
||||||
// NewOrmUsingDB create new orm with the name
|
|
||||||
func NewOrmUsingDB(aliasName string) Ormer {
|
|
||||||
if al, ok := dataBaseCache.get(aliasName); ok {
|
|
||||||
return newDBWithAlias(al)
|
|
||||||
}
|
|
||||||
panic(fmt.Errorf("<Ormer.Using> unknown db alias name `%s`", aliasName))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewOrmWithDB creates a new Ormer that queries through the caller-supplied
// *sql.DB, registering it under aliasName with the given driver.
func NewOrmWithDB(driverName, aliasName string, db *sql.DB, params ...DBOption) (Ormer, error) {
	al, err := newAliasWithDb(aliasName, driverName, db, params...)
	if err != nil {
		return nil, err
	}

	return newDBWithAlias(al), nil
}
|
|
||||||
|
|
||||||
func newDBWithAlias(al *alias) Ormer {
|
|
||||||
o := new(orm)
|
|
||||||
o.alias = al
|
|
||||||
|
|
||||||
if Debug {
|
|
||||||
o.db = newDbQueryLog(al, al.DB)
|
|
||||||
} else {
|
|
||||||
o.db = al.DB
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(globalFilterChains) > 0 {
|
|
||||||
return NewFilterOrmDecorator(o, globalFilterChains...)
|
|
||||||
}
|
|
||||||
return o
|
|
||||||
}
|
|
@ -1,160 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ExprSep define the expression separation
|
|
||||||
const (
|
|
||||||
ExprSep = clauses.ExprSep
|
|
||||||
)
|
|
||||||
|
|
||||||
// condValue is one entry of a Condition: either a leaf expression with its
// arguments, a nested sub-condition, or a raw SQL fragment.
type condValue struct {
	exprs  []string      // expression parts, split on ExprSep
	args   []interface{} // values bound to the expression
	cond   *Condition    // nested condition when isCond is true
	isOr   bool          // combined with OR instead of AND
	isNot  bool          // negated with NOT
	isCond bool          // entry is a nested condition
	isRaw  bool          // entry is a raw SQL fragment
	sql    string        // raw SQL text when isRaw is true
}
|
|
||||||
|
|
||||||
// Condition struct.
// work for WHERE conditions. Each And/Or call appends a condValue entry;
// builder methods on a value receiver return a modified copy.
type Condition struct {
	params []condValue
}
|
|
||||||
|
|
||||||
// NewCondition return new condition struct
|
|
||||||
func NewCondition() *Condition {
|
|
||||||
c := &Condition{}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Raw add raw sql to condition
|
|
||||||
func (c Condition) Raw(expr string, sql string) *Condition {
|
|
||||||
if len(sql) == 0 {
|
|
||||||
panic(fmt.Errorf("<Condition.Raw> sql cannot empty"))
|
|
||||||
}
|
|
||||||
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), sql: sql, isRaw: true})
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// And add expression to condition
|
|
||||||
func (c Condition) And(expr string, args ...interface{}) *Condition {
|
|
||||||
if expr == "" || len(args) == 0 {
|
|
||||||
panic(fmt.Errorf("<Condition.And> args cannot empty"))
|
|
||||||
}
|
|
||||||
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args})
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// AndNot add NOT expression to condition
|
|
||||||
func (c Condition) AndNot(expr string, args ...interface{}) *Condition {
|
|
||||||
if expr == "" || len(args) == 0 {
|
|
||||||
panic(fmt.Errorf("<Condition.AndNot> args cannot empty"))
|
|
||||||
}
|
|
||||||
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: true})
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// AndCond combine a condition to current condition
|
|
||||||
func (c *Condition) AndCond(cond *Condition) *Condition {
|
|
||||||
if c == cond {
|
|
||||||
panic(fmt.Errorf("<Condition.AndCond> cannot use self as sub cond"))
|
|
||||||
}
|
|
||||||
|
|
||||||
c = c.clone()
|
|
||||||
|
|
||||||
if cond != nil {
|
|
||||||
c.params = append(c.params, condValue{cond: cond, isCond: true})
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// AndNotCond combine an AND NOT condition to current condition
|
|
||||||
func (c *Condition) AndNotCond(cond *Condition) *Condition {
|
|
||||||
c = c.clone()
|
|
||||||
if c == cond {
|
|
||||||
panic(fmt.Errorf("<Condition.AndNotCond> cannot use self as sub cond"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cond != nil {
|
|
||||||
c.params = append(c.params, condValue{cond: cond, isCond: true, isNot: true})
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Or add OR expression to condition
|
|
||||||
func (c Condition) Or(expr string, args ...interface{}) *Condition {
|
|
||||||
if expr == "" || len(args) == 0 {
|
|
||||||
panic(fmt.Errorf("<Condition.Or> args cannot empty"))
|
|
||||||
}
|
|
||||||
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isOr: true})
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrNot add OR NOT expression to condition
|
|
||||||
func (c Condition) OrNot(expr string, args ...interface{}) *Condition {
|
|
||||||
if expr == "" || len(args) == 0 {
|
|
||||||
panic(fmt.Errorf("<Condition.OrNot> args cannot empty"))
|
|
||||||
}
|
|
||||||
c.params = append(c.params, condValue{exprs: strings.Split(expr, ExprSep), args: args, isNot: true, isOr: true})
|
|
||||||
return &c
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrCond combine an OR condition to current condition
|
|
||||||
func (c *Condition) OrCond(cond *Condition) *Condition {
|
|
||||||
c = c.clone()
|
|
||||||
if c == cond {
|
|
||||||
panic(fmt.Errorf("<Condition.OrCond> cannot use self as sub cond"))
|
|
||||||
}
|
|
||||||
if cond != nil {
|
|
||||||
c.params = append(c.params, condValue{cond: cond, isCond: true, isOr: true})
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrNotCond combine an OR NOT condition to current condition
|
|
||||||
func (c *Condition) OrNotCond(cond *Condition) *Condition {
|
|
||||||
c = c.clone()
|
|
||||||
if c == cond {
|
|
||||||
panic(fmt.Errorf("<Condition.OrNotCond> cannot use self as sub cond"))
|
|
||||||
}
|
|
||||||
|
|
||||||
if cond != nil {
|
|
||||||
c.params = append(c.params, condValue{cond: cond, isCond: true, isNot: true, isOr: true})
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEmpty reports whether the condition has no entries.
func (c *Condition) IsEmpty() bool {
	return len(c.params) == 0
}
|
|
||||||
|
|
||||||
// clone clone a condition
|
|
||||||
func (c Condition) clone() *Condition {
|
|
||||||
params := make([]condValue, len(c.params))
|
|
||||||
copy(params, c.params)
|
|
||||||
c.params = params
|
|
||||||
return &c
|
|
||||||
}
|
|
@ -1,228 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Log wraps the standard library log.Logger for ORM debug output.
type Log struct {
	*log.Logger
}
|
|
||||||
|
|
||||||
// LogFunc, when set, receives a structured map describing each executed
// query (cost_time, flag, sql) in addition to the plain-text debug log.
var LogFunc func(query map[string]interface{})
|
|
||||||
|
|
||||||
// NewLog set io.Writer to create a Logger.
|
|
||||||
func NewLog(out io.Writer) *Log {
|
|
||||||
d := new(Log)
|
|
||||||
d.Logger = log.New(out, "[ORM]", log.LstdFlags)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// debugLogQueies formats and emits one debug log line for a finished
// database operation, and forwards a structured map to LogFunc when set.
// NOTE(review): the function and parameter names contain typos
// ("Queies", "operaton"); kept as-is in this documentation-only pass.
func debugLogQueies(alias *alias, operaton, query string, t time.Time, err error, args ...interface{}) {
	logMap := make(map[string]interface{})
	// Elapsed time converted to milliseconds with one decimal place.
	sub := time.Since(t) / 1e5
	elsp := float64(int(sub)) / 10.0
	logMap["cost_time"] = elsp
	flag := " OK"
	if err != nil {
		flag = "FAIL"
	}
	logMap["flag"] = flag
	con := fmt.Sprintf(" -[Queries/%s] - [%s / %11s / %7.1fms] - [%s]", alias.Name, flag, operaton, elsp, query)
	// Render each bound argument for the log line.
	cons := make([]string, 0, len(args))
	for _, arg := range args {
		cons = append(cons, fmt.Sprintf("%v", arg))
	}
	if len(cons) > 0 {
		con += fmt.Sprintf(" - `%s`", strings.Join(cons, "`, `"))
	}
	if err != nil {
		con += " - " + err.Error()
	}
	logMap["sql"] = fmt.Sprintf("%s-`%s`", query, strings.Join(cons, "`, `"))
	if LogFunc != nil {
		LogFunc(logMap)
	}
	DebugLog.Println(con)
}
|
|
||||||
|
|
||||||
// stmtQueryLog is a logging decorator around a prepared statement:
// in dev mode the ORM uses stmtQueryLog, otherwise the plain stmtQuerier.
type stmtQueryLog struct {
	alias *alias      // database alias, used for log attribution
	query string      // the prepared SQL text, logged with every call
	stmt  stmtQuerier // the wrapped statement
}

// Compile-time check that *stmtQueryLog satisfies stmtQuerier.
var _ stmtQuerier = new(stmtQueryLog)
|
|
||||||
|
|
||||||
// Close closes the wrapped statement and logs the call.
func (d *stmtQueryLog) Close() error {
	a := time.Now()
	err := d.stmt.Close()
	debugLogQueies(d.alias, "st.Close", d.query, a, err)
	return err
}
|
|
||||||
|
|
||||||
// Exec executes the statement with a background context.
func (d *stmtQueryLog) Exec(args ...interface{}) (sql.Result, error) {
	return d.ExecContext(context.Background(), args...)
}
|
|
||||||
|
|
||||||
// ExecContext executes the wrapped statement and logs the call with its
// arguments and timing.
func (d *stmtQueryLog) ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error) {
	a := time.Now()
	res, err := d.stmt.ExecContext(ctx, args...)
	debugLogQueies(d.alias, "st.Exec", d.query, a, err, args...)
	return res, err
}
|
|
||||||
|
|
||||||
// Query runs the statement query with a background context.
func (d *stmtQueryLog) Query(args ...interface{}) (*sql.Rows, error) {
	return d.QueryContext(context.Background(), args...)
}
|
|
||||||
|
|
||||||
// QueryContext runs the wrapped statement query and logs the call with its
// arguments and timing.
func (d *stmtQueryLog) QueryContext(ctx context.Context, args ...interface{}) (*sql.Rows, error) {
	a := time.Now()
	res, err := d.stmt.QueryContext(ctx, args...)
	debugLogQueies(d.alias, "st.Query", d.query, a, err, args...)
	return res, err
}
|
|
||||||
|
|
||||||
// QueryRow runs the single-row statement query with a background context.
func (d *stmtQueryLog) QueryRow(args ...interface{}) *sql.Row {
	return d.QueryRowContext(context.Background(), args...)
}
|
|
||||||
|
|
||||||
func (d *stmtQueryLog) QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row {
|
|
||||||
a := time.Now()
|
|
||||||
res := d.stmt.QueryRow(args...)
|
|
||||||
debugLogQueies(d.alias, "st.QueryRow", d.query, a, nil, args...)
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStmtQueryLog(alias *alias, stmt stmtQuerier, query string) stmtQuerier {
|
|
||||||
d := new(stmtQueryLog)
|
|
||||||
d.stmt = stmt
|
|
||||||
d.alias = alias
|
|
||||||
d.query = query
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// dbQueryLog is a logging decorator around a database querier:
// in dev mode the ORM uses dbQueryLog, otherwise the plain dbQuerier.
type dbQueryLog struct {
	alias *alias    // database alias, used for log attribution
	db    dbQuerier // the wrapped querier (DB or transaction)
	tx    txer      // NOTE(review): unused in the methods visible here
	txe   txEnder   // NOTE(review): unused in the methods visible here
}

// Compile-time checks for the interfaces dbQueryLog must satisfy.
var (
	_ dbQuerier = new(dbQueryLog)
	_ txer      = new(dbQueryLog)
	_ txEnder   = new(dbQueryLog)
)
|
|
||||||
|
|
||||||
// Prepare prepares a statement with a background context.
func (d *dbQueryLog) Prepare(query string) (*sql.Stmt, error) {
	return d.PrepareContext(context.Background(), query)
}
|
|
||||||
|
|
||||||
// PrepareContext prepares a statement on the wrapped querier and logs the call.
func (d *dbQueryLog) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
	a := time.Now()
	stmt, err := d.db.PrepareContext(ctx, query)
	debugLogQueies(d.alias, "db.Prepare", query, a, err)
	return stmt, err
}
|
|
||||||
|
|
||||||
// Exec executes query with a background context.
func (d *dbQueryLog) Exec(query string, args ...interface{}) (sql.Result, error) {
	return d.ExecContext(context.Background(), query, args...)
}
|
|
||||||
|
|
||||||
// ExecContext executes query on the wrapped querier and logs the call with
// its arguments and timing.
func (d *dbQueryLog) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
	a := time.Now()
	res, err := d.db.ExecContext(ctx, query, args...)
	debugLogQueies(d.alias, "db.Exec", query, a, err, args...)
	return res, err
}
|
|
||||||
|
|
||||||
// Query runs query with a background context.
func (d *dbQueryLog) Query(query string, args ...interface{}) (*sql.Rows, error) {
	return d.QueryContext(context.Background(), query, args...)
}
|
|
||||||
|
|
||||||
// QueryContext runs query on the wrapped querier and logs the call with its
// arguments and timing.
func (d *dbQueryLog) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
	a := time.Now()
	res, err := d.db.QueryContext(ctx, query, args...)
	debugLogQueies(d.alias, "db.Query", query, a, err, args...)
	return res, err
}
|
|
||||||
|
|
||||||
// QueryRow runs the single-row query with a background context.
func (d *dbQueryLog) QueryRow(query string, args ...interface{}) *sql.Row {
	return d.QueryRowContext(context.Background(), query, args...)
}
|
|
||||||
|
|
||||||
// QueryRowContext runs the single-row query on the wrapped querier and logs
// the call with its arguments and timing.
func (d *dbQueryLog) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	a := time.Now()
	res := d.db.QueryRowContext(ctx, query, args...)
	debugLogQueies(d.alias, "db.QueryRow", query, a, nil, args...)
	return res
}
|
|
||||||
|
|
||||||
// Begin starts a transaction with a background context and default options.
func (d *dbQueryLog) Begin() (*sql.Tx, error) {
	return d.BeginTx(context.Background(), nil)
}
|
|
||||||
|
|
||||||
// BeginTx starts a transaction on the wrapped querier and logs the call.
// The wrapped querier must also implement txer.
func (d *dbQueryLog) BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error) {
	a := time.Now()
	tx, err := d.db.(txer).BeginTx(ctx, opts)
	debugLogQueies(d.alias, "db.BeginTx", "START TRANSACTION", a, err)
	return tx, err
}
|
|
||||||
|
|
||||||
// Commit commits the wrapped transaction and logs the call.
// The wrapped querier must also implement txEnder.
func (d *dbQueryLog) Commit() error {
	a := time.Now()
	err := d.db.(txEnder).Commit()
	debugLogQueies(d.alias, "tx.Commit", "COMMIT", a, err)
	return err
}
|
|
||||||
|
|
||||||
// Rollback rolls back the wrapped transaction and logs the call.
// The wrapped querier must also implement txEnder.
func (d *dbQueryLog) Rollback() error {
	a := time.Now()
	err := d.db.(txEnder).Rollback()
	debugLogQueies(d.alias, "tx.Rollback", "ROLLBACK", a, err)
	return err
}
|
|
||||||
|
|
||||||
// RollbackUnlessCommit rolls back the wrapped transaction unless it has
// already been committed, and logs the call.
func (d *dbQueryLog) RollbackUnlessCommit() error {
	a := time.Now()
	err := d.db.(txEnder).RollbackUnlessCommit()
	debugLogQueies(d.alias, "tx.RollbackUnlessCommit", "ROLLBACK UNLESS COMMIT", a, err)
	return err
}
|
|
||||||
|
|
||||||
// SetDB replaces the wrapped querier.
func (d *dbQueryLog) SetDB(db dbQuerier) {
	d.db = db
}
|
|
||||||
|
|
||||||
func newDbQueryLog(alias *alias, db dbQuerier) dbQuerier {
|
|
||||||
d := new(dbQueryLog)
|
|
||||||
d.alias = alias
|
|
||||||
d.db = db
|
|
||||||
return d
|
|
||||||
}
|
|
@ -1,92 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// insertSet is the Inserter implementation: a prepared insert statement for
// one registered model.
type insertSet struct {
	mi     *modelInfo  // the model this inserter was prepared for
	orm    *ormBase    // owning orm, provides alias/DbBaser
	stmt   stmtQuerier // the prepared insert statement
	closed bool        // set once Close has been called
}

// Compile-time check that *insertSet satisfies the Inserter interface.
var _ Inserter = new(insertSet)
|
|
||||||
|
|
||||||
// Insert inserts md with a background context; see InsertWithCtx.
func (o *insertSet) Insert(md interface{}) (int64, error) {
	return o.InsertWithCtx(context.Background(), md)
}
|
|
||||||
|
|
||||||
// InsertWithCtx inserts md via the prepared statement and returns the new
// primary key value. md must be a pointer to a struct of exactly the model
// this Inserter was created for; it panics otherwise, and returns
// ErrStmtClosed after Close.
func (o *insertSet) InsertWithCtx(ctx context.Context, md interface{}) (int64, error) {
	if o.closed {
		return 0, ErrStmtClosed
	}
	val := reflect.ValueOf(md)
	ind := reflect.Indirect(val)
	typ := ind.Type()
	name := getFullName(typ)
	if val.Kind() != reflect.Ptr {
		panic(fmt.Errorf("<Inserter.Insert> cannot use non-ptr model struct `%s`", name))
	}
	if name != o.mi.fullName {
		panic(fmt.Errorf("<Inserter.Insert> need model `%s` but found `%s`", o.mi.fullName, name))
	}
	id, err := o.orm.alias.DbBaser.InsertStmt(ctx, o.stmt, o.mi, ind, o.orm.alias.TZ)
	if err != nil {
		return id, err
	}
	// Write the generated id back into the model's auto primary key field,
	// choosing SetUint/SetInt by the field's signedness.
	if id > 0 {
		if o.mi.fields.pk.auto {
			if o.mi.fields.pk.fieldType&IsPositiveIntegerField > 0 {
				ind.FieldByIndex(o.mi.fields.pk.fieldIndex).SetUint(uint64(id))
			} else {
				ind.FieldByIndex(o.mi.fields.pk.fieldIndex).SetInt(id)
			}
		}
	}
	return id, nil
}
|
|
||||||
|
|
||||||
// Close closes the prepared statement. Calling Close a second time returns
// ErrStmtClosed.
func (o *insertSet) Close() error {
	if o.closed {
		return ErrStmtClosed
	}
	o.closed = true
	return o.stmt.Close()
}
|
|
||||||
|
|
||||||
// create new insert queryer.
|
|
||||||
func newInsertSet(ctx context.Context, orm *ormBase, mi *modelInfo) (Inserter, error) {
|
|
||||||
bi := new(insertSet)
|
|
||||||
bi.orm = orm
|
|
||||||
bi.mi = mi
|
|
||||||
st, query, err := orm.alias.DbBaser.PrepareInsert(ctx, orm.db, mi)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if Debug {
|
|
||||||
bi.stmt = newStmtQueryLog(orm.alias, st, query)
|
|
||||||
} else {
|
|
||||||
bi.stmt = st
|
|
||||||
}
|
|
||||||
return bi, nil
|
|
||||||
}
|
|
@ -1,163 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// queryM2M is the QueryM2Mer implementation for one m2m field of one
// origin model instance.
type queryM2M struct {
	md  interface{}   // the origin model instance
	mi  *modelInfo    // origin model info
	fi  *fieldInfo    // the m2m field being queried
	qs  *querySet     // query set scoped to the relation
	ind reflect.Value // struct value behind md
}
|
|
||||||
|
|
||||||
// Add links models to the origin model through the m2m relation.
// example:
// 	m2m := orm.QueryM2M(post,"Tag")
// 	m2m.Add(&Tag1{},&Tag2{})
// 	for _,tag := range post.Tags{}
//
// Make sure the relation is defined in the post model struct tag.
func (o *queryM2M) Add(mds ...interface{}) (int64, error) {
	return o.AddWithCtx(context.Background(), mds...)
}
|
|
||||||
|
|
||||||
// AddWithCtx inserts relation rows linking the origin model to each model
// in mds through the m2m through-table. mds may contain model structs or
// pointers, slices/arrays of models, and trailing non-struct values, which
// are treated as extra column values for the through table. It panics when
// the origin model or any struct model lacks a primary key value, and
// returns the driver-reported count from the bulk insert.
func (o *queryM2M) AddWithCtx(ctx context.Context, mds ...interface{}) (int64, error) {
	fi := o.fi
	mi := fi.relThroughModelInfo
	mfi := fi.reverseFieldInfo
	rfi := fi.reverseFieldInfoTwo

	orm := o.qs.orm
	dbase := orm.alias.DbBaser

	var models []interface{}
	var otherValues []interface{}
	var otherNames []string

	// Collect through-table columns other than the two relation keys and
	// the through model's own primary key.
	for _, colname := range mi.fields.dbcols {
		if colname != mfi.column && colname != rfi.column && colname != fi.mi.fields.pk.column &&
			mi.fields.columns[colname] != mi.fields.pk {
			otherNames = append(otherNames, colname)
		}
	}
	// Pull trailing non-struct values out of mds as extra column values.
	// NOTE(review): this removes elements from mds while ranging over it,
	// so consecutive non-struct values may be skipped — confirm intended.
	for i, md := range mds {
		if reflect.Indirect(reflect.ValueOf(md)).Kind() != reflect.Struct && i > 0 {
			otherValues = append(otherValues, md)
			mds = append(mds[:i], mds[i+1:]...)
		}
	}
	// Flatten slices/arrays of models into a single list.
	for _, md := range mds {
		val := reflect.ValueOf(md)
		if val.Kind() == reflect.Slice || val.Kind() == reflect.Array {
			for i := 0; i < val.Len(); i++ {
				v := val.Index(i)
				if v.CanInterface() {
					models = append(models, v.Interface())
				}
			}
		} else {
			models = append(models, md)
		}
	}

	// The origin model must carry a primary key value.
	_, v1, exist := getExistPk(o.mi, o.ind)
	if !exist {
		panic(ErrMissPK)
	}

	names := []string{mfi.column, rfi.column}

	// Build (origin pk, related pk) pairs for every model being linked.
	values := make([]interface{}, 0, len(models)*2)
	for _, md := range models {

		ind := reflect.Indirect(reflect.ValueOf(md))
		var v2 interface{}
		if ind.Kind() != reflect.Struct {
			// A bare value is used directly as the related key.
			v2 = ind.Interface()
		} else {
			_, v2, exist = getExistPk(fi.relModelInfo, ind)
			if !exist {
				panic(ErrMissPK)
			}
		}
		values = append(values, v1, v2)

	}
	names = append(names, otherNames...)
	values = append(values, otherValues...)
	return dbase.InsertValue(ctx, orm.db, mi, true, names, values)
}
|
|
||||||
|
|
||||||
// remove models following the origin model relationship
|
|
||||||
func (o *queryM2M) Remove(mds ...interface{}) (int64, error) {
|
|
||||||
return o.RemoveWithCtx(context.Background(), mds...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *queryM2M) RemoveWithCtx(ctx context.Context, mds ...interface{}) (int64, error) {
|
|
||||||
fi := o.fi
|
|
||||||
qs := o.qs.Filter(fi.reverseFieldInfo.name, o.md)
|
|
||||||
|
|
||||||
return qs.Filter(fi.reverseFieldInfoTwo.name+ExprSep+"in", mds).Delete()
|
|
||||||
}
|
|
||||||
|
|
||||||
// check model is existed in relationship of origin model
|
|
||||||
func (o *queryM2M) Exist(md interface{}) bool {
|
|
||||||
return o.ExistWithCtx(context.Background(), md)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *queryM2M) ExistWithCtx(ctx context.Context, md interface{}) bool {
|
|
||||||
fi := o.fi
|
|
||||||
return o.qs.Filter(fi.reverseFieldInfo.name, o.md).
|
|
||||||
Filter(fi.reverseFieldInfoTwo.name, md).ExistWithCtx(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// clean all models in related of origin model
|
|
||||||
func (o *queryM2M) Clear() (int64, error) {
|
|
||||||
return o.ClearWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *queryM2M) ClearWithCtx(ctx context.Context) (int64, error) {
|
|
||||||
fi := o.fi
|
|
||||||
return o.qs.Filter(fi.reverseFieldInfo.name, o.md).DeleteWithCtx(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// count all related models of origin model
|
|
||||||
func (o *queryM2M) Count() (int64, error) {
|
|
||||||
return o.CountWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *queryM2M) CountWithCtx(ctx context.Context) (int64, error) {
|
|
||||||
fi := o.fi
|
|
||||||
return o.qs.Filter(fi.reverseFieldInfo.name, o.md).CountWithCtx(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ QueryM2Mer = new(queryM2M)
|
|
||||||
|
|
||||||
// create new M2M queryer.
|
|
||||||
func newQueryM2M(md interface{}, o *ormBase, mi *modelInfo, fi *fieldInfo, ind reflect.Value) QueryM2Mer {
|
|
||||||
qm2m := new(queryM2M)
|
|
||||||
qm2m.md = md
|
|
||||||
qm2m.mi = mi
|
|
||||||
qm2m.fi = fi
|
|
||||||
qm2m.ind = ind
|
|
||||||
qm2m.qs = newQuerySet(o, fi.relThroughModelInfo).(*querySet)
|
|
||||||
return qm2m
|
|
||||||
}
|
|
@ -1,376 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses/order_clause"
|
|
||||||
"github.com/beego/beego/v2/client/orm/hints"
|
|
||||||
)
|
|
||||||
|
|
||||||
type colValue struct {
|
|
||||||
value int64
|
|
||||||
opt operator
|
|
||||||
}
|
|
||||||
|
|
||||||
type operator int
|
|
||||||
|
|
||||||
// define Col operations
|
|
||||||
const (
|
|
||||||
ColAdd operator = iota
|
|
||||||
ColMinus
|
|
||||||
ColMultiply
|
|
||||||
ColExcept
|
|
||||||
ColBitAnd
|
|
||||||
ColBitRShift
|
|
||||||
ColBitLShift
|
|
||||||
ColBitXOR
|
|
||||||
ColBitOr
|
|
||||||
)
|
|
||||||
|
|
||||||
// ColValue do the field raw changes. e.g Nums = Nums + 10. usage:
|
|
||||||
// Params{
|
|
||||||
// "Nums": ColValue(Col_Add, 10),
|
|
||||||
// }
|
|
||||||
func ColValue(opt operator, value interface{}) interface{} {
|
|
||||||
switch opt {
|
|
||||||
case ColAdd, ColMinus, ColMultiply, ColExcept, ColBitAnd, ColBitRShift,
|
|
||||||
ColBitLShift, ColBitXOR, ColBitOr:
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("orm.ColValue wrong operator"))
|
|
||||||
}
|
|
||||||
v, err := StrTo(ToStr(value)).Int64()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Errorf("orm.ColValue doesn't support non string/numeric type, %s", err))
|
|
||||||
}
|
|
||||||
var val colValue
|
|
||||||
val.value = v
|
|
||||||
val.opt = opt
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// real query struct
|
|
||||||
type querySet struct {
|
|
||||||
mi *modelInfo
|
|
||||||
cond *Condition
|
|
||||||
related []string
|
|
||||||
relDepth int
|
|
||||||
limit int64
|
|
||||||
offset int64
|
|
||||||
groups []string
|
|
||||||
orders []*order_clause.Order
|
|
||||||
distinct bool
|
|
||||||
forUpdate bool
|
|
||||||
useIndex int
|
|
||||||
indexes []string
|
|
||||||
orm *ormBase
|
|
||||||
aggregate string
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ QuerySeter = new(querySet)
|
|
||||||
|
|
||||||
// add condition expression to QuerySeter.
|
|
||||||
func (o querySet) Filter(expr string, args ...interface{}) QuerySeter {
|
|
||||||
if o.cond == nil {
|
|
||||||
o.cond = NewCondition()
|
|
||||||
}
|
|
||||||
o.cond = o.cond.And(expr, args...)
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add raw sql to querySeter.
|
|
||||||
func (o querySet) FilterRaw(expr string, sql string) QuerySeter {
|
|
||||||
if o.cond == nil {
|
|
||||||
o.cond = NewCondition()
|
|
||||||
}
|
|
||||||
o.cond = o.cond.Raw(expr, sql)
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add NOT condition to querySeter.
|
|
||||||
func (o querySet) Exclude(expr string, args ...interface{}) QuerySeter {
|
|
||||||
if o.cond == nil {
|
|
||||||
o.cond = NewCondition()
|
|
||||||
}
|
|
||||||
o.cond = o.cond.AndNot(expr, args...)
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// set offset number
|
|
||||||
func (o *querySet) setOffset(num interface{}) {
|
|
||||||
o.offset = ToInt64(num)
|
|
||||||
}
|
|
||||||
|
|
||||||
// add LIMIT value.
|
|
||||||
// args[0] means offset, e.g. LIMIT num,offset.
|
|
||||||
func (o querySet) Limit(limit interface{}, args ...interface{}) QuerySeter {
|
|
||||||
o.limit = ToInt64(limit)
|
|
||||||
if len(args) > 0 {
|
|
||||||
o.setOffset(args[0])
|
|
||||||
}
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add OFFSET value
|
|
||||||
func (o querySet) Offset(offset interface{}) QuerySeter {
|
|
||||||
o.setOffset(offset)
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add GROUP expression
|
|
||||||
func (o querySet) GroupBy(exprs ...string) QuerySeter {
|
|
||||||
o.groups = exprs
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add ORDER expression.
|
|
||||||
// "column" means ASC, "-column" means DESC.
|
|
||||||
func (o querySet) OrderBy(expressions ...string) QuerySeter {
|
|
||||||
if len(expressions) <= 0 {
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
o.orders = order_clause.ParseOrder(expressions...)
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add ORDER expression.
|
|
||||||
func (o querySet) OrderClauses(orders ...*order_clause.Order) QuerySeter {
|
|
||||||
if len(orders) <= 0 {
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
o.orders = orders
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add DISTINCT to SELECT
|
|
||||||
func (o querySet) Distinct() QuerySeter {
|
|
||||||
o.distinct = true
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// add FOR UPDATE to SELECT
|
|
||||||
func (o querySet) ForUpdate() QuerySeter {
|
|
||||||
o.forUpdate = true
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForceIndex force index for query
|
|
||||||
func (o querySet) ForceIndex(indexes ...string) QuerySeter {
|
|
||||||
o.useIndex = hints.KeyForceIndex
|
|
||||||
o.indexes = indexes
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// UseIndex use index for query
|
|
||||||
func (o querySet) UseIndex(indexes ...string) QuerySeter {
|
|
||||||
o.useIndex = hints.KeyUseIndex
|
|
||||||
o.indexes = indexes
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// IgnoreIndex ignore index for query
|
|
||||||
func (o querySet) IgnoreIndex(indexes ...string) QuerySeter {
|
|
||||||
o.useIndex = hints.KeyIgnoreIndex
|
|
||||||
o.indexes = indexes
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// set relation model to query together.
|
|
||||||
// it will query relation models and assign to parent model.
|
|
||||||
func (o querySet) RelatedSel(params ...interface{}) QuerySeter {
|
|
||||||
if len(params) == 0 {
|
|
||||||
o.relDepth = DefaultRelsDepth
|
|
||||||
} else {
|
|
||||||
for _, p := range params {
|
|
||||||
switch val := p.(type) {
|
|
||||||
case string:
|
|
||||||
o.related = append(o.related, val)
|
|
||||||
case int:
|
|
||||||
o.relDepth = val
|
|
||||||
default:
|
|
||||||
panic(fmt.Errorf("<QuerySeter.RelatedSel> wrong param kind: %v", val))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// set condition to QuerySeter.
|
|
||||||
func (o querySet) SetCond(cond *Condition) QuerySeter {
|
|
||||||
o.cond = cond
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// get condition from QuerySeter
|
|
||||||
func (o querySet) GetCond() *Condition {
|
|
||||||
return o.cond
|
|
||||||
}
|
|
||||||
|
|
||||||
// return QuerySeter execution result number
|
|
||||||
func (o *querySet) Count() (int64, error) {
|
|
||||||
return o.CountWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) CountWithCtx(ctx context.Context) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.Count(ctx, o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// check result empty or not after QuerySeter executed
|
|
||||||
func (o *querySet) Exist() bool {
|
|
||||||
return o.ExistWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) ExistWithCtx(ctx context.Context) bool {
|
|
||||||
cnt, _ := o.orm.alias.DbBaser.Count(ctx, o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
|
|
||||||
return cnt > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute update with parameters
|
|
||||||
func (o *querySet) Update(values Params) (int64, error) {
|
|
||||||
return o.UpdateWithCtx(context.Background(), values)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) UpdateWithCtx(ctx context.Context, values Params) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.UpdateBatch(ctx, o.orm.db, o, o.mi, o.cond, values, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute delete
|
|
||||||
func (o *querySet) Delete() (int64, error) {
|
|
||||||
return o.DeleteWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) DeleteWithCtx(ctx context.Context) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.DeleteBatch(ctx, o.orm.db, o, o.mi, o.cond, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// return an insert queryer.
|
|
||||||
// it can be used in times.
|
|
||||||
// example:
|
|
||||||
// i,err := sq.PrepareInsert()
|
|
||||||
// i.Add(&user1{},&user2{})
|
|
||||||
func (o *querySet) PrepareInsert() (Inserter, error) {
|
|
||||||
return o.PrepareInsertWithCtx(context.Background())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) PrepareInsertWithCtx(ctx context.Context) (Inserter, error) {
|
|
||||||
return newInsertSet(ctx, o.orm, o.mi)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all data and map to containers.
|
|
||||||
// cols means the columns when querying.
|
|
||||||
func (o *querySet) All(container interface{}, cols ...string) (int64, error) {
|
|
||||||
return o.AllWithCtx(context.Background(), container, cols...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) AllWithCtx(ctx context.Context, container interface{}, cols ...string) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.ReadBatch(ctx, o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query one row data and map to containers.
|
|
||||||
// cols means the columns when querying.
|
|
||||||
func (o *querySet) One(container interface{}, cols ...string) error {
|
|
||||||
return o.OneWithCtx(context.Background(), container, cols...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) OneWithCtx(ctx context.Context, container interface{}, cols ...string) error {
|
|
||||||
o.limit = 1
|
|
||||||
num, err := o.orm.alias.DbBaser.ReadBatch(ctx, o.orm.db, o, o.mi, o.cond, container, o.orm.alias.TZ, cols)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if num == 0 {
|
|
||||||
return ErrNoRows
|
|
||||||
}
|
|
||||||
|
|
||||||
if num > 1 {
|
|
||||||
return ErrMultiRows
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all data and map to []map[string]interface.
|
|
||||||
// expres means condition expression.
|
|
||||||
// it converts data to []map[column]value.
|
|
||||||
func (o *querySet) Values(results *[]Params, exprs ...string) (int64, error) {
|
|
||||||
return o.ValuesWithCtx(context.Background(), results, exprs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) ValuesWithCtx(ctx context.Context, results *[]Params, exprs ...string) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all data and map to [][]interface
|
|
||||||
// it converts data to [][column_index]value
|
|
||||||
func (o *querySet) ValuesList(results *[]ParamsList, exprs ...string) (int64, error) {
|
|
||||||
return o.ValuesListWithCtx(context.Background(), results, exprs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) ValuesListWithCtx(ctx context.Context, results *[]ParamsList, exprs ...string) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, exprs, results, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all data and map to []interface.
|
|
||||||
// it's designed for one row record set, auto change to []value, not [][column]value.
|
|
||||||
func (o *querySet) ValuesFlat(result *ParamsList, expr string) (int64, error) {
|
|
||||||
return o.ValuesFlatWithCtx(context.Background(), result, expr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *querySet) ValuesFlatWithCtx(ctx context.Context, result *ParamsList, expr string) (int64, error) {
|
|
||||||
return o.orm.alias.DbBaser.ReadValues(ctx, o.orm.db, o, o.mi, o.cond, []string{expr}, result, o.orm.alias.TZ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all rows into map[string]interface with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to map[string]interface{}{
|
|
||||||
// "total": 100,
|
|
||||||
// "found": 200,
|
|
||||||
// }
|
|
||||||
func (o *querySet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) {
|
|
||||||
panic(ErrNotImplement)
|
|
||||||
}
|
|
||||||
|
|
||||||
// query all rows into struct with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to struct {
|
|
||||||
// Total int
|
|
||||||
// Found int
|
|
||||||
// }
|
|
||||||
func (o *querySet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) {
|
|
||||||
panic(ErrNotImplement)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create new QuerySeter.
|
|
||||||
func newQuerySet(orm *ormBase, mi *modelInfo) QuerySeter {
|
|
||||||
o := new(querySet)
|
|
||||||
o.mi = mi
|
|
||||||
o.orm = orm
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// aggregate func
|
|
||||||
func (o querySet) Aggregate(s string) QuerySeter {
|
|
||||||
o.aggregate = s
|
|
||||||
return &o
|
|
||||||
}
|
|
@ -1,910 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// raw sql string prepared statement
|
|
||||||
type rawPrepare struct {
|
|
||||||
rs *rawSet
|
|
||||||
stmt stmtQuerier
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *rawPrepare) Exec(args ...interface{}) (sql.Result, error) {
|
|
||||||
if o.closed {
|
|
||||||
return nil, ErrStmtClosed
|
|
||||||
}
|
|
||||||
flatParams := getFlatParams(nil, args, o.rs.orm.alias.TZ)
|
|
||||||
return o.stmt.Exec(flatParams...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *rawPrepare) Close() error {
|
|
||||||
o.closed = true
|
|
||||||
return o.stmt.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRawPreparer(rs *rawSet) (RawPreparer, error) {
|
|
||||||
o := new(rawPrepare)
|
|
||||||
o.rs = rs
|
|
||||||
|
|
||||||
query := rs.query
|
|
||||||
rs.orm.alias.DbBaser.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
st, err := rs.orm.db.Prepare(query)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if Debug {
|
|
||||||
o.stmt = newStmtQueryLog(rs.orm.alias, st, query)
|
|
||||||
} else {
|
|
||||||
o.stmt = st
|
|
||||||
}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// raw query seter
|
|
||||||
type rawSet struct {
|
|
||||||
query string
|
|
||||||
args []interface{}
|
|
||||||
orm *ormBase
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ RawSeter = new(rawSet)
|
|
||||||
|
|
||||||
// set args for every query
|
|
||||||
func (o rawSet) SetArgs(args ...interface{}) RawSeter {
|
|
||||||
o.args = args
|
|
||||||
return &o
|
|
||||||
}
|
|
||||||
|
|
||||||
// execute raw sql and return sql.Result
|
|
||||||
func (o *rawSet) Exec() (sql.Result, error) {
|
|
||||||
query := o.query
|
|
||||||
o.orm.alias.DbBaser.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
args := getFlatParams(nil, o.args, o.orm.alias.TZ)
|
|
||||||
return o.orm.db.Exec(query, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// set field value to row container
|
|
||||||
func (o *rawSet) setFieldValue(ind reflect.Value, value interface{}) {
|
|
||||||
switch ind.Kind() {
|
|
||||||
case reflect.Bool:
|
|
||||||
if value == nil {
|
|
||||||
ind.SetBool(false)
|
|
||||||
} else if v, ok := value.(bool); ok {
|
|
||||||
ind.SetBool(v)
|
|
||||||
} else {
|
|
||||||
v, _ := StrTo(ToStr(value)).Bool()
|
|
||||||
ind.SetBool(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.String:
|
|
||||||
if value == nil {
|
|
||||||
ind.SetString("")
|
|
||||||
} else {
|
|
||||||
ind.SetString(ToStr(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
if value == nil {
|
|
||||||
ind.SetInt(0)
|
|
||||||
} else {
|
|
||||||
val := reflect.ValueOf(value)
|
|
||||||
switch val.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
ind.SetInt(val.Int())
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
ind.SetInt(int64(val.Uint()))
|
|
||||||
default:
|
|
||||||
v, _ := StrTo(ToStr(value)).Int64()
|
|
||||||
ind.SetInt(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
if value == nil {
|
|
||||||
ind.SetUint(0)
|
|
||||||
} else {
|
|
||||||
val := reflect.ValueOf(value)
|
|
||||||
switch val.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
ind.SetUint(uint64(val.Int()))
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
ind.SetUint(val.Uint())
|
|
||||||
default:
|
|
||||||
v, _ := StrTo(ToStr(value)).Uint64()
|
|
||||||
ind.SetUint(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case reflect.Float64, reflect.Float32:
|
|
||||||
if value == nil {
|
|
||||||
ind.SetFloat(0)
|
|
||||||
} else {
|
|
||||||
val := reflect.ValueOf(value)
|
|
||||||
switch val.Kind() {
|
|
||||||
case reflect.Float64:
|
|
||||||
ind.SetFloat(val.Float())
|
|
||||||
default:
|
|
||||||
v, _ := StrTo(ToStr(value)).Float64()
|
|
||||||
ind.SetFloat(v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Struct:
|
|
||||||
if value == nil {
|
|
||||||
ind.Set(reflect.Zero(ind.Type()))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
switch ind.Interface().(type) {
|
|
||||||
case time.Time:
|
|
||||||
var str string
|
|
||||||
switch d := value.(type) {
|
|
||||||
case time.Time:
|
|
||||||
o.orm.alias.DbBaser.TimeFromDB(&d, o.orm.alias.TZ)
|
|
||||||
ind.Set(reflect.ValueOf(d))
|
|
||||||
case []byte:
|
|
||||||
str = string(d)
|
|
||||||
case string:
|
|
||||||
str = d
|
|
||||||
}
|
|
||||||
if str != "" {
|
|
||||||
if len(str) >= 19 {
|
|
||||||
str = str[:19]
|
|
||||||
t, err := time.ParseInLocation(formatDateTime, str, o.orm.alias.TZ)
|
|
||||||
if err == nil {
|
|
||||||
t = t.In(DefaultTimeLoc)
|
|
||||||
ind.Set(reflect.ValueOf(t))
|
|
||||||
}
|
|
||||||
} else if len(str) >= 10 {
|
|
||||||
str = str[:10]
|
|
||||||
t, err := time.ParseInLocation(formatDate, str, DefaultTimeLoc)
|
|
||||||
if err == nil {
|
|
||||||
ind.Set(reflect.ValueOf(t))
|
|
||||||
}
|
|
||||||
} else if len(str) >= 8 {
|
|
||||||
str = str[:8]
|
|
||||||
t, err := time.ParseInLocation(formatTime, str, DefaultTimeLoc)
|
|
||||||
if err == nil {
|
|
||||||
ind.Set(reflect.ValueOf(t))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case sql.NullString, sql.NullInt64, sql.NullFloat64, sql.NullBool:
|
|
||||||
indi := reflect.New(ind.Type()).Interface()
|
|
||||||
sc, ok := indi.(sql.Scanner)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
err := sc.Scan(value)
|
|
||||||
if err == nil {
|
|
||||||
ind.Set(reflect.Indirect(reflect.ValueOf(sc)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case reflect.Ptr:
|
|
||||||
if value == nil {
|
|
||||||
ind.Set(reflect.Zero(ind.Type()))
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ind.Set(reflect.New(ind.Type().Elem()))
|
|
||||||
o.setFieldValue(reflect.Indirect(ind), value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// set field value in loop for slice container
|
|
||||||
func (o *rawSet) loopSetRefs(refs []interface{}, sInds []reflect.Value, nIndsPtr *[]reflect.Value, eTyps []reflect.Type, init bool) {
|
|
||||||
nInds := *nIndsPtr
|
|
||||||
|
|
||||||
cur := 0
|
|
||||||
for i := 0; i < len(sInds); i++ {
|
|
||||||
sInd := sInds[i]
|
|
||||||
eTyp := eTyps[i]
|
|
||||||
|
|
||||||
typ := eTyp
|
|
||||||
isPtr := false
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
isPtr = true
|
|
||||||
typ = typ.Elem()
|
|
||||||
}
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
isPtr = true
|
|
||||||
typ = typ.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
var nInd reflect.Value
|
|
||||||
if init {
|
|
||||||
nInd = reflect.New(sInd.Type()).Elem()
|
|
||||||
} else {
|
|
||||||
nInd = nInds[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
val := reflect.New(typ)
|
|
||||||
ind := val.Elem()
|
|
||||||
|
|
||||||
tpName := ind.Type().String()
|
|
||||||
|
|
||||||
if ind.Kind() == reflect.Struct {
|
|
||||||
if tpName == "time.Time" {
|
|
||||||
value := reflect.ValueOf(refs[cur]).Elem().Interface()
|
|
||||||
if isPtr && value == nil {
|
|
||||||
val = reflect.New(val.Type()).Elem()
|
|
||||||
} else {
|
|
||||||
o.setFieldValue(ind, value)
|
|
||||||
}
|
|
||||||
cur++
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
value := reflect.ValueOf(refs[cur]).Elem().Interface()
|
|
||||||
if isPtr && value == nil {
|
|
||||||
val = reflect.New(val.Type()).Elem()
|
|
||||||
} else {
|
|
||||||
o.setFieldValue(ind, value)
|
|
||||||
}
|
|
||||||
cur++
|
|
||||||
}
|
|
||||||
|
|
||||||
if nInd.Kind() == reflect.Slice {
|
|
||||||
if isPtr {
|
|
||||||
nInd = reflect.Append(nInd, val)
|
|
||||||
} else {
|
|
||||||
nInd = reflect.Append(nInd, ind)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if isPtr {
|
|
||||||
nInd.Set(val)
|
|
||||||
} else {
|
|
||||||
nInd.Set(ind)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
nInds[i] = nInd
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// query data and map to container
|
|
||||||
func (o *rawSet) QueryRow(containers ...interface{}) error {
|
|
||||||
var (
|
|
||||||
refs = make([]interface{}, 0, len(containers))
|
|
||||||
sInds []reflect.Value
|
|
||||||
eTyps []reflect.Type
|
|
||||||
sMi *modelInfo
|
|
||||||
)
|
|
||||||
structMode := false
|
|
||||||
for _, container := range containers {
|
|
||||||
val := reflect.ValueOf(container)
|
|
||||||
ind := reflect.Indirect(val)
|
|
||||||
|
|
||||||
if val.Kind() != reflect.Ptr {
|
|
||||||
panic(fmt.Errorf("<RawSeter.QueryRow> all args must be use ptr"))
|
|
||||||
}
|
|
||||||
|
|
||||||
etyp := ind.Type()
|
|
||||||
typ := etyp
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
typ = typ.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
sInds = append(sInds, ind)
|
|
||||||
eTyps = append(eTyps, etyp)
|
|
||||||
|
|
||||||
if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
|
|
||||||
if len(containers) > 1 {
|
|
||||||
panic(fmt.Errorf("<RawSeter.QueryRow> now support one struct only. see #384"))
|
|
||||||
}
|
|
||||||
|
|
||||||
structMode = true
|
|
||||||
fn := getFullName(typ)
|
|
||||||
if mi, ok := defaultModelCache.getByFullName(fn); ok {
|
|
||||||
sMi = mi
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var ref interface{}
|
|
||||||
refs = append(refs, &ref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
query := o.query
|
|
||||||
o.orm.alias.DbBaser.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
args := getFlatParams(nil, o.args, o.orm.alias.TZ)
|
|
||||||
rows, err := o.orm.db.Query(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
return ErrNoRows
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
structTagMap := make(map[reflect.StructTag]map[string]string)
|
|
||||||
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
if rows.Next() {
|
|
||||||
if structMode {
|
|
||||||
columns, err := rows.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
columnsMp := make(map[string]interface{}, len(columns))
|
|
||||||
|
|
||||||
refs = make([]interface{}, 0, len(columns))
|
|
||||||
for _, col := range columns {
|
|
||||||
var ref interface{}
|
|
||||||
columnsMp[col] = &ref
|
|
||||||
refs = append(refs, &ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := rows.Scan(refs...); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ind := sInds[0]
|
|
||||||
|
|
||||||
if ind.Kind() == reflect.Ptr {
|
|
||||||
if ind.IsNil() || !ind.IsValid() {
|
|
||||||
ind.Set(reflect.New(eTyps[0].Elem()))
|
|
||||||
}
|
|
||||||
ind = ind.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
if sMi != nil {
|
|
||||||
for _, col := range columns {
|
|
||||||
if fi := sMi.fields.GetByColumn(col); fi != nil {
|
|
||||||
value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
|
|
||||||
field := ind.FieldByIndex(fi.fieldIndex)
|
|
||||||
if fi.fieldType&IsRelField > 0 {
|
|
||||||
mf := reflect.New(fi.relModelInfo.addrField.Elem().Type())
|
|
||||||
field.Set(mf)
|
|
||||||
field = mf.Elem().FieldByIndex(fi.relModelInfo.fields.pk.fieldIndex)
|
|
||||||
}
|
|
||||||
if fi.isFielder {
|
|
||||||
fd := field.Addr().Interface().(Fielder)
|
|
||||||
err := fd.SetRaw(value)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Errorf("set raw error:%s", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
o.setFieldValue(field, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// define recursive function
|
|
||||||
var recursiveSetField func(rv reflect.Value)
|
|
||||||
recursiveSetField = func(rv reflect.Value) {
|
|
||||||
for i := 0; i < rv.NumField(); i++ {
|
|
||||||
f := rv.Field(i)
|
|
||||||
fe := rv.Type().Field(i)
|
|
||||||
|
|
||||||
// check if the field is a Struct
|
|
||||||
// recursive the Struct type
|
|
||||||
if fe.Type.Kind() == reflect.Struct {
|
|
||||||
recursiveSetField(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
// thanks @Gazeboxu.
|
|
||||||
tags := structTagMap[fe.Tag]
|
|
||||||
if tags == nil {
|
|
||||||
_, tags = parseStructTag(fe.Tag.Get(defaultStructTagName))
|
|
||||||
structTagMap[fe.Tag] = tags
|
|
||||||
}
|
|
||||||
var col string
|
|
||||||
if col = tags["column"]; col == "" {
|
|
||||||
col = nameStrategyMap[nameStrategy](fe.Name)
|
|
||||||
}
|
|
||||||
if v, ok := columnsMp[col]; ok {
|
|
||||||
value := reflect.ValueOf(v).Elem().Interface()
|
|
||||||
o.setFieldValue(f, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// init call the recursive function
|
|
||||||
recursiveSetField(ind)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
if err := rows.Scan(refs...); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
nInds := make([]reflect.Value, len(sInds))
|
|
||||||
o.loopSetRefs(refs, sInds, &nInds, eTyps, true)
|
|
||||||
for i, sInd := range sInds {
|
|
||||||
nInd := nInds[i]
|
|
||||||
sInd.Set(nInd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return ErrNoRows
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// query data rows and map to container
|
|
||||||
func (o *rawSet) QueryRows(containers ...interface{}) (int64, error) {
|
|
||||||
var (
|
|
||||||
refs = make([]interface{}, 0, len(containers))
|
|
||||||
sInds []reflect.Value
|
|
||||||
eTyps []reflect.Type
|
|
||||||
sMi *modelInfo
|
|
||||||
)
|
|
||||||
structMode := false
|
|
||||||
for _, container := range containers {
|
|
||||||
val := reflect.ValueOf(container)
|
|
||||||
sInd := reflect.Indirect(val)
|
|
||||||
if val.Kind() != reflect.Ptr || sInd.Kind() != reflect.Slice {
|
|
||||||
panic(fmt.Errorf("<RawSeter.QueryRows> all args must be use ptr slice"))
|
|
||||||
}
|
|
||||||
|
|
||||||
etyp := sInd.Type().Elem()
|
|
||||||
typ := etyp
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
typ = typ.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
sInds = append(sInds, sInd)
|
|
||||||
eTyps = append(eTyps, etyp)
|
|
||||||
|
|
||||||
if typ.Kind() == reflect.Struct && typ.String() != "time.Time" {
|
|
||||||
if len(containers) > 1 {
|
|
||||||
panic(fmt.Errorf("<RawSeter.QueryRow> now support one struct only. see #384"))
|
|
||||||
}
|
|
||||||
|
|
||||||
structMode = true
|
|
||||||
fn := getFullName(typ)
|
|
||||||
if mi, ok := defaultModelCache.getByFullName(fn); ok {
|
|
||||||
sMi = mi
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var ref interface{}
|
|
||||||
refs = append(refs, &ref)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
query := o.query
|
|
||||||
o.orm.alias.DbBaser.ReplaceMarks(&query)
|
|
||||||
|
|
||||||
args := getFlatParams(nil, o.args, o.orm.alias.TZ)
|
|
||||||
rows, err := o.orm.db.Query(query, args...)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer rows.Close()
|
|
||||||
|
|
||||||
var cnt int64
|
|
||||||
nInds := make([]reflect.Value, len(sInds))
|
|
||||||
sInd := sInds[0]
|
|
||||||
|
|
||||||
for rows.Next() {
|
|
||||||
|
|
||||||
if structMode {
|
|
||||||
columns, err := rows.Columns()
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
columnsMp := make(map[string]interface{}, len(columns))
|
|
||||||
|
|
||||||
refs = make([]interface{}, 0, len(columns))
|
|
||||||
for _, col := range columns {
|
|
||||||
var ref interface{}
|
|
||||||
columnsMp[col] = &ref
|
|
||||||
refs = append(refs, &ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := rows.Scan(refs...); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if cnt == 0 && !sInd.IsNil() {
|
|
||||||
sInd.Set(reflect.New(sInd.Type()).Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
var ind reflect.Value
|
|
||||||
if eTyps[0].Kind() == reflect.Ptr {
|
|
||||||
ind = reflect.New(eTyps[0].Elem())
|
|
||||||
} else {
|
|
||||||
ind = reflect.New(eTyps[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
if ind.Kind() == reflect.Ptr {
|
|
||||||
ind = ind.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
if sMi != nil {
|
|
||||||
for _, col := range columns {
|
|
||||||
if fi := sMi.fields.GetByColumn(col); fi != nil {
|
|
||||||
value := reflect.ValueOf(columnsMp[col]).Elem().Interface()
|
|
||||||
field := ind.FieldByIndex(fi.fieldIndex)
|
|
||||||
if fi.fieldType&IsRelField > 0 {
|
|
||||||
mf := reflect.New(fi.relModelInfo.addrField.Elem().Type())
|
|
||||||
field.Set(mf)
|
|
||||||
field = mf.Elem().FieldByIndex(fi.relModelInfo.fields.pk.fieldIndex)
|
|
||||||
}
|
|
||||||
if fi.isFielder {
|
|
||||||
fd := field.Addr().Interface().(Fielder)
|
|
||||||
err := fd.SetRaw(value)
|
|
||||||
if err != nil {
|
|
||||||
return 0, errors.Errorf("set raw error:%s", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
o.setFieldValue(field, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// define recursive function
|
|
||||||
var recursiveSetField func(rv reflect.Value)
|
|
||||||
recursiveSetField = func(rv reflect.Value) {
|
|
||||||
for i := 0; i < rv.NumField(); i++ {
|
|
||||||
f := rv.Field(i)
|
|
||||||
fe := rv.Type().Field(i)
|
|
||||||
|
|
||||||
// check if the field is a Struct
|
|
||||||
// recursive the Struct type
|
|
||||||
if fe.Type.Kind() == reflect.Struct {
|
|
||||||
recursiveSetField(f)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, tags := parseStructTag(fe.Tag.Get(defaultStructTagName))
|
|
||||||
var col string
|
|
||||||
if col = tags["column"]; col == "" {
|
|
||||||
col = nameStrategyMap[nameStrategy](fe.Name)
|
|
||||||
}
|
|
||||||
if v, ok := columnsMp[col]; ok {
|
|
||||||
value := reflect.ValueOf(v).Elem().Interface()
|
|
||||||
o.setFieldValue(f, value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// init call the recursive function
|
|
||||||
recursiveSetField(ind)
|
|
||||||
}
|
|
||||||
|
|
||||||
if eTyps[0].Kind() == reflect.Ptr {
|
|
||||||
ind = ind.Addr()
|
|
||||||
}
|
|
||||||
|
|
||||||
sInd = reflect.Append(sInd, ind)
|
|
||||||
|
|
||||||
} else {
|
|
||||||
if err := rows.Scan(refs...); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
o.loopSetRefs(refs, sInds, &nInds, eTyps, cnt == 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
cnt++
|
|
||||||
}
|
|
||||||
|
|
||||||
if cnt > 0 {
|
|
||||||
if structMode {
|
|
||||||
sInds[0].Set(sInd)
|
|
||||||
} else {
|
|
||||||
for i, sInd := range sInds {
|
|
||||||
nInd := nInds[i]
|
|
||||||
sInd.Set(nInd)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return cnt, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// readValues executes the raw query and stores the result into one of three
// container shapes: *[]Params (row maps), *[]ParamsList (row value lists) or
// *ParamsList (flat value list). needCols optionally restricts output to the
// listed column names. All values are read as strings (NULL becomes nil).
// Returns the number of rows read; panics on an unsupported container type.
func (o *rawSet) readValues(container interface{}, needCols []string) (int64, error) {
	var (
		maps  []Params
		lists []ParamsList
		list  ParamsList
	)

	// typ encodes the container kind: 1 = *[]Params, 2 = *[]ParamsList, 3 = *ParamsList.
	typ := 0
	switch container.(type) {
	case *[]Params:
		typ = 1
	case *[]ParamsList:
		typ = 2
	case *ParamsList:
		typ = 3
	default:
		panic(fmt.Errorf("<RawSeter> unsupport read values type `%T`", container))
	}

	query := o.query
	// Translate placeholder marks into the driver's syntax.
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)

	var rs *sql.Rows
	rs, err := o.orm.db.Query(query, args...)
	if err != nil {
		return 0, err
	}

	defer rs.Close()

	var (
		refs   []interface{}
		cnt    int64
		cols   []string
		indexs []int // column positions to copy into the output
	)

	for rs.Next() {
		if cnt == 0 {
			// First row: discover columns and build the scan destinations once.
			columns, err := rs.Columns()
			if err != nil {
				return 0, err
			}
			if len(needCols) > 0 {
				indexs = make([]int, 0, len(needCols))
			} else {
				indexs = make([]int, 0, len(columns))
			}

			cols = columns
			refs = make([]interface{}, len(cols))
			for i := range refs {
				// Scan everything as NullString so NULL is distinguishable.
				var ref sql.NullString
				refs[i] = &ref

				if len(needCols) > 0 {
					// Keep only requested columns, in result-set order.
					for _, c := range needCols {
						if c == cols[i] {
							indexs = append(indexs, i)
						}
					}
				} else {
					indexs = append(indexs, i)
				}
			}
		}

		if err := rs.Scan(refs...); err != nil {
			return 0, err
		}

		switch typ {
		case 1:
			params := make(Params, len(cols))
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					params[cols[i]] = value.String
				} else {
					params[cols[i]] = nil
				}
			}
			maps = append(maps, params)
		case 2:
			params := make(ParamsList, 0, len(cols))
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					params = append(params, value.String)
				} else {
					params = append(params, nil)
				}
			}
			lists = append(lists, params)
		case 3:
			for _, i := range indexs {
				ref := refs[i]
				value := reflect.Indirect(reflect.ValueOf(ref)).Interface().(sql.NullString)
				if value.Valid {
					list = append(list, value.String)
				} else {
					list = append(list, nil)
				}
			}
		}

		cnt++
	}

	// Write the accumulated result back into the caller's container.
	switch v := container.(type) {
	case *[]Params:
		*v = maps
	case *[]ParamsList:
		*v = lists
	case *ParamsList:
		*v = list
	}

	return cnt, nil
}
|
|
||||||
|
|
||||||
// queryRowsTo executes the raw query and folds the rows into a key/value
// projection: with a *Params container (typ 1) it builds a map from the
// keyCol column to the valueCol column; with a pointer-to-struct container
// (typ 2) it assigns each row's value to the struct field whose name is the
// camel-cased key. Returns the number of rows read; panics on an unsupported
// container type or when keyCol/valueCol are not present in the result set.
func (o *rawSet) queryRowsTo(container interface{}, keyCol, valueCol string) (int64, error) {
	var (
		maps Params
		ind  *reflect.Value
	)

	var typ int
	switch container.(type) {
	case *Params:
		typ = 1
	default:
		typ = 2
		vl := reflect.ValueOf(container)
		id := reflect.Indirect(vl)
		if vl.Kind() != reflect.Ptr || id.Kind() != reflect.Struct {
			panic(fmt.Errorf("<RawSeter> RowsTo unsupport type `%T` need ptr struct", container))
		}

		ind = &id
	}

	query := o.query
	// Translate placeholder marks into the driver's syntax.
	o.orm.alias.DbBaser.ReplaceMarks(&query)

	args := getFlatParams(nil, o.args, o.orm.alias.TZ)

	rs, err := o.orm.db.Query(query, args...)
	if err != nil {
		return 0, err
	}

	defer rs.Close()

	var (
		refs []interface{}
		cnt  int64
		cols []string
	)

	// Positions of the key and value columns in the result set; -1 until found.
	var (
		keyIndex   = -1
		valueIndex = -1
	)

	for rs.Next() {
		if cnt == 0 {
			// First row: locate key/value columns and build scan destinations.
			columns, err := rs.Columns()
			if err != nil {
				return 0, err
			}
			cols = columns
			refs = make([]interface{}, len(cols))
			for i := range refs {
				if keyCol == cols[i] {
					keyIndex = i
				}
				// Map mode scans every column as NullString; struct mode only
				// scans the key column as NullString and leaves the rest as
				// raw interface{} so setFieldValue can convert them.
				if typ == 1 || keyIndex == i {
					var ref sql.NullString
					refs[i] = &ref
				} else {
					var ref interface{}
					refs[i] = &ref
				}
				if valueCol == cols[i] {
					valueIndex = i
				}
			}
			if keyIndex == -1 || valueIndex == -1 {
				panic(fmt.Errorf("<RawSeter> RowsTo unknown key, value column name `%s: %s`", keyCol, valueCol))
			}
		}

		if err := rs.Scan(refs...); err != nil {
			return 0, err
		}

		if cnt == 0 {
			switch typ {
			case 1:
				maps = make(Params)
			}
		}

		key := reflect.Indirect(reflect.ValueOf(refs[keyIndex])).Interface().(sql.NullString).String

		switch typ {
		case 1:
			value := reflect.Indirect(reflect.ValueOf(refs[valueIndex])).Interface().(sql.NullString)
			if value.Valid {
				maps[key] = value.String
			} else {
				maps[key] = nil
			}

		default:
			// Struct mode: rows whose camel-cased key does not match an
			// existing field are silently skipped.
			if id := ind.FieldByName(camelString(key)); id.IsValid() {
				o.setFieldValue(id, reflect.ValueOf(refs[valueIndex]).Elem().Interface())
			}
		}

		cnt++
	}

	if typ == 1 {
		v, _ := container.(*Params)
		*v = maps
	}

	return cnt, nil
}
|
|
||||||
|
|
||||||
// Values queries the data into a []map[string]interface (one Params map per
// row); cols optionally restricts the columns returned.
// It delegates to readValues with the *[]Params container shape.
func (o *rawSet) Values(container *[]Params, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}
|
|
||||||
|
|
||||||
// ValuesList queries the data into a [][]interface (one ParamsList per row);
// cols optionally restricts the columns returned.
// It delegates to readValues with the *[]ParamsList container shape.
func (o *rawSet) ValuesList(container *[]ParamsList, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}
|
|
||||||
|
|
||||||
// ValuesFlat queries the data into a single flat []interface across all rows;
// cols optionally restricts the columns returned.
// It delegates to readValues with the *ParamsList container shape.
func (o *rawSet) ValuesFlat(container *ParamsList, cols ...string) (int64, error) {
	return o.readValues(container, cols)
}
|
|
||||||
|
|
||||||
// RowsToMap queries all rows into map[string]interface with the specified key
// and value column names.
// keyCol = "name", valueCol = "value"
// table data
// name  | value
// total | 100
// found | 200
// to map[string]interface{}{
// 	"total": 100,
// 	"found": 200,
// }
func (o *rawSet) RowsToMap(result *Params, keyCol, valueCol string) (int64, error) {
	return o.queryRowsTo(result, keyCol, valueCol)
}
|
|
||||||
|
|
||||||
// RowsToStruct queries all rows into a struct with the specified key and
// value column names: each row's key is camel-cased and matched against a
// struct field, which then receives the row's value.
// keyCol = "name", valueCol = "value"
// table data
// name  | value
// total | 100
// found | 200
// to struct {
// 	Total int
// 	Found int
// }
func (o *rawSet) RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error) {
	return o.queryRowsTo(ptrStruct, keyCol, valueCol)
}
|
|
||||||
|
|
||||||
// Prepare returns a prepared raw statement wrapper so the same query can be
// executed multiple times efficiently.
func (o *rawSet) Prepare() (RawPreparer, error) {
	return newRawPreparer(o)
}
|
|
||||||
|
|
||||||
func newRawSet(orm *ormBase, query string, args []interface{}) RawSeter {
|
|
||||||
o := new(rawSet)
|
|
||||||
o.query = query
|
|
||||||
o.args = args
|
|
||||||
o.orm = orm
|
|
||||||
return o
|
|
||||||
}
|
|
@ -1,62 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import "errors"
|
|
||||||
|
|
||||||
// QueryBuilder is the Query builder interface. Implementations accumulate SQL
// tokens through chained calls and render the final statement via String.
type QueryBuilder interface {
	// Select/From and the join/filter methods each append one SQL clause.
	Select(fields ...string) QueryBuilder
	ForUpdate() QueryBuilder
	From(tables ...string) QueryBuilder
	InnerJoin(table string) QueryBuilder
	LeftJoin(table string) QueryBuilder
	RightJoin(table string) QueryBuilder
	On(cond string) QueryBuilder
	Where(cond string) QueryBuilder
	And(cond string) QueryBuilder
	Or(cond string) QueryBuilder
	In(vals ...string) QueryBuilder
	// Ordering, paging and grouping clauses.
	OrderBy(fields ...string) QueryBuilder
	Asc() QueryBuilder
	Desc() QueryBuilder
	Limit(limit int) QueryBuilder
	Offset(offset int) QueryBuilder
	GroupBy(fields ...string) QueryBuilder
	Having(cond string) QueryBuilder
	// Mutation statements.
	Update(tables ...string) QueryBuilder
	Set(kv ...string) QueryBuilder
	Delete(tables ...string) QueryBuilder
	InsertInto(table string, fields ...string) QueryBuilder
	Values(vals ...string) QueryBuilder
	// Subquery wraps a sub-statement as "(sub) AS alias".
	Subquery(sub string, alias string) string
	// String renders the accumulated statement.
	String() string
}
|
|
||||||
|
|
||||||
// NewQueryBuilder return the QueryBuilder
|
|
||||||
func NewQueryBuilder(driver string) (qb QueryBuilder, err error) {
|
|
||||||
if driver == "mysql" {
|
|
||||||
qb = new(MySQLQueryBuilder)
|
|
||||||
} else if driver == "tidb" {
|
|
||||||
qb = new(TiDBQueryBuilder)
|
|
||||||
} else if driver == "postgres" {
|
|
||||||
qb = new(PostgresQueryBuilder)
|
|
||||||
} else if driver == "sqlite" {
|
|
||||||
err = errors.New("sqlite query builder is not supported yet")
|
|
||||||
} else {
|
|
||||||
err = errors.New("unknown driver for query builder")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
@ -1,187 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// CommaSpace is the separation
|
|
||||||
const CommaSpace = ", "
|
|
||||||
|
|
||||||
// MySQLQueryBuilder is the SQL build
|
|
||||||
type MySQLQueryBuilder struct {
|
|
||||||
tokens []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select will join the fields
|
|
||||||
func (qb *MySQLQueryBuilder) Select(fields ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "SELECT", strings.Join(fields, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForUpdate add the FOR UPDATE clause
|
|
||||||
func (qb *MySQLQueryBuilder) ForUpdate() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "FOR UPDATE")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// From join the tables
|
|
||||||
func (qb *MySQLQueryBuilder) From(tables ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "FROM", strings.Join(tables, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// InnerJoin INNER JOIN the table
|
|
||||||
func (qb *MySQLQueryBuilder) InnerJoin(table string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "INNER JOIN", table)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// LeftJoin LEFT JOIN the table
|
|
||||||
func (qb *MySQLQueryBuilder) LeftJoin(table string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "LEFT JOIN", table)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// RightJoin RIGHT JOIN the table
|
|
||||||
func (qb *MySQLQueryBuilder) RightJoin(table string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "RIGHT JOIN", table)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// On join with on cond
|
|
||||||
func (qb *MySQLQueryBuilder) On(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "ON", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Where join the Where cond
|
|
||||||
func (qb *MySQLQueryBuilder) Where(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "WHERE", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// And join the and cond
|
|
||||||
func (qb *MySQLQueryBuilder) And(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "AND", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Or join the or cond
|
|
||||||
func (qb *MySQLQueryBuilder) Or(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "OR", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// In join the IN (vals)
|
|
||||||
func (qb *MySQLQueryBuilder) In(vals ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrderBy join the Order by fields
|
|
||||||
func (qb *MySQLQueryBuilder) OrderBy(fields ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "ORDER BY", strings.Join(fields, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Asc join the asc
|
|
||||||
func (qb *MySQLQueryBuilder) Asc() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "ASC")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Desc join the desc
|
|
||||||
func (qb *MySQLQueryBuilder) Desc() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "DESC")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit join the limit num
|
|
||||||
func (qb *MySQLQueryBuilder) Limit(limit int) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "LIMIT", strconv.Itoa(limit))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Offset join the offset num
|
|
||||||
func (qb *MySQLQueryBuilder) Offset(offset int) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "OFFSET", strconv.Itoa(offset))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// GroupBy join the Group by fields
|
|
||||||
func (qb *MySQLQueryBuilder) GroupBy(fields ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "GROUP BY", strings.Join(fields, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Having join the Having cond
|
|
||||||
func (qb *MySQLQueryBuilder) Having(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "HAVING", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update join the update table
|
|
||||||
func (qb *MySQLQueryBuilder) Update(tables ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "UPDATE", strings.Join(tables, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set join the set kv
|
|
||||||
func (qb *MySQLQueryBuilder) Set(kv ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "SET", strings.Join(kv, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete join the Delete tables
|
|
||||||
func (qb *MySQLQueryBuilder) Delete(tables ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "DELETE")
|
|
||||||
if len(tables) != 0 {
|
|
||||||
qb.tokens = append(qb.tokens, strings.Join(tables, CommaSpace))
|
|
||||||
}
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertInto join the insert SQL
|
|
||||||
func (qb *MySQLQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "INSERT INTO", table)
|
|
||||||
if len(fields) != 0 {
|
|
||||||
fieldsStr := strings.Join(fields, CommaSpace)
|
|
||||||
qb.tokens = append(qb.tokens, "(", fieldsStr, ")")
|
|
||||||
}
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values join the Values(vals)
|
|
||||||
func (qb *MySQLQueryBuilder) Values(vals ...string) QueryBuilder {
|
|
||||||
valsStr := strings.Join(vals, CommaSpace)
|
|
||||||
qb.tokens = append(qb.tokens, "VALUES", "(", valsStr, ")")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subquery join the sub as alias
|
|
||||||
func (qb *MySQLQueryBuilder) Subquery(sub string, alias string) string {
|
|
||||||
return fmt.Sprintf("(%s) AS %s", sub, alias)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String join all tokens
|
|
||||||
func (qb *MySQLQueryBuilder) String() string {
|
|
||||||
s := strings.Join(qb.tokens, " ")
|
|
||||||
qb.tokens = qb.tokens[:0]
|
|
||||||
return s
|
|
||||||
}
|
|
@ -1,219 +0,0 @@
|
|||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var quote string = `"`
|
|
||||||
|
|
||||||
// PostgresQueryBuilder is the SQL build
|
|
||||||
type PostgresQueryBuilder struct {
|
|
||||||
tokens []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func processingStr(str []string) string {
|
|
||||||
s := strings.Join(str, `","`)
|
|
||||||
s = fmt.Sprintf("%s%s%s", quote, s, quote)
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// Select will join the fields
|
|
||||||
func (qb *PostgresQueryBuilder) Select(fields ...string) QueryBuilder {
|
|
||||||
var str string
|
|
||||||
n := len(fields)
|
|
||||||
|
|
||||||
if fields[0] == "*" {
|
|
||||||
str = "*"
|
|
||||||
} else {
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
sli := strings.Split(fields[i], ".")
|
|
||||||
s := strings.Join(sli, `"."`)
|
|
||||||
s = fmt.Sprintf("%s%s%s", quote, s, quote)
|
|
||||||
if n == 1 || i == n-1 {
|
|
||||||
str += s
|
|
||||||
} else {
|
|
||||||
str += s + ","
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
qb.tokens = append(qb.tokens, "SELECT", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForUpdate add the FOR UPDATE clause
|
|
||||||
func (qb *PostgresQueryBuilder) ForUpdate() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "FOR UPDATE")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// From join the tables
|
|
||||||
func (qb *PostgresQueryBuilder) From(tables ...string) QueryBuilder {
|
|
||||||
str := processingStr(tables)
|
|
||||||
qb.tokens = append(qb.tokens, "FROM", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// InnerJoin INNER JOIN the table
|
|
||||||
func (qb *PostgresQueryBuilder) InnerJoin(table string) QueryBuilder {
|
|
||||||
str := fmt.Sprintf("%s%s%s", quote, table, quote)
|
|
||||||
qb.tokens = append(qb.tokens, "INNER JOIN", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// LeftJoin LEFT JOIN the table
|
|
||||||
func (qb *PostgresQueryBuilder) LeftJoin(table string) QueryBuilder {
|
|
||||||
str := fmt.Sprintf("%s%s%s", quote, table, quote)
|
|
||||||
qb.tokens = append(qb.tokens, "LEFT JOIN", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// RightJoin RIGHT JOIN the table
|
|
||||||
func (qb *PostgresQueryBuilder) RightJoin(table string) QueryBuilder {
|
|
||||||
str := fmt.Sprintf("%s%s%s", quote, table, quote)
|
|
||||||
qb.tokens = append(qb.tokens, "RIGHT JOIN", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// On join with on cond
|
|
||||||
func (qb *PostgresQueryBuilder) On(cond string) QueryBuilder {
|
|
||||||
var str string
|
|
||||||
cond = strings.Replace(cond, " ", "", -1)
|
|
||||||
slice := strings.Split(cond, "=")
|
|
||||||
for i := 0; i < len(slice); i++ {
|
|
||||||
sli := strings.Split(slice[i], ".")
|
|
||||||
s := strings.Join(sli, `"."`)
|
|
||||||
s = fmt.Sprintf("%s%s%s", quote, s, quote)
|
|
||||||
if i == 0 {
|
|
||||||
str = s + " =" + " "
|
|
||||||
} else {
|
|
||||||
str += s
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
qb.tokens = append(qb.tokens, "ON", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Where join the Where cond
|
|
||||||
func (qb *PostgresQueryBuilder) Where(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "WHERE", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// And join the and cond
|
|
||||||
func (qb *PostgresQueryBuilder) And(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "AND", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Or join the or cond
|
|
||||||
func (qb *PostgresQueryBuilder) Or(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "OR", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// In join the IN (vals)
|
|
||||||
func (qb *PostgresQueryBuilder) In(vals ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "IN", "(", strings.Join(vals, CommaSpace), ")")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// OrderBy join the Order by fields
|
|
||||||
func (qb *PostgresQueryBuilder) OrderBy(fields ...string) QueryBuilder {
|
|
||||||
str := processingStr(fields)
|
|
||||||
qb.tokens = append(qb.tokens, "ORDER BY", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Asc join the asc
|
|
||||||
func (qb *PostgresQueryBuilder) Asc() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "ASC")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Desc join the desc
|
|
||||||
func (qb *PostgresQueryBuilder) Desc() QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "DESC")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Limit join the limit num
|
|
||||||
func (qb *PostgresQueryBuilder) Limit(limit int) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "LIMIT", strconv.Itoa(limit))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Offset join the offset num
|
|
||||||
func (qb *PostgresQueryBuilder) Offset(offset int) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "OFFSET", strconv.Itoa(offset))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// GroupBy join the Group by fields
|
|
||||||
func (qb *PostgresQueryBuilder) GroupBy(fields ...string) QueryBuilder {
|
|
||||||
str := processingStr(fields)
|
|
||||||
qb.tokens = append(qb.tokens, "GROUP BY", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Having join the Having cond
|
|
||||||
func (qb *PostgresQueryBuilder) Having(cond string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "HAVING", cond)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update join the update table
|
|
||||||
func (qb *PostgresQueryBuilder) Update(tables ...string) QueryBuilder {
|
|
||||||
str := processingStr(tables)
|
|
||||||
qb.tokens = append(qb.tokens, "UPDATE", str)
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set join the set kv
|
|
||||||
func (qb *PostgresQueryBuilder) Set(kv ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "SET", strings.Join(kv, CommaSpace))
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete join the Delete tables
|
|
||||||
func (qb *PostgresQueryBuilder) Delete(tables ...string) QueryBuilder {
|
|
||||||
qb.tokens = append(qb.tokens, "DELETE")
|
|
||||||
if len(tables) != 0 {
|
|
||||||
str := processingStr(tables)
|
|
||||||
qb.tokens = append(qb.tokens, str)
|
|
||||||
}
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// InsertInto join the insert SQL
|
|
||||||
func (qb *PostgresQueryBuilder) InsertInto(table string, fields ...string) QueryBuilder {
|
|
||||||
str := fmt.Sprintf("%s%s%s", quote, table, quote)
|
|
||||||
qb.tokens = append(qb.tokens, "INSERT INTO", str)
|
|
||||||
if len(fields) != 0 {
|
|
||||||
fieldsStr := strings.Join(fields, CommaSpace)
|
|
||||||
qb.tokens = append(qb.tokens, "(", fieldsStr, ")")
|
|
||||||
}
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Values join the Values(vals)
|
|
||||||
func (qb *PostgresQueryBuilder) Values(vals ...string) QueryBuilder {
|
|
||||||
valsStr := strings.Join(vals, CommaSpace)
|
|
||||||
qb.tokens = append(qb.tokens, "VALUES", "(", valsStr, ")")
|
|
||||||
return qb
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subquery join the sub as alias
|
|
||||||
func (qb *PostgresQueryBuilder) Subquery(sub string, alias string) string {
|
|
||||||
return fmt.Sprintf("(%s) AS %s", sub, alias)
|
|
||||||
}
|
|
||||||
|
|
||||||
// String join all tokens
|
|
||||||
func (qb *PostgresQueryBuilder) String() string {
|
|
||||||
s := strings.Join(qb.tokens, " ")
|
|
||||||
qb.tokens = qb.tokens[:0]
|
|
||||||
return s
|
|
||||||
}
|
|
@ -1,21 +0,0 @@
|
|||||||
// Copyright 2015 TiDB Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
// TiDBQueryBuilder is the SQL build
|
|
||||||
type TiDBQueryBuilder struct {
|
|
||||||
MySQLQueryBuilder
|
|
||||||
tokens []string
|
|
||||||
}
|
|
@ -1,658 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/beego/beego/v2/client/orm/clauses/order_clause"
|
|
||||||
"github.com/beego/beego/v2/core/utils"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TableNaming is usually used by model
|
|
||||||
// when you custom your table name, please implement this interfaces
|
|
||||||
// for example:
|
|
||||||
// type User struct {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
// func (u *User) TableName() string {
|
|
||||||
// return "USER_TABLE"
|
|
||||||
// }
|
|
||||||
type TableNameI interface {
|
|
||||||
TableName() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// TableEngineI is usually used by model
|
|
||||||
// when you want to use specific engine, like myisam, you can implement this interface
|
|
||||||
// for example:
|
|
||||||
// type User struct {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
// func (u *User) TableEngine() string {
|
|
||||||
// return "myisam"
|
|
||||||
// }
|
|
||||||
type TableEngineI interface {
|
|
||||||
TableEngine() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// TableIndexI is usually used by model
|
|
||||||
// when you want to create indexes, you can implement this interface
|
|
||||||
// for example:
|
|
||||||
// type User struct {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
// func (u *User) TableIndex() [][]string {
|
|
||||||
// return [][]string{{"Name"}}
|
|
||||||
// }
|
|
||||||
type TableIndexI interface {
|
|
||||||
TableIndex() [][]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// TableUniqueI is usually used by model
|
|
||||||
// when you want to create unique indexes, you can implement this interface
|
|
||||||
// for example:
|
|
||||||
// type User struct {
|
|
||||||
// ...
|
|
||||||
// }
|
|
||||||
// func (u *User) TableUnique() [][]string {
|
|
||||||
// return [][]string{{"Email"}}
|
|
||||||
// }
|
|
||||||
type TableUniqueI interface {
|
|
||||||
TableUnique() [][]string
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsApplicableTableForDB if return false, we won't create table to this db
|
|
||||||
type IsApplicableTableForDB interface {
|
|
||||||
IsApplicableTableForDB(db string) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Driver define database driver
|
|
||||||
type Driver interface {
|
|
||||||
Name() string
|
|
||||||
Type() DriverType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fielder define field info
|
|
||||||
type Fielder interface {
|
|
||||||
String() string
|
|
||||||
FieldType() int
|
|
||||||
SetRaw(interface{}) error
|
|
||||||
RawValue() interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type TxBeginner interface {
|
|
||||||
// self control transaction
|
|
||||||
Begin() (TxOrmer, error)
|
|
||||||
BeginWithCtx(ctx context.Context) (TxOrmer, error)
|
|
||||||
BeginWithOpts(opts *sql.TxOptions) (TxOrmer, error)
|
|
||||||
BeginWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions) (TxOrmer, error)
|
|
||||||
|
|
||||||
// closure control transaction
|
|
||||||
DoTx(task func(ctx context.Context, txOrm TxOrmer) error) error
|
|
||||||
DoTxWithCtx(ctx context.Context, task func(ctx context.Context, txOrm TxOrmer) error) error
|
|
||||||
DoTxWithOpts(opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error
|
|
||||||
DoTxWithCtxAndOpts(ctx context.Context, opts *sql.TxOptions, task func(ctx context.Context, txOrm TxOrmer) error) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type TxCommitter interface {
|
|
||||||
txEnder
|
|
||||||
}
|
|
||||||
|
|
||||||
// transaction beginner
|
|
||||||
type txer interface {
|
|
||||||
Begin() (*sql.Tx, error)
|
|
||||||
BeginTx(ctx context.Context, opts *sql.TxOptions) (*sql.Tx, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// transaction ending
|
|
||||||
type txEnder interface {
|
|
||||||
Commit() error
|
|
||||||
Rollback() error
|
|
||||||
|
|
||||||
// RollbackUnlessCommit if the transaction has been committed, do nothing, or transaction will be rollback
|
|
||||||
// For example:
|
|
||||||
// ```go
|
|
||||||
// txOrm := orm.Begin()
|
|
||||||
// defer txOrm.RollbackUnlessCommit()
|
|
||||||
// err := txOrm.Insert() // do something
|
|
||||||
// if err != nil {
|
|
||||||
// return err
|
|
||||||
// }
|
|
||||||
// txOrm.Commit()
|
|
||||||
// ```
|
|
||||||
RollbackUnlessCommit() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Data Manipulation Language
|
|
||||||
type DML interface {
|
|
||||||
// insert model data to database
|
|
||||||
// for example:
|
|
||||||
// user := new(User)
|
|
||||||
// id, err = Ormer.Insert(user)
|
|
||||||
// user must be a pointer and Insert will set user's pk field
|
|
||||||
Insert(md interface{}) (int64, error)
|
|
||||||
InsertWithCtx(ctx context.Context, md interface{}) (int64, error)
|
|
||||||
// mysql:InsertOrUpdate(model) or InsertOrUpdate(model,"colu=colu+value")
|
|
||||||
// if colu type is integer : can use(+-*/), string : convert(colu,"value")
|
|
||||||
// postgres: InsertOrUpdate(model,"conflictColumnName") or InsertOrUpdate(model,"conflictColumnName","colu=colu+value")
|
|
||||||
// if colu type is integer : can use(+-*/), string : colu || "value"
|
|
||||||
InsertOrUpdate(md interface{}, colConflitAndArgs ...string) (int64, error)
|
|
||||||
InsertOrUpdateWithCtx(ctx context.Context, md interface{}, colConflitAndArgs ...string) (int64, error)
|
|
||||||
// insert some models to database
|
|
||||||
InsertMulti(bulk int, mds interface{}) (int64, error)
|
|
||||||
InsertMultiWithCtx(ctx context.Context, bulk int, mds interface{}) (int64, error)
|
|
||||||
// update model to database.
|
|
||||||
// cols set the columns those want to update.
|
|
||||||
// find model by Id(pk) field and update columns specified by fields, if cols is null then update all columns
|
|
||||||
// for example:
|
|
||||||
// user := User{Id: 2}
|
|
||||||
// user.Langs = append(user.Langs, "zh-CN", "en-US")
|
|
||||||
// user.Extra.Name = "beego"
|
|
||||||
// user.Extra.Data = "orm"
|
|
||||||
// num, err = Ormer.Update(&user, "Langs", "Extra")
|
|
||||||
Update(md interface{}, cols ...string) (int64, error)
|
|
||||||
UpdateWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error)
|
|
||||||
// delete model in database
|
|
||||||
Delete(md interface{}, cols ...string) (int64, error)
|
|
||||||
DeleteWithCtx(ctx context.Context, md interface{}, cols ...string) (int64, error)
|
|
||||||
|
|
||||||
// return a raw query seter for raw sql string.
|
|
||||||
// for example:
|
|
||||||
// ormer.Raw("UPDATE `user` SET `user_name` = ? WHERE `user_name` = ?", "slene", "testing").Exec()
|
|
||||||
// // update user testing's name to slene
|
|
||||||
Raw(query string, args ...interface{}) RawSeter
|
|
||||||
RawWithCtx(ctx context.Context, query string, args ...interface{}) RawSeter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Data Query Language
|
|
||||||
type DQL interface {
|
|
||||||
// read data to model
|
|
||||||
// for example:
|
|
||||||
// this will find User by Id field
|
|
||||||
// u = &User{Id: user.Id}
|
|
||||||
// err = Ormer.Read(u)
|
|
||||||
// this will find User by UserName field
|
|
||||||
// u = &User{UserName: "astaxie", Password: "pass"}
|
|
||||||
// err = Ormer.Read(u, "UserName")
|
|
||||||
Read(md interface{}, cols ...string) error
|
|
||||||
ReadWithCtx(ctx context.Context, md interface{}, cols ...string) error
|
|
||||||
|
|
||||||
// Like Read(), but with "FOR UPDATE" clause, useful in transaction.
|
|
||||||
// Some databases are not support this feature.
|
|
||||||
ReadForUpdate(md interface{}, cols ...string) error
|
|
||||||
ReadForUpdateWithCtx(ctx context.Context, md interface{}, cols ...string) error
|
|
||||||
|
|
||||||
// Try to read a row from the database, or insert one if it doesn't exist
|
|
||||||
ReadOrCreate(md interface{}, col1 string, cols ...string) (bool, int64, error)
|
|
||||||
ReadOrCreateWithCtx(ctx context.Context, md interface{}, col1 string, cols ...string) (bool, int64, error)
|
|
||||||
|
|
||||||
// load related models to md model.
|
|
||||||
// args are limit, offset int and order string.
|
|
||||||
//
|
|
||||||
// example:
|
|
||||||
// Ormer.LoadRelated(post,"Tags")
|
|
||||||
// for _,tag := range post.Tags{...}
|
|
||||||
// hints.DefaultRelDepth useDefaultRelsDepth ; or depth 0
|
|
||||||
// hints.RelDepth loadRelationDepth
|
|
||||||
// hints.Limit limit default limit 1000
|
|
||||||
// hints.Offset int offset default offset 0
|
|
||||||
// hints.OrderBy string order for example : "-Id"
|
|
||||||
// make sure the relation is defined in model struct tags.
|
|
||||||
LoadRelated(md interface{}, name string, args ...utils.KV) (int64, error)
|
|
||||||
LoadRelatedWithCtx(ctx context.Context, md interface{}, name string, args ...utils.KV) (int64, error)
|
|
||||||
|
|
||||||
// create a models to models queryer
|
|
||||||
// for example:
|
|
||||||
// post := Post{Id: 4}
|
|
||||||
// m2m := Ormer.QueryM2M(&post, "Tags")
|
|
||||||
QueryM2M(md interface{}, name string) QueryM2Mer
|
|
||||||
// NOTE: this method is deprecated, context parameter will not take effect.
|
|
||||||
// Use context.Context directly on methods with `WithCtx` suffix such as InsertWithCtx/UpdateWithCtx
|
|
||||||
QueryM2MWithCtx(ctx context.Context, md interface{}, name string) QueryM2Mer
|
|
||||||
|
|
||||||
// return a QuerySeter for table operations.
|
|
||||||
// table name can be string or struct.
|
|
||||||
// e.g. QueryTable("user"), QueryTable(&user{}) or QueryTable((*User)(nil)),
|
|
||||||
QueryTable(ptrStructOrTableName interface{}) QuerySeter
|
|
||||||
// NOTE: this method is deprecated, context parameter will not take effect.
|
|
||||||
// Use context.Context directly on methods with `WithCtx` suffix such as InsertWithCtx/UpdateWithCtx
|
|
||||||
QueryTableWithCtx(ctx context.Context, ptrStructOrTableName interface{}) QuerySeter
|
|
||||||
|
|
||||||
DBStats() *sql.DBStats
|
|
||||||
}
|
|
||||||
|
|
||||||
type DriverGetter interface {
|
|
||||||
Driver() Driver
|
|
||||||
}
|
|
||||||
|
|
||||||
type ormer interface {
|
|
||||||
DQL
|
|
||||||
DML
|
|
||||||
DriverGetter
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryExecutor wrapping for ormer
|
|
||||||
type QueryExecutor interface {
|
|
||||||
ormer
|
|
||||||
}
|
|
||||||
|
|
||||||
type Ormer interface {
|
|
||||||
QueryExecutor
|
|
||||||
TxBeginner
|
|
||||||
}
|
|
||||||
|
|
||||||
type TxOrmer interface {
|
|
||||||
QueryExecutor
|
|
||||||
TxCommitter
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inserter insert prepared statement
|
|
||||||
type Inserter interface {
|
|
||||||
Insert(interface{}) (int64, error)
|
|
||||||
InsertWithCtx(context.Context, interface{}) (int64, error)
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// QuerySeter query seter
|
|
||||||
type QuerySeter interface {
|
|
||||||
// add condition expression to QuerySeter.
|
|
||||||
// for example:
|
|
||||||
// filter by UserName == 'slene'
|
|
||||||
// qs.Filter("UserName", "slene")
|
|
||||||
// sql : left outer join profile on t0.id1==t1.id2 where t1.age == 28
|
|
||||||
// Filter("profile__Age", 28)
|
|
||||||
// // time compare
|
|
||||||
// qs.Filter("created", time.Now())
|
|
||||||
Filter(string, ...interface{}) QuerySeter
|
|
||||||
// add raw sql to querySeter.
|
|
||||||
// for example:
|
|
||||||
// qs.FilterRaw("user_id IN (SELECT id FROM profile WHERE age>=18)")
|
|
||||||
// //sql-> WHERE user_id IN (SELECT id FROM profile WHERE age>=18)
|
|
||||||
FilterRaw(string, string) QuerySeter
|
|
||||||
// add NOT condition to querySeter.
|
|
||||||
// have the same usage as Filter
|
|
||||||
Exclude(string, ...interface{}) QuerySeter
|
|
||||||
// set condition to QuerySeter.
|
|
||||||
// sql's where condition
|
|
||||||
// cond := orm.NewCondition()
|
|
||||||
// cond1 := cond.And("profile__isnull", false).AndNot("status__in", 1).Or("profile__age__gt", 2000)
|
|
||||||
// //sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000
|
|
||||||
// num, err := qs.SetCond(cond1).Count()
|
|
||||||
SetCond(*Condition) QuerySeter
|
|
||||||
// get condition from QuerySeter.
|
|
||||||
// sql's where condition
|
|
||||||
// cond := orm.NewCondition()
|
|
||||||
// cond = cond.And("profile__isnull", false).AndNot("status__in", 1)
|
|
||||||
// qs = qs.SetCond(cond)
|
|
||||||
// cond = qs.GetCond()
|
|
||||||
// cond := cond.Or("profile__age__gt", 2000)
|
|
||||||
// //sql-> WHERE T0.`profile_id` IS NOT NULL AND NOT T0.`Status` IN (?) OR T1.`age` > 2000
|
|
||||||
// num, err := qs.SetCond(cond).Count()
|
|
||||||
GetCond() *Condition
|
|
||||||
// add LIMIT value.
|
|
||||||
// args[0] means offset, e.g. LIMIT num,offset.
|
|
||||||
// if Limit <= 0 then Limit will be set to default limit ,eg 1000
|
|
||||||
// if QuerySeter doesn't call Limit, the sql's Limit will be set to default limit, eg 1000
|
|
||||||
// for example:
|
|
||||||
// qs.Limit(10, 2)
|
|
||||||
// // sql-> limit 10 offset 2
|
|
||||||
Limit(limit interface{}, args ...interface{}) QuerySeter
|
|
||||||
// add OFFSET value
|
|
||||||
// same as Limit function's args[0]
|
|
||||||
Offset(offset interface{}) QuerySeter
|
|
||||||
// add GROUP BY expression
|
|
||||||
// for example:
|
|
||||||
// qs.GroupBy("id")
|
|
||||||
GroupBy(exprs ...string) QuerySeter
|
|
||||||
// add ORDER expression.
|
|
||||||
// "column" means ASC, "-column" means DESC.
|
|
||||||
// for example:
|
|
||||||
// qs.OrderBy("-status")
|
|
||||||
OrderBy(exprs ...string) QuerySeter
|
|
||||||
// add ORDER expression by order clauses
|
|
||||||
// for example:
|
|
||||||
// OrderClauses(
|
|
||||||
// order_clause.Clause(
|
|
||||||
// order.Column("Id"),
|
|
||||||
// order.SortAscending(),
|
|
||||||
// ),
|
|
||||||
// order_clause.Clause(
|
|
||||||
// order.Column("status"),
|
|
||||||
// order.SortDescending(),
|
|
||||||
// ),
|
|
||||||
// )
|
|
||||||
// OrderClauses(order_clause.Clause(
|
|
||||||
// order_clause.Column(`user__status`),
|
|
||||||
// order_clause.SortDescending(),//default None
|
|
||||||
// ))
|
|
||||||
// OrderClauses(order_clause.Clause(
|
|
||||||
// order_clause.Column(`random()`),
|
|
||||||
// order_clause.SortNone(),//default None
|
|
||||||
// order_clause.Raw(),//default false.if true, do not check field is valid or not
|
|
||||||
// ))
|
|
||||||
OrderClauses(orders ...*order_clause.Order) QuerySeter
|
|
||||||
// add FORCE INDEX expression.
|
|
||||||
// for example:
|
|
||||||
// qs.ForceIndex(`idx_name1`,`idx_name2`)
|
|
||||||
// ForceIndex, UseIndex , IgnoreIndex are mutually exclusive
|
|
||||||
ForceIndex(indexes ...string) QuerySeter
|
|
||||||
// add USE INDEX expression.
|
|
||||||
// for example:
|
|
||||||
// qs.UseIndex(`idx_name1`,`idx_name2`)
|
|
||||||
// ForceIndex, UseIndex , IgnoreIndex are mutually exclusive
|
|
||||||
UseIndex(indexes ...string) QuerySeter
|
|
||||||
// add IGNORE INDEX expression.
|
|
||||||
// for example:
|
|
||||||
// qs.IgnoreIndex(`idx_name1`,`idx_name2`)
|
|
||||||
// ForceIndex, UseIndex , IgnoreIndex are mutually exclusive
|
|
||||||
IgnoreIndex(indexes ...string) QuerySeter
|
|
||||||
// set relation model to query together.
|
|
||||||
// it will query relation models and assign to parent model.
|
|
||||||
// for example:
|
|
||||||
// // will load all related fields use left join .
|
|
||||||
// qs.RelatedSel().One(&user)
|
|
||||||
// // will load related field only profile
|
|
||||||
// qs.RelatedSel("profile").One(&user)
|
|
||||||
// user.Profile.Age = 32
|
|
||||||
RelatedSel(params ...interface{}) QuerySeter
|
|
||||||
// Set Distinct
|
|
||||||
// for example:
|
|
||||||
// o.QueryTable("policy").Filter("Groups__Group__Users__User", user).
|
|
||||||
// Distinct().
|
|
||||||
// All(&permissions)
|
|
||||||
Distinct() QuerySeter
|
|
||||||
// set FOR UPDATE to query.
|
|
||||||
// for example:
|
|
||||||
// o.QueryTable("user").Filter("uid", uid).ForUpdate().All(&users)
|
|
||||||
ForUpdate() QuerySeter
|
|
||||||
// return QuerySeter execution result number
|
|
||||||
// for example:
|
|
||||||
// num, err = qs.Filter("profile__age__gt", 28).Count()
|
|
||||||
Count() (int64, error)
|
|
||||||
CountWithCtx(context.Context) (int64, error)
|
|
||||||
// check result empty or not after QuerySeter executed
|
|
||||||
// the same as QuerySeter.Count > 0
|
|
||||||
Exist() bool
|
|
||||||
ExistWithCtx(context.Context) bool
|
|
||||||
// execute update with parameters
|
|
||||||
// for example:
|
|
||||||
// num, err = qs.Filter("user_name", "slene").Update(Params{
|
|
||||||
// "Nums": ColValue(Col_Minus, 50),
|
|
||||||
// }) // user slene's Nums will minus 50
|
|
||||||
// num, err = qs.Filter("UserName", "slene").Update(Params{
|
|
||||||
// "user_name": "slene2"
|
|
||||||
// }) // user slene's name will change to slene2
|
|
||||||
Update(values Params) (int64, error)
|
|
||||||
UpdateWithCtx(ctx context.Context, values Params) (int64, error)
|
|
||||||
// delete from table
|
|
||||||
// for example:
|
|
||||||
// num ,err = qs.Filter("user_name__in", "testing1", "testing2").Delete()
|
|
||||||
// //delete two user who's name is testing1 or testing2
|
|
||||||
Delete() (int64, error)
|
|
||||||
DeleteWithCtx(context.Context) (int64, error)
|
|
||||||
// return an insert queryer.
|
|
||||||
// it can be used in times.
|
|
||||||
// example:
|
|
||||||
// i,err := sq.PrepareInsert()
|
|
||||||
// num, err = i.Insert(&user1) // user table will add one record user1 at once
|
|
||||||
// num, err = i.Insert(&user2) // user table will add one record user2 at once
|
|
||||||
// err = i.Close() //don't forget call Close
|
|
||||||
PrepareInsert() (Inserter, error)
|
|
||||||
PrepareInsertWithCtx(context.Context) (Inserter, error)
|
|
||||||
// query all data and map to containers.
|
|
||||||
// cols means the columns when querying.
|
|
||||||
// for example:
|
|
||||||
// var users []*User
|
|
||||||
// qs.All(&users) // users[0],users[1],users[2] ...
|
|
||||||
All(container interface{}, cols ...string) (int64, error)
|
|
||||||
AllWithCtx(ctx context.Context, container interface{}, cols ...string) (int64, error)
|
|
||||||
// query one row data and map to containers.
|
|
||||||
// cols means the columns when querying.
|
|
||||||
// for example:
|
|
||||||
// var user User
|
|
||||||
// qs.One(&user) //user.UserName == "slene"
|
|
||||||
One(container interface{}, cols ...string) error
|
|
||||||
OneWithCtx(ctx context.Context, container interface{}, cols ...string) error
|
|
||||||
// query all data and map to []map[string]interface.
|
|
||||||
// expres means condition expression.
|
|
||||||
// it converts data to []map[column]value.
|
|
||||||
// for example:
|
|
||||||
// var maps []Params
|
|
||||||
// qs.Values(&maps) //maps[0]["UserName"]=="slene"
|
|
||||||
Values(results *[]Params, exprs ...string) (int64, error)
|
|
||||||
ValuesWithCtx(ctx context.Context, results *[]Params, exprs ...string) (int64, error)
|
|
||||||
// query all data and map to [][]interface
|
|
||||||
// it converts data to [][column_index]value
|
|
||||||
// for example:
|
|
||||||
// var list []ParamsList
|
|
||||||
// qs.ValuesList(&list) // list[0][1] == "slene"
|
|
||||||
ValuesList(results *[]ParamsList, exprs ...string) (int64, error)
|
|
||||||
ValuesListWithCtx(ctx context.Context, results *[]ParamsList, exprs ...string) (int64, error)
|
|
||||||
// query all data and map to []interface.
|
|
||||||
// it's designed for one column record set, auto change to []value, not [][column]value.
|
|
||||||
// for example:
|
|
||||||
// var list ParamsList
|
|
||||||
// qs.ValuesFlat(&list, "UserName") // list[0] == "slene"
|
|
||||||
ValuesFlat(result *ParamsList, expr string) (int64, error)
|
|
||||||
ValuesFlatWithCtx(ctx context.Context, result *ParamsList, expr string) (int64, error)
|
|
||||||
// query all rows into map[string]interface with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to map[string]interface{}{
|
|
||||||
// "total": 100,
|
|
||||||
// "found": 200,
|
|
||||||
// }
|
|
||||||
RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
|
|
||||||
// query all rows into struct with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to struct {
|
|
||||||
// Total int
|
|
||||||
// Found int
|
|
||||||
// }
|
|
||||||
RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)
|
|
||||||
// aggregate func.
|
|
||||||
// for example:
|
|
||||||
// type result struct {
|
|
||||||
// DeptName string
|
|
||||||
// Total int
|
|
||||||
// }
|
|
||||||
// var res []result
|
|
||||||
// o.QueryTable("dept_info").Aggregate("dept_name,sum(salary) as total").GroupBy("dept_name").All(&res)
|
|
||||||
Aggregate(s string) QuerySeter
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryM2Mer model to model query struct
|
|
||||||
// all operations are on the m2m table only, will not affect the origin model table
|
|
||||||
type QueryM2Mer interface {
|
|
||||||
// add models to origin models when creating queryM2M.
|
|
||||||
// example:
|
|
||||||
// m2m := orm.QueryM2M(post,"Tag")
|
|
||||||
// m2m.Add(&Tag1{},&Tag2{})
|
|
||||||
// for _,tag := range post.Tags{}{ ... }
|
|
||||||
// param could also be any of the follow
|
|
||||||
// []*Tag{{Id:3,Name: "TestTag1"}, {Id:4,Name: "TestTag2"}}
|
|
||||||
// &Tag{Id:5,Name: "TestTag3"}
|
|
||||||
// []interface{}{&Tag{Id:6,Name: "TestTag4"}}
|
|
||||||
// insert one or more rows to m2m table
|
|
||||||
// make sure the relation is defined in post model struct tag.
|
|
||||||
Add(...interface{}) (int64, error)
|
|
||||||
AddWithCtx(context.Context, ...interface{}) (int64, error)
|
|
||||||
// remove models following the origin model relationship
|
|
||||||
// only delete rows from m2m table
|
|
||||||
// for example:
|
|
||||||
// tag3 := &Tag{Id:5,Name: "TestTag3"}
|
|
||||||
// num, err = m2m.Remove(tag3)
|
|
||||||
Remove(...interface{}) (int64, error)
|
|
||||||
RemoveWithCtx(context.Context, ...interface{}) (int64, error)
|
|
||||||
// check model is existed in relationship of origin model
|
|
||||||
Exist(interface{}) bool
|
|
||||||
ExistWithCtx(context.Context, interface{}) bool
|
|
||||||
// clean all models in related of origin model
|
|
||||||
Clear() (int64, error)
|
|
||||||
ClearWithCtx(context.Context) (int64, error)
|
|
||||||
// count all related models of origin model
|
|
||||||
Count() (int64, error)
|
|
||||||
CountWithCtx(context.Context) (int64, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawPreparer raw query statement
|
|
||||||
type RawPreparer interface {
|
|
||||||
Exec(...interface{}) (sql.Result, error)
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// RawSeter raw query seter
|
|
||||||
// create From Ormer.Raw
|
|
||||||
// for example:
|
|
||||||
// sql := fmt.Sprintf("SELECT %sid%s,%sname%s FROM %suser%s WHERE id = ?",Q,Q,Q,Q,Q,Q)
|
|
||||||
// rs := Ormer.Raw(sql, 1)
|
|
||||||
type RawSeter interface {
|
|
||||||
// execute sql and get result
|
|
||||||
Exec() (sql.Result, error)
|
|
||||||
// query data and map to container
|
|
||||||
// for example:
|
|
||||||
// var name string
|
|
||||||
// var id int
|
|
||||||
// rs.QueryRow(&id,&name) // id==2 name=="slene"
|
|
||||||
QueryRow(containers ...interface{}) error
|
|
||||||
|
|
||||||
// query data rows and map to container
|
|
||||||
// var ids []int
|
|
||||||
// var names []int
|
|
||||||
// query = fmt.Sprintf("SELECT 'id','name' FROM %suser%s", Q, Q)
|
|
||||||
// num, err = dORM.Raw(query).QueryRows(&ids,&names) // ids=>{1,2},names=>{"nobody","slene"}
|
|
||||||
QueryRows(containers ...interface{}) (int64, error)
|
|
||||||
SetArgs(...interface{}) RawSeter
|
|
||||||
// query data to []map[string]interface
|
|
||||||
// see QuerySeter's Values
|
|
||||||
Values(container *[]Params, cols ...string) (int64, error)
|
|
||||||
// query data to [][]interface
|
|
||||||
// see QuerySeter's ValuesList
|
|
||||||
ValuesList(container *[]ParamsList, cols ...string) (int64, error)
|
|
||||||
// query data to []interface
|
|
||||||
// see QuerySeter's ValuesFlat
|
|
||||||
ValuesFlat(container *ParamsList, cols ...string) (int64, error)
|
|
||||||
// query all rows into map[string]interface with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to map[string]interface{}{
|
|
||||||
// "total": 100,
|
|
||||||
// "found": 200,
|
|
||||||
// }
|
|
||||||
RowsToMap(result *Params, keyCol, valueCol string) (int64, error)
|
|
||||||
// query all rows into struct with specify key and value column name.
|
|
||||||
// keyCol = "name", valueCol = "value"
|
|
||||||
// table data
|
|
||||||
// name | value
|
|
||||||
// total | 100
|
|
||||||
// found | 200
|
|
||||||
// to struct {
|
|
||||||
// Total int
|
|
||||||
// Found int
|
|
||||||
// }
|
|
||||||
RowsToStruct(ptrStruct interface{}, keyCol, valueCol string) (int64, error)
|
|
||||||
|
|
||||||
// return prepared raw statement for used in times.
|
|
||||||
// for example:
|
|
||||||
// pre, err := dORM.Raw("INSERT INTO tag (name) VALUES (?)").Prepare()
|
|
||||||
// r, err := pre.Exec("name1") // INSERT INTO tag (name) VALUES (`name1`)
|
|
||||||
Prepare() (RawPreparer, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stmtQuerier statement querier
|
|
||||||
type stmtQuerier interface {
|
|
||||||
Close() error
|
|
||||||
Exec(args ...interface{}) (sql.Result, error)
|
|
||||||
ExecContext(ctx context.Context, args ...interface{}) (sql.Result, error)
|
|
||||||
Query(args ...interface{}) (*sql.Rows, error)
|
|
||||||
QueryContext(ctx context.Context, args ...interface{}) (*sql.Rows, error)
|
|
||||||
QueryRow(args ...interface{}) *sql.Row
|
|
||||||
QueryRowContext(ctx context.Context, args ...interface{}) *sql.Row
|
|
||||||
}
|
|
||||||
|
|
||||||
// db querier
|
|
||||||
type dbQuerier interface {
|
|
||||||
Prepare(query string) (*sql.Stmt, error)
|
|
||||||
PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
|
|
||||||
Exec(query string, args ...interface{}) (sql.Result, error)
|
|
||||||
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
|
|
||||||
Query(query string, args ...interface{}) (*sql.Rows, error)
|
|
||||||
QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
|
|
||||||
QueryRow(query string, args ...interface{}) *sql.Row
|
|
||||||
QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
|
|
||||||
}
|
|
||||||
|
|
||||||
// type DB interface {
|
|
||||||
// Begin() (*sql.Tx, error)
|
|
||||||
// Prepare(query string) (stmtQuerier, error)
|
|
||||||
// Exec(query string, args ...interface{}) (sql.Result, error)
|
|
||||||
// Query(query string, args ...interface{}) (*sql.Rows, error)
|
|
||||||
// QueryRow(query string, args ...interface{}) *sql.Row
|
|
||||||
// }
|
|
||||||
|
|
||||||
// base database struct
|
|
||||||
type dbBaser interface {
|
|
||||||
Read(context.Context, dbQuerier, *modelInfo, reflect.Value, *time.Location, []string, bool) error
|
|
||||||
ReadBatch(context.Context, dbQuerier, *querySet, *modelInfo, *Condition, interface{}, *time.Location, []string) (int64, error)
|
|
||||||
Count(context.Context, dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error)
|
|
||||||
ReadValues(context.Context, dbQuerier, *querySet, *modelInfo, *Condition, []string, interface{}, *time.Location) (int64, error)
|
|
||||||
|
|
||||||
Insert(context.Context, dbQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error)
|
|
||||||
InsertOrUpdate(context.Context, dbQuerier, *modelInfo, reflect.Value, *alias, ...string) (int64, error)
|
|
||||||
InsertMulti(context.Context, dbQuerier, *modelInfo, reflect.Value, int, *time.Location) (int64, error)
|
|
||||||
InsertValue(context.Context, dbQuerier, *modelInfo, bool, []string, []interface{}) (int64, error)
|
|
||||||
InsertStmt(context.Context, stmtQuerier, *modelInfo, reflect.Value, *time.Location) (int64, error)
|
|
||||||
|
|
||||||
Update(context.Context, dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) (int64, error)
|
|
||||||
UpdateBatch(context.Context, dbQuerier, *querySet, *modelInfo, *Condition, Params, *time.Location) (int64, error)
|
|
||||||
|
|
||||||
Delete(context.Context, dbQuerier, *modelInfo, reflect.Value, *time.Location, []string) (int64, error)
|
|
||||||
DeleteBatch(context.Context, dbQuerier, *querySet, *modelInfo, *Condition, *time.Location) (int64, error)
|
|
||||||
|
|
||||||
SupportUpdateJoin() bool
|
|
||||||
OperatorSQL(string) string
|
|
||||||
GenerateOperatorSQL(*modelInfo, *fieldInfo, string, []interface{}, *time.Location) (string, []interface{})
|
|
||||||
GenerateOperatorLeftCol(*fieldInfo, string, *string)
|
|
||||||
PrepareInsert(context.Context, dbQuerier, *modelInfo) (stmtQuerier, string, error)
|
|
||||||
MaxLimit() uint64
|
|
||||||
TableQuote() string
|
|
||||||
ReplaceMarks(*string)
|
|
||||||
HasReturningID(*modelInfo, *string) bool
|
|
||||||
TimeFromDB(*time.Time, *time.Location)
|
|
||||||
TimeToDB(*time.Time, *time.Location)
|
|
||||||
DbTypes() map[string]string
|
|
||||||
GetTables(dbQuerier) (map[string]bool, error)
|
|
||||||
GetColumns(context.Context, dbQuerier, string) (map[string][3]string, error)
|
|
||||||
ShowTablesQuery() string
|
|
||||||
ShowColumnsQuery(string) string
|
|
||||||
IndexExists(context.Context, dbQuerier, string, string) bool
|
|
||||||
collectFieldValue(*modelInfo, *fieldInfo, reflect.Value, bool, *time.Location) (interface{}, error)
|
|
||||||
setval(context.Context, dbQuerier, *modelInfo, []string) error
|
|
||||||
|
|
||||||
GenerateSpecifyIndex(tableName string, useIndex int, indexes []string) string
|
|
||||||
}
|
|
@ -1,319 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package orm
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"math/big"
|
|
||||||
"reflect"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type fn func(string) string
|
|
||||||
|
|
||||||
var (
|
|
||||||
nameStrategyMap = map[string]fn{
|
|
||||||
defaultNameStrategy: snakeString,
|
|
||||||
SnakeAcronymNameStrategy: snakeStringWithAcronym,
|
|
||||||
}
|
|
||||||
defaultNameStrategy = "snakeString"
|
|
||||||
SnakeAcronymNameStrategy = "snakeStringWithAcronym"
|
|
||||||
nameStrategy = defaultNameStrategy
|
|
||||||
)
|
|
||||||
|
|
||||||
// StrTo is a string that knows how to convert itself into the
// common Go scalar types.
type StrTo string

// Set stores v; an empty string clears the value instead.
func (s *StrTo) Set(v string) {
	if v == "" {
		s.Clear()
		return
	}
	*s = StrTo(v)
}

// Clear marks the value as "not set" using the ASCII RS (0x1E) sentinel.
func (s *StrTo) Clear() {
	*s = StrTo(rune(0x1E))
}

// Exist reports whether a real value (not the sentinel) is stored.
func (s StrTo) Exist() bool {
	return string(s) != string(rune(0x1E))
}

// Bool converts the value to bool.
func (s StrTo) Bool() (bool, error) {
	return strconv.ParseBool(s.String())
}

// Float32 converts the value to float32.
func (s StrTo) Float32() (float32, error) {
	v, err := strconv.ParseFloat(s.String(), 32)
	return float32(v), err
}

// Float64 converts the value to float64.
func (s StrTo) Float64() (float64, error) {
	return strconv.ParseFloat(s.String(), 64)
}

// Int converts the value to int.
func (s StrTo) Int() (int, error) {
	v, err := strconv.ParseInt(s.String(), 10, 32)
	return int(v), err
}

// Int8 converts the value to int8.
func (s StrTo) Int8() (int8, error) {
	v, err := strconv.ParseInt(s.String(), 10, 8)
	return int8(v), err
}

// Int16 converts the value to int16.
func (s StrTo) Int16() (int16, error) {
	v, err := strconv.ParseInt(s.String(), 10, 16)
	return int16(v), err
}

// Int32 converts the value to int32.
func (s StrTo) Int32() (int32, error) {
	v, err := strconv.ParseInt(s.String(), 10, 32)
	return int32(v), err
}

// Int64 converts the value to int64. Values that overflow strconv
// are retried through math/big (base 10) and truncated to int64.
func (s StrTo) Int64() (int64, error) {
	v, err := strconv.ParseInt(s.String(), 10, 64)
	if err == nil {
		return v, nil
	}
	if bi, ok := new(big.Int).SetString(s.String(), 10); ok {
		return bi.Int64(), nil
	}
	return v, err
}

// Uint converts the value to uint.
func (s StrTo) Uint() (uint, error) {
	v, err := strconv.ParseUint(s.String(), 10, 32)
	return uint(v), err
}

// Uint8 converts the value to uint8.
func (s StrTo) Uint8() (uint8, error) {
	v, err := strconv.ParseUint(s.String(), 10, 8)
	return uint8(v), err
}

// Uint16 converts the value to uint16.
func (s StrTo) Uint16() (uint16, error) {
	v, err := strconv.ParseUint(s.String(), 10, 16)
	return uint16(v), err
}

// Uint32 converts the value to uint32.
func (s StrTo) Uint32() (uint32, error) {
	v, err := strconv.ParseUint(s.String(), 10, 32)
	return uint32(v), err
}

// Uint64 converts the value to uint64. Values that overflow strconv
// are retried through math/big (base 10) and truncated to uint64.
func (s StrTo) Uint64() (uint64, error) {
	v, err := strconv.ParseUint(s.String(), 10, 64)
	if err == nil {
		return v, nil
	}
	if bi, ok := new(big.Int).SetString(s.String(), 10); ok {
		return bi.Uint64(), nil
	}
	return v, err
}

// String returns the stored value, or "" when nothing is set.
func (s StrTo) String() string {
	if !s.Exist() {
		return ""
	}
	return string(s)
}
|
|
||||||
|
|
||||||
// ToStr interface to string
|
|
||||||
func ToStr(value interface{}, args ...int) (s string) {
|
|
||||||
switch v := value.(type) {
|
|
||||||
case bool:
|
|
||||||
s = strconv.FormatBool(v)
|
|
||||||
case float32:
|
|
||||||
s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32))
|
|
||||||
case float64:
|
|
||||||
s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64))
|
|
||||||
case int:
|
|
||||||
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
|
|
||||||
case int8:
|
|
||||||
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
|
|
||||||
case int16:
|
|
||||||
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
|
|
||||||
case int32:
|
|
||||||
s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10))
|
|
||||||
case int64:
|
|
||||||
s = strconv.FormatInt(v, argInt(args).Get(0, 10))
|
|
||||||
case uint:
|
|
||||||
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
|
|
||||||
case uint8:
|
|
||||||
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
|
|
||||||
case uint16:
|
|
||||||
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
|
|
||||||
case uint32:
|
|
||||||
s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10))
|
|
||||||
case uint64:
|
|
||||||
s = strconv.FormatUint(v, argInt(args).Get(0, 10))
|
|
||||||
case string:
|
|
||||||
s = v
|
|
||||||
case []byte:
|
|
||||||
s = string(v)
|
|
||||||
default:
|
|
||||||
s = fmt.Sprintf("%v", v)
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToInt64 converts any predeclared integer value to int64.
// It panics when value is not one of the built-in integer types.
func ToInt64(value interface{}) int64 {
	rv := reflect.ValueOf(value)
	switch value.(type) {
	case int, int8, int16, int32, int64:
		return rv.Int()
	case uint, uint8, uint16, uint32, uint64:
		return int64(rv.Uint())
	}
	panic(fmt.Errorf("ToInt64 need numeric not `%T`", value))
}
|
|
||||||
|
|
||||||
// snakeStringWithAcronym converts CamelCase to snake_case while
// keeping runs of capitals (acronyms) together: IDCard -> id_card.
func snakeStringWithAcronym(s string) string {
	out := make([]byte, 0, len(s)*2)
	for i := 0; i < len(s); i++ {
		c := s[i]
		lowerBefore := i > 0 && s[i-1] >= 'a' && s[i-1] <= 'z'
		lowerAfter := i+1 < len(s) && s[i+1] >= 'a' && s[i+1] <= 'z'
		// Split only at a capital that borders a lowercase letter, so
		// the interior of an acronym stays joined.
		if i > 0 && c >= 'A' && c <= 'Z' && (lowerBefore || lowerAfter) {
			out = append(out, '_')
		}
		out = append(out, c)
	}
	return strings.ToLower(string(out))
}

// snakeString converts CamelCase to snake_case, splitting at every
// capital: XxYy -> xx_yy, XxYY -> xx_y_y.
func snakeString(s string) string {
	out := make([]byte, 0, len(s)*2)
	emitted := false // true once a non-underscore byte has been seen
	for i := 0; i < len(s); i++ {
		c := s[i]
		if i > 0 && c >= 'A' && c <= 'Z' && emitted {
			out = append(out, '_')
		}
		if c != '_' {
			emitted = true
		}
		out = append(out, c)
	}
	return strings.ToLower(string(out))
}
|
|
||||||
|
|
||||||
// SetNameStrategy set different name strategy
|
|
||||||
func SetNameStrategy(s string) {
|
|
||||||
if SnakeAcronymNameStrategy != s {
|
|
||||||
nameStrategy = defaultNameStrategy
|
|
||||||
}
|
|
||||||
nameStrategy = s
|
|
||||||
}
|
|
||||||
|
|
||||||
// camelString converts snake_case to CamelCase: xx_yy -> XxYy.
func camelString(s string) string {
	out := make([]byte, 0, len(s))
	upperNext := true // capitalize the first letter and any letter after '_'
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '_' {
			upperNext = true
			continue
		}
		if upperNext {
			if c >= 'a' && c <= 'z' {
				c -= 'a' - 'A'
			}
			upperNext = false
		}
		out = append(out, c)
	}
	return string(out)
}
|
|
||||||
|
|
||||||
// argString is a helper for reading optional string arguments.
type argString []string

// Get returns a[i] when present, otherwise the first fallback in args,
// otherwise the zero value.
func (a argString) Get(i int, args ...string) (r string) {
	if i >= 0 && i < len(a) {
		return a[i]
	}
	if len(args) > 0 {
		return args[0]
	}
	return
}

// argInt is a helper for reading optional int arguments.
type argInt []int

// Get returns a[i] when present, otherwise the first fallback in args,
// otherwise the zero value.
//
// Bug fix: the previous implementation applied the fallback
// unconditionally AFTER reading a[i], so an explicitly supplied value
// was always clobbered by the default (e.g. the precision/base args
// passed to ToStr were ignored). It now mirrors argString.Get.
func (a argInt) Get(i int, args ...int) (r int) {
	if i >= 0 && i < len(a) {
		return a[i]
	}
	if len(args) > 0 {
		return args[0]
	}
	return
}
|
|
||||||
|
|
||||||
// parse time to string with location
|
|
||||||
func timeParse(dateString, format string) (time.Time, error) {
|
|
||||||
tp, err := time.ParseInLocation(format, dateString, DefaultTimeLoc)
|
|
||||||
return tp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// get pointer indirect type
|
|
||||||
func indirectType(v reflect.Type) reflect.Type {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Ptr:
|
|
||||||
return indirectType(v.Elem())
|
|
||||||
default:
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,70 +0,0 @@
|
|||||||
## logs
|
|
||||||
|
|
||||||
logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` .
|
|
||||||
|
|
||||||
## How to install?
|
|
||||||
|
|
||||||
go get github.com/beego/beego/v2/core/logs
|
|
||||||
|
|
||||||
## What adapters are supported?
|
|
||||||
|
|
||||||
As of now, this logs package supports the console, file, smtp, and conn adapters.
|
|
||||||
|
|
||||||
## How to use it?
|
|
||||||
|
|
||||||
First you must import it
|
|
||||||
|
|
||||||
```golang
|
|
||||||
import (
|
|
||||||
"github.com/beego/beego/v2/core/logs"
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
Then init a Log (example with console adapter)
|
|
||||||
|
|
||||||
```golang
|
|
||||||
log := logs.NewLogger(10000)
|
|
||||||
log.SetLogger("console", "")
|
|
||||||
```
|
|
||||||
|
|
||||||
> the first parameter sets how many log messages the internal channel can buffer
|
|
||||||
|
|
||||||
Use it like this:
|
|
||||||
|
|
||||||
```golang
|
|
||||||
log.Trace("trace")
|
|
||||||
log.Info("info")
|
|
||||||
log.Warn("warning")
|
|
||||||
log.Debug("debug")
|
|
||||||
log.Critical("critical")
|
|
||||||
```
|
|
||||||
|
|
||||||
## File adapter
|
|
||||||
|
|
||||||
Configure file adapter like this:
|
|
||||||
|
|
||||||
```golang
|
|
||||||
log := NewLogger(10000)
|
|
||||||
log.SetLogger("file", `{"filename":"test.log"}`)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Conn adapter
|
|
||||||
|
|
||||||
Configure like this:
|
|
||||||
|
|
||||||
```golang
|
|
||||||
log := NewLogger(1000)
|
|
||||||
log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
|
|
||||||
log.Info("info")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Smtp adapter
|
|
||||||
|
|
||||||
Configure like this:
|
|
||||||
|
|
||||||
```golang
|
|
||||||
log := NewLogger(10000)
|
|
||||||
log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
|
|
||||||
log.Critical("sendmail critical")
|
|
||||||
time.Sleep(time.Second * 30)
|
|
||||||
```
|
|
@ -1,93 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
apacheFormatPattern = "%s - - [%s] \"%s %d %d\" %f %s %s"
|
|
||||||
apacheFormat = "APACHE_FORMAT"
|
|
||||||
jsonFormat = "JSON_FORMAT"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AccessLogRecord is astruct for holding access log data.
|
|
||||||
type AccessLogRecord struct {
|
|
||||||
RemoteAddr string `json:"remote_addr"`
|
|
||||||
RequestTime time.Time `json:"request_time"`
|
|
||||||
RequestMethod string `json:"request_method"`
|
|
||||||
Request string `json:"request"`
|
|
||||||
ServerProtocol string `json:"server_protocol"`
|
|
||||||
Host string `json:"host"`
|
|
||||||
Status int `json:"status"`
|
|
||||||
BodyBytesSent int64 `json:"body_bytes_sent"`
|
|
||||||
ElapsedTime time.Duration `json:"elapsed_time"`
|
|
||||||
HTTPReferrer string `json:"http_referrer"`
|
|
||||||
HTTPUserAgent string `json:"http_user_agent"`
|
|
||||||
RemoteUser string `json:"remote_user"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AccessLogRecord) json() ([]byte, error) {
|
|
||||||
buffer := &bytes.Buffer{}
|
|
||||||
encoder := json.NewEncoder(buffer)
|
|
||||||
disableEscapeHTML(encoder)
|
|
||||||
|
|
||||||
err := encoder.Encode(r)
|
|
||||||
return buffer.Bytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func disableEscapeHTML(i interface{}) {
|
|
||||||
if e, ok := i.(interface {
|
|
||||||
SetEscapeHTML(bool)
|
|
||||||
}); ok {
|
|
||||||
e.SetEscapeHTML(false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// AccessLog - Format and print access log.
|
|
||||||
func AccessLog(r *AccessLogRecord, format string) {
|
|
||||||
msg := r.format(format)
|
|
||||||
lm := &LogMsg{
|
|
||||||
Msg: strings.TrimSpace(msg),
|
|
||||||
When: time.Now(),
|
|
||||||
Level: levelLoggerImpl,
|
|
||||||
}
|
|
||||||
beeLogger.writeMsg(lm)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *AccessLogRecord) format(format string) string {
|
|
||||||
msg := ""
|
|
||||||
switch format {
|
|
||||||
case apacheFormat:
|
|
||||||
timeFormatted := r.RequestTime.Format("02/Jan/2006 03:04:05")
|
|
||||||
msg = fmt.Sprintf(apacheFormatPattern, r.RemoteAddr, timeFormatted, r.Request, r.Status, r.BodyBytesSent,
|
|
||||||
r.ElapsedTime.Seconds(), r.HTTPReferrer, r.HTTPUserAgent)
|
|
||||||
case jsonFormat:
|
|
||||||
fallthrough
|
|
||||||
default:
|
|
||||||
jsonData, err := r.json()
|
|
||||||
if err != nil {
|
|
||||||
msg = fmt.Sprintf(`{"Error": "%s"}`, err)
|
|
||||||
} else {
|
|
||||||
msg = string(jsonData)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return msg
|
|
||||||
}
|
|
@ -1,141 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// connWriter implements LoggerInterface.
|
|
||||||
// Writes messages in keep-live tcp connection.
|
|
||||||
type connWriter struct {
|
|
||||||
lg *logWriter
|
|
||||||
innerWriter io.WriteCloser
|
|
||||||
formatter LogFormatter
|
|
||||||
Formatter string `json:"formatter"`
|
|
||||||
ReconnectOnMsg bool `json:"reconnectOnMsg"`
|
|
||||||
Reconnect bool `json:"reconnect"`
|
|
||||||
Net string `json:"net"`
|
|
||||||
Addr string `json:"addr"`
|
|
||||||
Level int `json:"level"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConn creates new ConnWrite returning as LoggerInterface.
|
|
||||||
func NewConn() Logger {
|
|
||||||
conn := new(connWriter)
|
|
||||||
conn.Level = LevelTrace
|
|
||||||
conn.formatter = conn
|
|
||||||
return conn
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *connWriter) Format(lm *LogMsg) string {
|
|
||||||
return lm.OldStyleFormat()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init initializes a connection writer with json config.
|
|
||||||
// json config only needs they "level" key
|
|
||||||
func (c *connWriter) Init(config string) error {
|
|
||||||
res := json.Unmarshal([]byte(config), c)
|
|
||||||
if res == nil && len(c.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(c.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", c.Formatter))
|
|
||||||
}
|
|
||||||
c.formatter = fmtr
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *connWriter) SetFormatter(f LogFormatter) {
|
|
||||||
c.formatter = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteMsg writes message in connection.
|
|
||||||
// If connection is down, try to re-connect.
|
|
||||||
func (c *connWriter) WriteMsg(lm *LogMsg) error {
|
|
||||||
if lm.Level > c.Level {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if c.needToConnectOnMsg() {
|
|
||||||
err := c.connect()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.ReconnectOnMsg {
|
|
||||||
defer c.innerWriter.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
msg := c.formatter.Format(lm)
|
|
||||||
|
|
||||||
_, err := c.lg.writeln(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush implementing method. empty.
|
|
||||||
func (c *connWriter) Flush() {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy destroy connection writer and close tcp listener.
|
|
||||||
func (c *connWriter) Destroy() {
|
|
||||||
if c.innerWriter != nil {
|
|
||||||
c.innerWriter.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *connWriter) connect() error {
|
|
||||||
if c.innerWriter != nil {
|
|
||||||
c.innerWriter.Close()
|
|
||||||
c.innerWriter = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
conn, err := net.Dial(c.Net, c.Addr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if tcpConn, ok := conn.(*net.TCPConn); ok {
|
|
||||||
tcpConn.SetKeepAlive(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
c.innerWriter = conn
|
|
||||||
c.lg = newLogWriter(conn)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *connWriter) needToConnectOnMsg() bool {
|
|
||||||
if c.Reconnect {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.innerWriter == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.ReconnectOnMsg
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
Register(AdapterConn, NewConn)
|
|
||||||
}
|
|
@ -1,125 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/shiena/ansicolor"
|
|
||||||
)
|
|
||||||
|
|
||||||
// brush is a color join function
type brush func(string) string

// newBrush returns a brush that wraps text in the given ANSI SGR
// color code and resets the attributes afterwards.
func newBrush(color string) brush {
	const prefix, reset = "\033[", "\033[0m"
	return func(text string) string {
		return prefix + color + "m" + text + reset
	}
}

// colors maps each log level (by index) to its terminal color.
var colors = []brush{
	newBrush("1;37"), // Emergency     white
	newBrush("1;36"), // Alert         cyan
	newBrush("1;35"), // Critical      magenta
	newBrush("1;31"), // Error         red
	newBrush("1;33"), // Warning       yellow
	newBrush("1;32"), // Notice        green
	newBrush("1;34"), // Informational blue
	newBrush("1;44"), // Debug         background blue
}
|
|
||||||
|
|
||||||
// consoleWriter implements LoggerInterface and writes messages to terminal.
|
|
||||||
type consoleWriter struct {
|
|
||||||
lg *logWriter
|
|
||||||
formatter LogFormatter
|
|
||||||
Formatter string `json:"formatter"`
|
|
||||||
Level int `json:"level"`
|
|
||||||
Colorful bool `json:"color"` // this filed is useful only when system's terminal supports color
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *consoleWriter) Format(lm *LogMsg) string {
|
|
||||||
msg := lm.OldStyleFormat()
|
|
||||||
if c.Colorful {
|
|
||||||
msg = strings.Replace(msg, levelPrefix[lm.Level], colors[lm.Level](levelPrefix[lm.Level]), 1)
|
|
||||||
}
|
|
||||||
h, _, _ := formatTimeHeader(lm.When)
|
|
||||||
return string(append(h, msg...))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *consoleWriter) SetFormatter(f LogFormatter) {
|
|
||||||
c.formatter = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConsole creates ConsoleWriter returning as LoggerInterface.
|
|
||||||
func NewConsole() Logger {
|
|
||||||
return newConsole()
|
|
||||||
}
|
|
||||||
|
|
||||||
func newConsole() *consoleWriter {
|
|
||||||
cw := &consoleWriter{
|
|
||||||
lg: newLogWriter(ansicolor.NewAnsiColorWriter(os.Stdout)),
|
|
||||||
Level: LevelDebug,
|
|
||||||
Colorful: true,
|
|
||||||
}
|
|
||||||
cw.formatter = cw
|
|
||||||
return cw
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init initianlizes the console logger.
|
|
||||||
// jsonConfig must be in the format '{"level":LevelTrace}'
|
|
||||||
func (c *consoleWriter) Init(config string) error {
|
|
||||||
if len(config) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
res := json.Unmarshal([]byte(config), c)
|
|
||||||
if res == nil && len(c.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(c.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", c.Formatter))
|
|
||||||
}
|
|
||||||
c.formatter = fmtr
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteMsg writes message in console.
|
|
||||||
func (c *consoleWriter) WriteMsg(lm *LogMsg) error {
|
|
||||||
if lm.Level > c.Level {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
msg := c.formatter.Format(lm)
|
|
||||||
c.lg.writeln(msg)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Destroy implementing method. empty.
|
|
||||||
func (c *consoleWriter) Destroy() {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush implementing method. empty.
|
|
||||||
func (c *consoleWriter) Flush() {
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
Register(AdapterConsole, NewConsole)
|
|
||||||
}
|
|
@ -1,442 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fileLogWriter implements LoggerInterface.
|
|
||||||
// Writes messages by lines limit, file size limit, or time frequency.
|
|
||||||
type fileLogWriter struct {
|
|
||||||
sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize
|
|
||||||
|
|
||||||
Rotate bool `json:"rotate"`
|
|
||||||
Daily bool `json:"daily"`
|
|
||||||
Hourly bool `json:"hourly"`
|
|
||||||
|
|
||||||
// The opened file
|
|
||||||
Filename string `json:"filename"`
|
|
||||||
fileWriter *os.File
|
|
||||||
|
|
||||||
// Rotate at line
|
|
||||||
MaxLines int `json:"maxlines"`
|
|
||||||
maxLinesCurLines int
|
|
||||||
|
|
||||||
MaxFiles int `json:"maxfiles"`
|
|
||||||
MaxFilesCurFiles int
|
|
||||||
|
|
||||||
// Rotate at size
|
|
||||||
MaxSize int `json:"maxsize"`
|
|
||||||
maxSizeCurSize int
|
|
||||||
|
|
||||||
// Rotate daily
|
|
||||||
MaxDays int64 `json:"maxdays"`
|
|
||||||
dailyOpenDate int
|
|
||||||
dailyOpenTime time.Time
|
|
||||||
|
|
||||||
// Rotate hourly
|
|
||||||
MaxHours int64 `json:"maxhours"`
|
|
||||||
hourlyOpenDate int
|
|
||||||
hourlyOpenTime time.Time
|
|
||||||
|
|
||||||
Level int `json:"level"`
|
|
||||||
// Permissions for log file
|
|
||||||
Perm string `json:"perm"`
|
|
||||||
// Permissions for directory if it is specified in FileName
|
|
||||||
DirPerm string `json:"dirperm"`
|
|
||||||
|
|
||||||
RotatePerm string `json:"rotateperm"`
|
|
||||||
|
|
||||||
fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
|
|
||||||
|
|
||||||
logFormatter LogFormatter
|
|
||||||
Formatter string `json:"formatter"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// newFileWriter creates a FileLogWriter returning as LoggerInterface.
|
|
||||||
func newFileWriter() Logger {
|
|
||||||
w := &fileLogWriter{
|
|
||||||
Daily: true,
|
|
||||||
MaxDays: 7,
|
|
||||||
Hourly: false,
|
|
||||||
MaxHours: 168,
|
|
||||||
Rotate: true,
|
|
||||||
RotatePerm: "0440",
|
|
||||||
Level: LevelTrace,
|
|
||||||
Perm: "0660",
|
|
||||||
DirPerm: "0770",
|
|
||||||
MaxLines: 10000000,
|
|
||||||
MaxFiles: 999,
|
|
||||||
MaxSize: 1 << 28,
|
|
||||||
}
|
|
||||||
w.logFormatter = w
|
|
||||||
return w
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*fileLogWriter) Format(lm *LogMsg) string {
|
|
||||||
msg := lm.OldStyleFormat()
|
|
||||||
hd, _, _ := formatTimeHeader(lm.When)
|
|
||||||
msg = fmt.Sprintf("%s %s\n", string(hd), msg)
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) SetFormatter(f LogFormatter) {
|
|
||||||
w.logFormatter = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init file logger with json config.
|
|
||||||
// jsonConfig like:
|
|
||||||
// {
|
|
||||||
// "filename":"logs/beego.log",
|
|
||||||
// "maxLines":10000,
|
|
||||||
// "maxsize":1024,
|
|
||||||
// "daily":true,
|
|
||||||
// "maxDays":15,
|
|
||||||
// "rotate":true,
|
|
||||||
// "perm":"0600"
|
|
||||||
// }
|
|
||||||
func (w *fileLogWriter) Init(config string) error {
|
|
||||||
err := json.Unmarshal([]byte(config), w)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if w.Filename == "" {
|
|
||||||
return errors.New("jsonconfig must have filename")
|
|
||||||
}
|
|
||||||
w.suffix = filepath.Ext(w.Filename)
|
|
||||||
w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix)
|
|
||||||
if w.suffix == "" {
|
|
||||||
w.suffix = ".log"
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(w.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(w.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("the formatter with name: %s not found", w.Formatter)
|
|
||||||
}
|
|
||||||
w.logFormatter = fmtr
|
|
||||||
}
|
|
||||||
err = w.startLogger()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// start file logger. create log file and set to locker-inside file writer.
|
|
||||||
func (w *fileLogWriter) startLogger() error {
|
|
||||||
file, err := w.createLogFile()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if w.fileWriter != nil {
|
|
||||||
w.fileWriter.Close()
|
|
||||||
}
|
|
||||||
w.fileWriter = file
|
|
||||||
return w.initFd()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) needRotateDaily(day int) bool {
|
|
||||||
return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
|
|
||||||
(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
|
|
||||||
(w.Daily && day != w.dailyOpenDate)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) needRotateHourly(hour int) bool {
|
|
||||||
return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
|
|
||||||
(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
|
|
||||||
(w.Hourly && hour != w.hourlyOpenDate)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteMsg writes logger message into file.
|
|
||||||
func (w *fileLogWriter) WriteMsg(lm *LogMsg) error {
|
|
||||||
if lm.Level > w.Level {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, d, h := formatTimeHeader(lm.When)
|
|
||||||
|
|
||||||
msg := w.logFormatter.Format(lm)
|
|
||||||
if w.Rotate {
|
|
||||||
w.RLock()
|
|
||||||
if w.needRotateHourly(h) {
|
|
||||||
w.RUnlock()
|
|
||||||
w.Lock()
|
|
||||||
if w.needRotateHourly(h) {
|
|
||||||
if err := w.doRotate(lm.When); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.Unlock()
|
|
||||||
} else if w.needRotateDaily(d) {
|
|
||||||
w.RUnlock()
|
|
||||||
w.Lock()
|
|
||||||
if w.needRotateDaily(d) {
|
|
||||||
if err := w.doRotate(lm.When); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.Unlock()
|
|
||||||
} else {
|
|
||||||
w.RUnlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Lock()
|
|
||||||
_, err := w.fileWriter.Write([]byte(msg))
|
|
||||||
if err == nil {
|
|
||||||
w.maxLinesCurLines++
|
|
||||||
w.maxSizeCurSize += len(msg)
|
|
||||||
}
|
|
||||||
w.Unlock()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) createLogFile() (*os.File, error) {
|
|
||||||
// Open the log file
|
|
||||||
perm, err := strconv.ParseInt(w.Perm, 8, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dirperm, err := strconv.ParseInt(w.DirPerm, 8, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
filepath := path.Dir(w.Filename)
|
|
||||||
os.MkdirAll(filepath, os.FileMode(dirperm))
|
|
||||||
|
|
||||||
fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm))
|
|
||||||
if err == nil {
|
|
||||||
// Make sure file perm is user set perm cause of `os.OpenFile` will obey umask
|
|
||||||
os.Chmod(w.Filename, os.FileMode(perm))
|
|
||||||
}
|
|
||||||
return fd, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// initFd seeds the rotation bookkeeping from the currently open file:
// current size, open date/hour, and (when MaxLines is enforced) the
// number of lines already present. It also spawns the background
// rotation goroutine for hourly or daily mode.
func (w *fileLogWriter) initFd() error {
	fd := w.fileWriter
	fInfo, err := fd.Stat()
	if err != nil {
		return fmt.Errorf("get stat err: %s", err)
	}
	w.maxSizeCurSize = int(fInfo.Size())
	w.dailyOpenTime = time.Now()
	w.dailyOpenDate = w.dailyOpenTime.Day()
	w.hourlyOpenTime = time.Now()
	w.hourlyOpenDate = w.hourlyOpenTime.Hour()
	w.maxLinesCurLines = 0
	// Hourly takes precedence over Daily when both are set.
	if w.Hourly {
		go w.hourlyRotate(w.hourlyOpenTime)
	} else if w.Daily {
		go w.dailyRotate(w.dailyOpenTime)
	}
	// Only count existing lines when the file is non-empty and a line
	// limit is actually configured — counting scans the whole file.
	if fInfo.Size() > 0 && w.MaxLines > 0 {
		count, err := w.lines()
		if err != nil {
			return err
		}
		w.maxLinesCurLines = count
	}
	return nil
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) dailyRotate(openTime time.Time) {
|
|
||||||
y, m, d := openTime.Add(24 * time.Hour).Date()
|
|
||||||
nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location())
|
|
||||||
tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
|
|
||||||
<-tm.C
|
|
||||||
w.Lock()
|
|
||||||
if w.needRotateDaily(time.Now().Day()) {
|
|
||||||
if err := w.doRotate(time.Now()); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) hourlyRotate(openTime time.Time) {
|
|
||||||
y, m, d := openTime.Add(1 * time.Hour).Date()
|
|
||||||
h, _, _ := openTime.Add(1 * time.Hour).Clock()
|
|
||||||
nextHour := time.Date(y, m, d, h, 0, 0, 0, openTime.Location())
|
|
||||||
tm := time.NewTimer(time.Duration(nextHour.UnixNano() - openTime.UnixNano() + 100))
|
|
||||||
<-tm.C
|
|
||||||
w.Lock()
|
|
||||||
if w.needRotateHourly(time.Now().Hour()) {
|
|
||||||
if err := w.doRotate(time.Now()); err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
w.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *fileLogWriter) lines() (int, error) {
|
|
||||||
fd, err := os.Open(w.Filename)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer fd.Close()
|
|
||||||
|
|
||||||
buf := make([]byte, 32768) // 32k
|
|
||||||
count := 0
|
|
||||||
lineSep := []byte{'\n'}
|
|
||||||
|
|
||||||
for {
|
|
||||||
c, err := fd.Read(buf)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return count, err
|
|
||||||
}
|
|
||||||
|
|
||||||
count += bytes.Count(buf[:c], lineSep)
|
|
||||||
|
|
||||||
if err == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return count, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DoRotate means it needs to write logs into a new file.
// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size)
//
// Caller must hold the write lock. Control flow is deliberately
// goto-based: whatever happens to the old file, the logger MUST be
// restarted so logging can continue.
func (w *fileLogWriter) doRotate(logTime time.Time) error {
	// file exists
	// Find the next available number
	num := w.MaxFilesCurFiles + 1
	fName := ""
	format := ""
	var openTime time.Time
	rotatePerm, err := strconv.ParseInt(w.RotatePerm, 8, 64)
	if err != nil {
		return err
	}

	_, err = os.Lstat(w.Filename)
	if err != nil {
		// even if the file is not exist or other ,we should RESTART the logger
		goto RESTART_LOGGER
	}

	// Pick the timestamp layout for the rotated file name.
	if w.Hourly {
		format = "2006010215"
		openTime = w.hourlyOpenTime
	} else if w.Daily {
		format = "2006-01-02"
		openTime = w.dailyOpenTime
	}

	// only when one of them be setted, then the file would be splited
	if w.MaxLines > 0 || w.MaxSize > 0 {
		// Probe candidate names until Lstat fails (name is free) or the
		// file-count cap is reached.
		for ; err == nil && num <= w.MaxFiles; num++ {
			fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format(format), num, w.suffix)
			_, err = os.Lstat(fName)
		}
	} else {
		fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", openTime.Format(format), num, w.suffix)
		_, err = os.Lstat(fName)
		w.MaxFilesCurFiles = num
	}

	// return error if the last file checked still existed
	if err == nil {
		return fmt.Errorf("Rotate: Cannot find free log number to rename %s", w.Filename)
	}

	// close fileWriter before rename
	w.fileWriter.Close()

	// Rename the file to its new found name
	// even if occurs error,we MUST guarantee to restart new logger
	err = os.Rename(w.Filename, fName)
	if err != nil {
		goto RESTART_LOGGER
	}

	err = os.Chmod(fName, os.FileMode(rotatePerm))

RESTART_LOGGER:

	// Reopen a fresh log file and prune expired rotated files in the
	// background, regardless of how the rename went.
	startLoggerErr := w.startLogger()
	go w.deleteOldLog()

	if startLoggerErr != nil {
		return fmt.Errorf("Rotate StartLogger: %s", startLoggerErr)
	}
	if err != nil {
		return fmt.Errorf("Rotate: %s", err)
	}
	return nil
}
|
|
||||||
|
|
||||||
// deleteOldLog walks the log directory and removes rotated files whose
// modification time is older than the retention window (MaxHours in
// hourly mode, MaxDays in daily mode). Only files sharing this writer's
// base-name prefix and suffix are considered.
func (w *fileLogWriter) deleteOldLog() {
	dir := filepath.Dir(w.Filename)
	// Follow a symlinked log file to its real directory when possible.
	absolutePath, err := filepath.EvalSymlinks(w.Filename)
	if err == nil {
		dir = filepath.Dir(absolutePath)
	}
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
		defer func() {
			// A panic while inspecting one entry must not abort the walk.
			if r := recover(); r != nil {
				fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r)
			}
		}()

		if info == nil {
			return
		}
		if w.Hourly {
			if !info.IsDir() && info.ModTime().Add(1*time.Hour*time.Duration(w.MaxHours)).Before(time.Now()) {
				if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
					strings.HasSuffix(filepath.Base(path), w.suffix) {
					os.Remove(path)
				}
			}
		} else if w.Daily {
			if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) {
				if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
					strings.HasSuffix(filepath.Base(path), w.suffix) {
					os.Remove(path)
				}
			}
		}
		return
	})
}
|
|
||||||
|
|
||||||
// Destroy close the file description, close file writer.
func (w *fileLogWriter) Destroy() {
	w.fileWriter.Close()
}

// Flush flushes file logger.
// there are no buffering messages in file logger in memory.
// flush file means sync file from disk.
func (w *fileLogWriter) Flush() {
	// Sync asks the OS to commit the file's in-core state to stable storage.
	w.fileWriter.Sync()
}

// init registers the file adapter so SetLogger("file", ...) works.
func init() {
	Register(AdapterFile, newFileWriter)
}
|
|
@ -1,96 +0,0 @@
|
|||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook
type JLWriter struct {
	AuthorName  string `json:"authorname"`
	Title       string `json:"title"`
	WebhookURL  string `json:"webhookurl"`
	RedirectURL string `json:"redirecturl,omitempty"`
	ImageURL    string `json:"imageurl,omitempty"`
	Level       int    `json:"level"` // maximum level forwarded to the webhook

	formatter LogFormatter // renders outgoing messages; defaults to the writer itself
	Formatter string       `json:"formatter"` // optional name of a registered formatter
}
|
|
||||||
|
|
||||||
// newJLWriter creates jiaoliao writer.
|
|
||||||
func newJLWriter() Logger {
|
|
||||||
res := &JLWriter{Level: LevelTrace}
|
|
||||||
res.formatter = res
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// Init JLWriter with json config string
|
|
||||||
func (s *JLWriter) Init(config string) error {
|
|
||||||
res := json.Unmarshal([]byte(config), s)
|
|
||||||
if res == nil && len(s.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(s.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", s.Formatter))
|
|
||||||
}
|
|
||||||
s.formatter = fmtr
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *JLWriter) Format(lm *LogMsg) string {
|
|
||||||
msg := lm.OldStyleFormat()
|
|
||||||
msg = fmt.Sprintf("%s %s", lm.When.Format("2006-01-02 15:04:05"), msg)
|
|
||||||
return msg
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFormatter replaces the formatter used to render outgoing messages.
func (s *JLWriter) SetFormatter(f LogFormatter) {
	s.formatter = f
}
|
|
||||||
|
|
||||||
// WriteMsg writes message in smtp writer.
|
|
||||||
// Sends an email with subject and only this message.
|
|
||||||
func (s *JLWriter) WriteMsg(lm *LogMsg) error {
|
|
||||||
if lm.Level > s.Level {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
text := s.formatter.Format(lm)
|
|
||||||
|
|
||||||
form := url.Values{}
|
|
||||||
form.Add("authorName", s.AuthorName)
|
|
||||||
form.Add("title", s.Title)
|
|
||||||
form.Add("text", text)
|
|
||||||
if s.RedirectURL != "" {
|
|
||||||
form.Add("redirectUrl", s.RedirectURL)
|
|
||||||
}
|
|
||||||
if s.ImageURL != "" {
|
|
||||||
form.Add("imageUrl", s.ImageURL)
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := http.PostForm(s.WebhookURL, form)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush implementing method. empty.
func (s *JLWriter) Flush() {
}

// Destroy implementing method. empty.
func (s *JLWriter) Destroy() {
}

// init registers the jianliao adapter so SetLogger("jianliao", ...) works.
func init() {
	Register(AdapterJianLiao, newJLWriter)
}
|
|
@ -1,782 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
// Package logs provide a general log interface
|
|
||||||
// Usage:
|
|
||||||
//
|
|
||||||
// import "github.com/beego/beego/v2/core/logs"
|
|
||||||
//
|
|
||||||
// log := NewLogger(10000)
|
|
||||||
// log.SetLogger("console", "")
|
|
||||||
//
|
|
||||||
// > the first params stand for how many channel
|
|
||||||
//
|
|
||||||
// Use it like this:
|
|
||||||
//
|
|
||||||
// log.Trace("trace")
|
|
||||||
// log.Info("info")
|
|
||||||
// log.Warn("warning")
|
|
||||||
// log.Debug("debug")
|
|
||||||
// log.Critical("critical")
|
|
||||||
//
|
|
||||||
// more docs http://beego.vip/docs/module/logs.md
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// RFC5424 log message levels.
// Lower value means higher severity; a message is emitted when its
// level is <= the logger's configured level.
const (
	LevelEmergency = iota
	LevelAlert
	LevelCritical
	LevelError
	LevelWarning
	LevelNotice
	LevelInformational
	LevelDebug
)

// levelLogLogger is defined to implement log.Logger
// the real log level will be LevelEmergency
const levelLoggerImpl = -1

// Name for adapter with beego official support
const (
	AdapterConsole   = "console"
	AdapterFile      = "file"
	AdapterMultiFile = "multifile"
	AdapterMail      = "smtp"
	AdapterConn      = "conn"
	AdapterEs        = "es"
	AdapterJianLiao  = "jianliao"
	AdapterSlack     = "slack"
	AdapterAliLS     = "alils"
)

// Legacy log level constants to ensure backwards compatibility.
const (
	LevelInfo  = LevelInformational
	LevelTrace = LevelDebug
	LevelWarn  = LevelWarning
)
|
|
||||||
|
|
||||||
// newLoggerFunc constructs a fresh Logger instance for an adapter.
type newLoggerFunc func() Logger

// Logger defines the behavior of a log provider.
type Logger interface {
	Init(config string) error
	WriteMsg(lm *LogMsg) error
	Destroy()
	Flush()
	SetFormatter(f LogFormatter)
}

var (
	// adapters maps adapter names to their constructors (see Register).
	adapters = make(map[string]newLoggerFunc)
	// levelPrefix holds the short tag emitted for each level, indexed by level.
	levelPrefix = [LevelDebug + 1]string{"[M]", "[A]", "[C]", "[E]", "[W]", "[N]", "[I]", "[D]"}
)
|
|
||||||
|
|
||||||
// Register makes a log provide available by the provided name.
|
|
||||||
// If Register is called twice with the same name or if driver is nil,
|
|
||||||
// it panics.
|
|
||||||
func Register(name string, log newLoggerFunc) {
|
|
||||||
if log == nil {
|
|
||||||
panic("logs: Register provide is nil")
|
|
||||||
}
|
|
||||||
if _, dup := adapters[name]; dup {
|
|
||||||
panic("logs: Register called twice for provider " + name)
|
|
||||||
}
|
|
||||||
adapters[name] = log
|
|
||||||
}
|
|
||||||
|
|
||||||
// BeeLogger is default logger in beego application.
// Can contain several providers and log message into all providers.
type BeeLogger struct {
	lock                sync.Mutex // guards init flag and outputs mutation
	init                bool       // true once SetLogger installed the first explicit adapter
	enableFuncCallDepth bool
	enableFullFilePath  bool
	asynchronous        bool
	wg                  sync.WaitGroup // waits for the async consumer on flush/close
	level               int
	loggerFuncCallDepth int
	prefix              string
	msgChanLen          int64
	msgChan             chan *LogMsg
	signalChan          chan string // carries "flush"/"close" control messages
	outputs             []*nameLogger
	globalFormatter     string
}
|
|
||||||
|
|
||||||
// defaultAsyncMsgLen is the default buffer size of the async message channel.
const defaultAsyncMsgLen = 1e3

// nameLogger pairs an adapter instance with the name it was registered under.
type nameLogger struct {
	Logger
	name string
}

// logMsgPool recycles LogMsg values in async mode (initialized in Async).
var logMsgPool *sync.Pool
|
|
||||||
|
|
||||||
// NewLogger returns a new BeeLogger.
|
|
||||||
// channelLen: the number of messages in chan(used where asynchronous is true).
|
|
||||||
// if the buffering chan is full, logger adapters write to file or other way.
|
|
||||||
func NewLogger(channelLens ...int64) *BeeLogger {
|
|
||||||
bl := new(BeeLogger)
|
|
||||||
bl.level = LevelDebug
|
|
||||||
bl.loggerFuncCallDepth = 3
|
|
||||||
bl.msgChanLen = append(channelLens, 0)[0]
|
|
||||||
if bl.msgChanLen <= 0 {
|
|
||||||
bl.msgChanLen = defaultAsyncMsgLen
|
|
||||||
}
|
|
||||||
bl.signalChan = make(chan string, 1)
|
|
||||||
bl.setLogger(AdapterConsole)
|
|
||||||
return bl
|
|
||||||
}
|
|
||||||
|
|
||||||
// Async sets the log to asynchronous and start the goroutine
// consuming the message channel. Calling it again while already
// asynchronous is a no-op; an optional msgLen overrides the buffer size.
func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	if bl.asynchronous {
		return bl
	}
	bl.asynchronous = true
	if len(msgLen) > 0 && msgLen[0] > 0 {
		bl.msgChanLen = msgLen[0]
	}
	bl.msgChan = make(chan *LogMsg, bl.msgChanLen)
	// Pool recycles LogMsg values between producer (writeMsg) and the
	// consumer goroutine (startLogger).
	logMsgPool = &sync.Pool{
		New: func() interface{} {
			return &LogMsg{}
		},
	}
	// wg is re-armed after each flush/close handshake; see Flush.
	bl.wg.Add(1)
	go bl.startLogger()
	return bl
}
|
|
||||||
|
|
||||||
// SetLogger provides a given logger adapter into BeeLogger with config string.
|
|
||||||
// config must in in JSON format like {"interval":360}}
|
|
||||||
func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error {
|
|
||||||
config := append(configs, "{}")[0]
|
|
||||||
for _, l := range bl.outputs {
|
|
||||||
if l.name == adapterName {
|
|
||||||
return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logAdapter, ok := adapters[adapterName]
|
|
||||||
if !ok {
|
|
||||||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
|
|
||||||
}
|
|
||||||
|
|
||||||
lg := logAdapter()
|
|
||||||
|
|
||||||
// Global formatter overrides the default set formatter
|
|
||||||
if len(bl.globalFormatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(bl.globalFormatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", bl.globalFormatter))
|
|
||||||
}
|
|
||||||
lg.SetFormatter(fmtr)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := lg.Init(config)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger provides a given logger adapter into BeeLogger with config string.
// config must in in JSON format like {"interval":360}}
func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
	bl.lock.Lock()
	defer bl.lock.Unlock()
	// The first explicit SetLogger call discards the implicit console
	// adapter that NewLogger installed.
	if !bl.init {
		bl.outputs = []*nameLogger{}
		bl.init = true
	}
	return bl.setLogger(adapterName, configs...)
}
|
|
||||||
|
|
||||||
// DelLogger removes a logger adapter in BeeLogger.
|
|
||||||
func (bl *BeeLogger) DelLogger(adapterName string) error {
|
|
||||||
bl.lock.Lock()
|
|
||||||
defer bl.lock.Unlock()
|
|
||||||
outputs := []*nameLogger{}
|
|
||||||
for _, lg := range bl.outputs {
|
|
||||||
if lg.name == adapterName {
|
|
||||||
lg.Destroy()
|
|
||||||
} else {
|
|
||||||
outputs = append(outputs, lg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(outputs) == len(bl.outputs) {
|
|
||||||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
|
|
||||||
}
|
|
||||||
bl.outputs = outputs
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bl *BeeLogger) writeToLoggers(lm *LogMsg) {
|
|
||||||
for _, l := range bl.outputs {
|
|
||||||
err := l.WriteMsg(lm)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bl *BeeLogger) Write(p []byte) (n int, err error) {
|
|
||||||
if len(p) == 0 {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
// writeMsg will always add a '\n' character
|
|
||||||
if p[len(p)-1] == '\n' {
|
|
||||||
p = p[0 : len(p)-1]
|
|
||||||
}
|
|
||||||
lm := &LogMsg{
|
|
||||||
Msg: string(p),
|
|
||||||
Level: levelLoggerImpl,
|
|
||||||
When: time.Now(),
|
|
||||||
}
|
|
||||||
|
|
||||||
// set levelLoggerImpl to ensure all log message will be write out
|
|
||||||
err = bl.writeMsg(lm)
|
|
||||||
if err == nil {
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bl *BeeLogger) writeMsg(lm *LogMsg) error {
|
|
||||||
if !bl.init {
|
|
||||||
bl.lock.Lock()
|
|
||||||
bl.setLogger(AdapterConsole)
|
|
||||||
bl.lock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
file string
|
|
||||||
line int
|
|
||||||
ok bool
|
|
||||||
)
|
|
||||||
|
|
||||||
_, file, line, ok = runtime.Caller(bl.loggerFuncCallDepth)
|
|
||||||
if !ok {
|
|
||||||
file = "???"
|
|
||||||
line = 0
|
|
||||||
}
|
|
||||||
lm.FilePath = file
|
|
||||||
lm.LineNumber = line
|
|
||||||
lm.Prefix = bl.prefix
|
|
||||||
|
|
||||||
lm.enableFullFilePath = bl.enableFullFilePath
|
|
||||||
lm.enableFuncCallDepth = bl.enableFuncCallDepth
|
|
||||||
|
|
||||||
// set level info in front of filename info
|
|
||||||
if lm.Level == levelLoggerImpl {
|
|
||||||
// set to emergency to ensure all log will be print out correctly
|
|
||||||
lm.Level = LevelEmergency
|
|
||||||
}
|
|
||||||
|
|
||||||
if bl.asynchronous {
|
|
||||||
logM := logMsgPool.Get().(*LogMsg)
|
|
||||||
logM.Level = lm.Level
|
|
||||||
logM.Msg = lm.Msg
|
|
||||||
logM.When = lm.When
|
|
||||||
logM.Args = lm.Args
|
|
||||||
logM.FilePath = lm.FilePath
|
|
||||||
logM.LineNumber = lm.LineNumber
|
|
||||||
logM.Prefix = lm.Prefix
|
|
||||||
if bl.outputs != nil {
|
|
||||||
bl.msgChan <- lm
|
|
||||||
} else {
|
|
||||||
logMsgPool.Put(lm)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
bl.writeToLoggers(lm)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLevel sets log message level.
// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
// log providers will not be sent the message.
func (bl *BeeLogger) SetLevel(l int) {
	bl.level = l
}

// GetLevel Get Current log message level.
func (bl *BeeLogger) GetLevel() int {
	return bl.level
}

// SetLogFuncCallDepth set log funcCallDepth
func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
	bl.loggerFuncCallDepth = d
}

// GetLogFuncCallDepth return log funcCallDepth for wrapper
func (bl *BeeLogger) GetLogFuncCallDepth() int {
	return bl.loggerFuncCallDepth
}

// EnableFuncCallDepth enable log funcCallDepth
func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
	bl.enableFuncCallDepth = b
}

// SetPrefix sets the prefix prepended to every message written through
// this logger.
func (bl *BeeLogger) SetPrefix(s string) {
	bl.prefix = s
}
|
|
||||||
|
|
||||||
// start logger chan reading.
// when chan is not empty, write logs.
//
// startLogger is the async consumer loop: it drains queued messages and
// reacts to "flush"/"close" control signals, exiting only after a
// "close" has destroyed all adapters.
func (bl *BeeLogger) startLogger() {
	gameOver := false
	for {
		select {
		case bm := <-bl.msgChan:
			bl.writeToLoggers(bm)
			logMsgPool.Put(bm)
		case sg := <-bl.signalChan:
			// Now should only send "flush" or "close" to bl.signalChan
			bl.flush()
			if sg == "close" {
				for _, l := range bl.outputs {
					l.Destroy()
				}
				bl.outputs = nil
				gameOver = true
			}
			// Release the Flush/Close caller waiting on wg.
			bl.wg.Done()
		}
		if gameOver {
			break
		}
	}
}
|
|
||||||
|
|
||||||
// setGlobalFormatter records the formatter name applied to adapters
// attached after this call (see setLogger).
func (bl *BeeLogger) setGlobalFormatter(fmtter string) error {
	bl.globalFormatter = fmtter
	return nil
}

// SetGlobalFormatter sets the global formatter for all log adapters
// don't forget to register the formatter by invoking RegisterFormatter
func SetGlobalFormatter(fmtter string) error {
	return beeLogger.setGlobalFormatter(fmtter)
}
|
|
||||||
|
|
||||||
// Emergency Log EMERGENCY level message.
func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
	if LevelEmergency > bl.level {
		return
	}

	lm := &LogMsg{
		Level: LevelEmergency,
		Msg:   format,
		When:  time.Now(),
	}
	// NOTE(review): unlike the other level methods, this one formats
	// eagerly with Sprintf and leaves Args unset — confirm the asymmetry
	// is intentional before changing it.
	if len(v) > 0 {
		lm.Msg = fmt.Sprintf(lm.Msg, v...)
	}

	bl.writeMsg(lm)
}
|
|
||||||
|
|
||||||
// Alert Log ALERT level message.
// Like the other level methods below, the raw Args are attached and
// formatting is deferred to the formatter downstream.
func (bl *BeeLogger) Alert(format string, v ...interface{}) {
	if LevelAlert > bl.level {
		return
	}

	lm := &LogMsg{
		Level: LevelAlert,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}
	bl.writeMsg(lm)
}

// Critical Log CRITICAL level message.
func (bl *BeeLogger) Critical(format string, v ...interface{}) {
	if LevelCritical > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelCritical,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Error Log ERROR level message.
func (bl *BeeLogger) Error(format string, v ...interface{}) {
	if LevelError > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelError,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Warning Log WARNING level message.
func (bl *BeeLogger) Warning(format string, v ...interface{}) {
	if LevelWarn > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelWarn,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Notice Log NOTICE level message.
func (bl *BeeLogger) Notice(format string, v ...interface{}) {
	if LevelNotice > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelNotice,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Informational Log INFORMATIONAL level message.
func (bl *BeeLogger) Informational(format string, v ...interface{}) {
	if LevelInfo > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelInfo,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Debug Log DEBUG level message.
func (bl *BeeLogger) Debug(format string, v ...interface{}) {
	if LevelDebug > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelDebug,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Warn Log WARN level message.
// compatibility alias for Warning()
func (bl *BeeLogger) Warn(format string, v ...interface{}) {
	if LevelWarn > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelWarn,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Info Log INFO level message.
// compatibility alias for Informational()
func (bl *BeeLogger) Info(format string, v ...interface{}) {
	if LevelInfo > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelInfo,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}

// Trace Log TRACE level message.
// compatibility alias for Debug()
func (bl *BeeLogger) Trace(format string, v ...interface{}) {
	if LevelDebug > bl.level {
		return
	}
	lm := &LogMsg{
		Level: LevelDebug,
		Msg:   format,
		When:  time.Now(),
		Args:  v,
	}

	bl.writeMsg(lm)
}
|
|
||||||
|
|
||||||
// Flush flush all chan data.
// In async mode the flush is delegated to the consumer goroutine via the
// signal channel; wg is re-armed afterwards so a later Flush/Close can
// wait on the same handshake again.
func (bl *BeeLogger) Flush() {
	if bl.asynchronous {
		bl.signalChan <- "flush"
		bl.wg.Wait()
		bl.wg.Add(1)
		return
	}
	bl.flush()
}
|
|
||||||
|
|
||||||
// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
// In async mode it signals the consumer goroutine (which flushes and
// destroys the adapters) and waits for it before closing the channel.
func (bl *BeeLogger) Close() {
	if bl.asynchronous {
		bl.signalChan <- "close"
		bl.wg.Wait()
		close(bl.msgChan)
	} else {
		bl.flush()
		for _, l := range bl.outputs {
			l.Destroy()
		}
		bl.outputs = nil
	}
	close(bl.signalChan)
}
|
|
||||||
|
|
||||||
// Reset close all outputs, and set bl.outputs to nil
func (bl *BeeLogger) Reset() {
	bl.Flush()
	for _, l := range bl.outputs {
		l.Destroy()
	}
	bl.outputs = nil
}
|
|
||||||
|
|
||||||
func (bl *BeeLogger) flush() {
|
|
||||||
if bl.asynchronous {
|
|
||||||
for {
|
|
||||||
if len(bl.msgChan) > 0 {
|
|
||||||
bm := <-bl.msgChan
|
|
||||||
bl.writeToLoggers(bm)
|
|
||||||
logMsgPool.Put(bm)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, l := range bl.outputs {
|
|
||||||
l.Flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// beeLogger references the used application logger.
// It backs every package-level convenience function below.
var beeLogger = NewLogger()

// GetBeeLogger returns the default BeeLogger
func GetBeeLogger() *BeeLogger {
	return beeLogger
}
|
|
||||||
|
|
||||||
// beeLoggerMap caches the prefixed *log.Logger instances handed out by
// GetLogger, guarded by the embedded RWMutex.
var beeLoggerMap = struct {
	sync.RWMutex
	logs map[string]*log.Logger
}{
	logs: map[string]*log.Logger{},
}
|
|
||||||
|
|
||||||
// GetLogger returns a standard *log.Logger backed by the default
// BeeLogger, cached per prefix. A non-empty prefix is upper-cased and
// wrapped in brackets, e.g. "[HTTP] ".
func GetLogger(prefixes ...string) *log.Logger {
	prefix := append(prefixes, "")[0]
	if prefix != "" {
		prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
	}
	// Fast path: read lock for the common cached case.
	beeLoggerMap.RLock()
	l, ok := beeLoggerMap.logs[prefix]
	if ok {
		beeLoggerMap.RUnlock()
		return l
	}
	beeLoggerMap.RUnlock()
	// Slow path: take the write lock and re-check before creating, since
	// another goroutine may have created the logger in the gap.
	beeLoggerMap.Lock()
	defer beeLoggerMap.Unlock()
	l, ok = beeLoggerMap.logs[prefix]
	if !ok {
		l = log.New(beeLogger, prefix, 0)
		beeLoggerMap.logs[prefix] = l
	}
	return l
}
|
|
||||||
|
|
||||||
// EnableFullFilePath enables full file path logging. Disabled by default
// e.g "/home/Documents/GitHub/beego/mainapp/" instead of "mainapp"
func EnableFullFilePath(b bool) {
	beeLogger.enableFullFilePath = b
}
|
|
||||||
|
|
||||||
// Reset will remove all the adapter
|
|
||||||
func Reset() {
|
|
||||||
beeLogger.Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Async set the beelogger with Async mode and hold msglen messages
|
|
||||||
func Async(msgLen ...int64) *BeeLogger {
|
|
||||||
return beeLogger.Async(msgLen...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLevel sets the global log level used by the simple logger.
|
|
||||||
func SetLevel(l int) {
|
|
||||||
beeLogger.SetLevel(l)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPrefix sets the prefix
|
|
||||||
func SetPrefix(s string) {
|
|
||||||
beeLogger.SetPrefix(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// EnableFuncCallDepth enable log funcCallDepth
|
|
||||||
func EnableFuncCallDepth(b bool) {
|
|
||||||
beeLogger.enableFuncCallDepth = b
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogFuncCall set the CallDepth, default is 4
|
|
||||||
func SetLogFuncCall(b bool) {
|
|
||||||
beeLogger.EnableFuncCallDepth(b)
|
|
||||||
beeLogger.SetLogFuncCallDepth(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogFuncCallDepth set log funcCallDepth
|
|
||||||
func SetLogFuncCallDepth(d int) {
|
|
||||||
beeLogger.loggerFuncCallDepth = d
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLogger sets a new logger.
|
|
||||||
func SetLogger(adapter string, config ...string) error {
|
|
||||||
return beeLogger.SetLogger(adapter, config...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Emergency logs a message at emergency level.
func Emergency(f interface{}, v ...interface{}) {
	beeLogger.Emergency(formatPattern(f, v...), v...)
}

// Alert logs a message at alert level.
func Alert(f interface{}, v ...interface{}) {
	beeLogger.Alert(formatPattern(f, v...), v...)
}

// Critical logs a message at critical level.
func Critical(f interface{}, v ...interface{}) {
	beeLogger.Critical(formatPattern(f, v...), v...)
}

// Error logs a message at error level.
func Error(f interface{}, v ...interface{}) {
	beeLogger.Error(formatPattern(f, v...), v...)
}

// Warning logs a message at warning level.
func Warning(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatPattern(f, v...), v...)
}

// Warn is a compatibility alias for Warning.
func Warn(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatPattern(f, v...), v...)
}

// Notice logs a message at notice level.
func Notice(f interface{}, v ...interface{}) {
	beeLogger.Notice(formatPattern(f, v...), v...)
}

// Informational logs a message at info level.
func Informational(f interface{}, v ...interface{}) {
	beeLogger.Info(formatPattern(f, v...), v...)
}

// Info is a compatibility alias for Informational.
func Info(f interface{}, v ...interface{}) {
	beeLogger.Info(formatPattern(f, v...), v...)
}

// Debug logs a message at debug level.
func Debug(f interface{}, v ...interface{}) {
	beeLogger.Debug(formatPattern(f, v...), v...)
}

// Trace logs a message at trace level.
func Trace(f interface{}, v ...interface{}) {
	beeLogger.Trace(formatPattern(f, v...), v...)
}
|
|
||||||
|
|
||||||
// formatPattern normalizes f into a printf-style format string for the
// supplied arguments:
//   - f is a string containing '%': used verbatim as the format.
//   - f is a string without '%' and args exist: one " %v" placeholder is
//     appended per argument.
//   - f is any other value: rendered with fmt.Sprint, then " %v"
//     placeholders are appended for any args.
func formatPattern(f interface{}, v ...interface{}) string {
	// Bind the type-switch value directly instead of re-asserting f
	// inside the string case (gocritic typeSwitchVar).
	var msg string
	switch t := f.(type) {
	case string:
		msg = t
		if len(v) == 0 {
			return msg
		}
		if !strings.Contains(msg, "%") {
			// No format verbs present: append one placeholder per arg.
			msg += strings.Repeat(" %v", len(v))
		}
	default:
		msg = fmt.Sprint(f)
		if len(v) == 0 {
			return msg
		}
		msg += strings.Repeat(" %v", len(v))
	}
	return msg
}
|
|
@ -1,55 +0,0 @@
|
|||||||
// Copyright 2020
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LogMsg is a single log record passed from the logger core to its
// output adapters.
type LogMsg struct {
	Level      int           // severity of the record
	Msg        string        // message text; may contain fmt verbs for Args
	When       time.Time     // time the record was created
	FilePath   string        // caller's source file (when call-depth tracking is on)
	LineNumber int           // caller's source line
	Args       []interface{} // arguments applied to Msg via fmt.Sprintf
	Prefix     string        // per-logger prefix prepended to the message
	// enableFullFilePath keeps the whole file path instead of just the base name.
	enableFullFilePath bool
	// enableFuncCallDepth includes "[file:line]" in the formatted output.
	enableFuncCallDepth bool
}
|
|
||||||
|
|
||||||
// OldStyleFormat you should never invoke this
|
|
||||||
func (lm *LogMsg) OldStyleFormat() string {
|
|
||||||
msg := lm.Msg
|
|
||||||
|
|
||||||
if len(lm.Args) > 0 {
|
|
||||||
msg = fmt.Sprintf(lm.Msg, lm.Args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
msg = lm.Prefix + " " + msg
|
|
||||||
|
|
||||||
if lm.enableFuncCallDepth {
|
|
||||||
filePath := lm.FilePath
|
|
||||||
if !lm.enableFullFilePath {
|
|
||||||
_, filePath = path.Split(filePath)
|
|
||||||
}
|
|
||||||
msg = fmt.Sprintf("[%s:%d] %s", filePath, lm.LineNumber, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
msg = levelPrefix[lm.Level] + " " + msg
|
|
||||||
return msg
|
|
||||||
}
|
|
@ -1,178 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type logWriter struct {
|
|
||||||
sync.Mutex
|
|
||||||
writer io.Writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func newLogWriter(wr io.Writer) *logWriter {
|
|
||||||
return &logWriter{writer: wr}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lg *logWriter) writeln(msg string) (int, error) {
|
|
||||||
lg.Lock()
|
|
||||||
msg += "\n"
|
|
||||||
n, err := lg.writer.Write([]byte(msg))
|
|
||||||
lg.Unlock()
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Digit lookup tables used by formatTimeHeader to render a timestamp
// without going through fmt: indexing a table with a component value (or
// a derived quotient/remainder) yields the ASCII digit directly.
const (
	y1  = `0123456789`
	y2  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
	y3  = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
	y4  = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
	mo1 = `000000000111`
	mo2 = `123456789012`
	d1  = `0000000001111111111222222222233`
	d2  = `1234567890123456789012345678901`
	h1  = `000000000011111111112222`
	h2  = `012345678901234567890123`
	mi1 = `000000000011111111112222222222333333333344444444445555555555`
	mi2 = `012345678901234567890123456789012345678901234567890123456789`
	s1  = `000000000011111111112222222222333333333344444444445555555555`
	s2  = `012345678901234567890123456789012345678901234567890123456789`
	ns1 = `0123456789`
)

// formatTimeHeader renders when as "2006/01/02 15:04:05.123 " (24 bytes,
// trailing space included) via table lookups. It also returns the day
// and hour components for callers to reuse.
func formatTimeHeader(when time.Time) ([]byte, int, int) {
	y, mo, d := when.Date()
	h, mi, s := when.Clock()
	ns := when.Nanosecond() / 1000000 // milliseconds, 0..999
	// len("2006/01/02 15:04:05.123 ")==24
	var buf [24]byte

	buf[0] = y1[y/1000%10]
	buf[1] = y2[y/100]
	buf[2] = y3[y-y/100*100]
	buf[3] = y4[y-y/100*100]
	buf[4] = '/'
	buf[5] = mo1[mo-1]
	buf[6] = mo2[mo-1]
	buf[7] = '/'
	buf[8] = d1[d-1]
	buf[9] = d2[d-1]
	buf[10] = ' '
	buf[11] = h1[h]
	buf[12] = h2[h]
	buf[13] = ':'
	buf[14] = mi1[mi]
	buf[15] = mi2[mi]
	buf[16] = ':'
	buf[17] = s1[s]
	buf[18] = s2[s]
	buf[19] = '.'
	buf[20] = ns1[ns/100]
	buf[21] = ns1[ns%100/10]
	buf[22] = ns1[ns%10]

	buf[23] = ' '

	return buf[0:], d, h
}
|
|
||||||
|
|
||||||
// ANSI escape sequences for colored console output. The w32* variants
// are the plain background codes; initColor swaps them in on Windows.
var (
	green   = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
	white   = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
	yellow  = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
	red     = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
	blue    = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
	magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
	cyan    = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})

	w32Green   = string([]byte{27, 91, 52, 50, 109})
	w32White   = string([]byte{27, 91, 52, 55, 109})
	w32Yellow  = string([]byte{27, 91, 52, 51, 109})
	w32Red     = string([]byte{27, 91, 52, 49, 109})
	w32Blue    = string([]byte{27, 91, 52, 52, 109})
	w32Magenta = string([]byte{27, 91, 52, 53, 109})
	w32Cyan    = string([]byte{27, 91, 52, 54, 109})

	// reset restores the terminal's default colors.
	reset = string([]byte{27, 91, 48, 109})
)

// once guards the lazy initialization performed by initColor; colorMap
// maps color names and HTTP methods to their escape sequences.
var (
	once     sync.Once
	colorMap map[string]string
)
|
|
||||||
|
|
||||||
// initColor populates colorMap with escape sequences keyed both by color
// name and by HTTP method, first swapping in the Windows-safe variants
// when running on Windows. Callers guard it with once.Do.
func initColor() {
	if runtime.GOOS == "windows" {
		green = w32Green
		white = w32White
		yellow = w32Yellow
		red = w32Red
		blue = w32Blue
		magenta = w32Magenta
		cyan = w32Cyan
	}
	colorMap = map[string]string{
		// by color
		"green":  green,
		"white":  white,
		"yellow": yellow,
		"red":    red,
		// by method
		"GET":     blue,
		"POST":    cyan,
		"PUT":     yellow,
		"DELETE":  red,
		"PATCH":   green,
		"HEAD":    magenta,
		"OPTIONS": white,
	}
}
|
|
||||||
|
|
||||||
// ColorByStatus return color by http code
|
|
||||||
// 2xx return Green
|
|
||||||
// 3xx return White
|
|
||||||
// 4xx return Yellow
|
|
||||||
// 5xx return Red
|
|
||||||
func ColorByStatus(code int) string {
|
|
||||||
once.Do(initColor)
|
|
||||||
switch {
|
|
||||||
case code >= 200 && code < 300:
|
|
||||||
return colorMap["green"]
|
|
||||||
case code >= 300 && code < 400:
|
|
||||||
return colorMap["white"]
|
|
||||||
case code >= 400 && code < 500:
|
|
||||||
return colorMap["yellow"]
|
|
||||||
default:
|
|
||||||
return colorMap["red"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ColorByMethod return color by http code
|
|
||||||
func ColorByMethod(method string) string {
|
|
||||||
once.Do(initColor)
|
|
||||||
if c := colorMap[method]; c != "" {
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
return reset
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResetColor returns the escape sequence that restores default colors.
func ResetColor() string {
	return reset
}
|
|
@ -1,132 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A filesLogWriter manages several fileLogWriter
// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file
// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log
// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log
// the rotate attribute also acts like fileLogWriter
type multiFileLogWriter struct {
	// writers holds one writer per level plus one extra slot (the last)
	// for the full log that receives every message.
	writers       [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
	fullLogWriter *fileLogWriter
	Separate      []string `json:"separate"` // level names that get their own file
}

// levelNames maps a level index to the name used both in the "separate"
// config field and in the derived per-level file names.
var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
|
|
||||||
|
|
||||||
// Init file logger with json config.
// jsonConfig like:
//
//	{
//	"filename":"logs/beego.log",
//	"maxLines":0,
//	"maxsize":0,
//	"daily":true,
//	"maxDays":15,
//	"rotate":true,
//	"perm":0600,
//	"separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
//	}
func (f *multiFileLogWriter) Init(config string) error {
	// The full writer receives every message and lives in the last slot.
	writer := newFileWriter().(*fileLogWriter)
	err := writer.Init(config)
	if err != nil {
		return err
	}
	f.fullLogWriter = writer
	f.writers[LevelDebug+1] = writer

	// unmarshal "separate" field to f.Separate
	err = json.Unmarshal([]byte(config), f)
	if err != nil {
		return err
	}

	// Re-parse the config into a generic map so per-level variants can be
	// derived by overriding just "filename" and "level".
	jsonMap := map[string]interface{}{}
	err = json.Unmarshal([]byte(config), &jsonMap)
	if err != nil {
		return err
	}

	// Create one dedicated file writer for each level listed in Separate.
	for i := LevelEmergency; i < LevelDebug+1; i++ {
		for _, v := range f.Separate {
			if v == levelNames[i] {
				jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
				jsonMap["level"] = i
				bs, _ := json.Marshal(jsonMap)
				writer = newFileWriter().(*fileLogWriter)
				err := writer.Init(string(bs))
				if err != nil {
					return err
				}
				f.writers[i] = writer
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// Format renders lm using the legacy single-line layout.
func (*multiFileLogWriter) Format(lm *LogMsg) string {
	return lm.OldStyleFormat()
}

// SetFormatter forwards the formatter to the full (all-levels) writer.
// NOTE(review): the per-level writers keep their own formatter — confirm
// whether that is intentional.
func (f *multiFileLogWriter) SetFormatter(fmt LogFormatter) {
	f.fullLogWriter.SetFormatter(fmt)
}

// Destroy closes every file writer that was configured.
func (f *multiFileLogWriter) Destroy() {
	for i := 0; i < len(f.writers); i++ {
		if f.writers[i] != nil {
			f.writers[i].Destroy()
		}
	}
}

// WriteMsg writes lm to the full log and, when a writer exists for lm's
// exact level, to that level's separate file as well. Per-writer errors
// are ignored (best-effort fan-out) and nil is always returned.
func (f *multiFileLogWriter) WriteMsg(lm *LogMsg) error {
	if f.fullLogWriter != nil {
		f.fullLogWriter.WriteMsg(lm)
	}
	// The last slot is the full writer; scan only the per-level slots.
	for i := 0; i < len(f.writers)-1; i++ {
		if f.writers[i] != nil {
			if lm.Level == f.writers[i].Level {
				f.writers[i].WriteMsg(lm)
			}
		}
	}
	return nil
}

// Flush flushes every configured file writer.
func (f *multiFileLogWriter) Flush() {
	for i := 0; i < len(f.writers); i++ {
		if f.writers[i] != nil {
			f.writers[i].Flush()
		}
	}
}

// newFilesWriter create a FileLogWriter returning as LoggerInterface.
func newFilesWriter() Logger {
	res := &multiFileLogWriter{}
	return res
}

// Register the multi-file adapter on package load.
func init() {
	Register(AdapterMultiFile, newFilesWriter)
}
|
|
@ -1,84 +0,0 @@
|
|||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SLACKWriter implements beego LoggerInterface and posts log messages to
// a Slack-style incoming webhook.
type SLACKWriter struct {
	WebhookURL string `json:"webhookurl"` // webhook endpoint to POST to
	Level      int    `json:"level"`      // maximum level that is forwarded
	formatter  LogFormatter
	Formatter  string `json:"formatter"` // name of a registered formatter
}

// newSLACKWriter creates a Slack writer defaulting to LevelTrace, using
// the writer itself as formatter.
func newSLACKWriter() Logger {
	res := &SLACKWriter{Level: LevelTrace}
	res.formatter = res
	return res
}

// Format prefixes the legacy-layout message with a timestamp.
func (s *SLACKWriter) Format(lm *LogMsg) string {
	// text := fmt.Sprintf("{\"text\": \"%s\"}", msg)
	return lm.When.Format("2006-01-02 15:04:05") + " " + lm.OldStyleFormat()
}

// SetFormatter replaces the formatter used for outgoing messages.
func (s *SLACKWriter) SetFormatter(f LogFormatter) {
	s.formatter = f
}
|
|
||||||
|
|
||||||
// Init SLACKWriter with json config string
|
|
||||||
func (s *SLACKWriter) Init(config string) error {
|
|
||||||
res := json.Unmarshal([]byte(config), s)
|
|
||||||
|
|
||||||
if res == nil && len(s.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(s.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", s.Formatter))
|
|
||||||
}
|
|
||||||
s.formatter = fmtr
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteMsg posts the formatted record to the configured webhook as a
// JSON payload of the form {"text": "..."}. Records above the configured
// level are dropped.
func (s *SLACKWriter) WriteMsg(lm *LogMsg) error {
	if lm.Level > s.Level {
		return nil
	}
	msg := s.Format(lm)
	m := make(map[string]string, 1)
	m["text"] = msg

	// Marshal of a map[string]string cannot fail, so the error is ignored.
	body, _ := json.Marshal(m)
	// resp, err := http.PostForm(s.WebhookURL, form)
	// NOTE(review): http.Post uses the default client, which has no
	// timeout — consider a client with a deadline.
	resp, err := http.Post(s.WebhookURL, "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
	}
	return nil
}

// Flush implementing method. empty.
func (s *SLACKWriter) Flush() {
}

// Destroy implementing method. empty.
func (s *SLACKWriter) Destroy() {
}

// Register the Slack adapter on package load.
func init() {
	Register(AdapterSlack, newSLACKWriter)
}
|
|
@ -1,172 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package logs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/tls"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/smtp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
type SMTPWriter struct {
	Username           string   `json:"username"`    // SMTP auth user (blank with blank password disables auth)
	Password           string   `json:"password"`    // SMTP auth password
	Host               string   `json:"host"`        // "host:port" of the SMTP server
	Subject            string   `json:"subject"`     // subject line for every mail
	FromAddress        string   `json:"fromAddress"` // envelope/header From address
	RecipientAddresses []string `json:"sendTos"`     // destination addresses
	Level              int      `json:"level"`       // maximum level that is mailed
	formatter          LogFormatter
	Formatter          string `json:"formatter"` // name of a registered formatter
}

// newSMTPWriter creates the smtp writer defaulting to LevelTrace, using
// the writer itself as formatter.
func newSMTPWriter() Logger {
	res := &SMTPWriter{Level: LevelTrace}
	res.formatter = res
	return res
}
|
|
||||||
|
|
||||||
// Init smtp writer with json config.
|
|
||||||
// config like:
|
|
||||||
// {
|
|
||||||
// "username":"example@gmail.com",
|
|
||||||
// "password:"password",
|
|
||||||
// "host":"smtp.gmail.com:465",
|
|
||||||
// "subject":"email title",
|
|
||||||
// "fromAddress":"from@example.com",
|
|
||||||
// "sendTos":["email1","email2"],
|
|
||||||
// "level":LevelError
|
|
||||||
// }
|
|
||||||
func (s *SMTPWriter) Init(config string) error {
|
|
||||||
res := json.Unmarshal([]byte(config), s)
|
|
||||||
if res == nil && len(s.Formatter) > 0 {
|
|
||||||
fmtr, ok := GetFormatter(s.Formatter)
|
|
||||||
if !ok {
|
|
||||||
return errors.New(fmt.Sprintf("the formatter with name: %s not found", s.Formatter))
|
|
||||||
}
|
|
||||||
s.formatter = fmtr
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth {
|
|
||||||
if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return smtp.PlainAuth(
|
|
||||||
"",
|
|
||||||
s.Username,
|
|
||||||
s.Password,
|
|
||||||
host,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *SMTPWriter) SetFormatter(f LogFormatter) {
|
|
||||||
s.formatter = f
|
|
||||||
}
|
|
||||||
|
|
||||||
// sendMail performs the full SMTP conversation: dial, STARTTLS, optional
// auth, envelope (MAIL FROM / RCPT TO), DATA body, and QUIT.
func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error {
	client, err := smtp.Dial(hostAddressWithPort)
	if err != nil {
		return err
	}

	host, _, _ := net.SplitHostPort(hostAddressWithPort)
	// NOTE(review): InsecureSkipVerify disables certificate validation,
	// exposing the connection to man-in-the-middle attacks — confirm this
	// is really required for the target deployments.
	tlsConn := &tls.Config{
		InsecureSkipVerify: true,
		ServerName:         host,
	}
	if err = client.StartTLS(tlsConn); err != nil {
		return err
	}

	if auth != nil {
		if err = client.Auth(auth); err != nil {
			return err
		}
	}

	if err = client.Mail(fromAddress); err != nil {
		return err
	}

	for _, rec := range recipients {
		if err = client.Rcpt(rec); err != nil {
			return err
		}
	}

	w, err := client.Data()
	if err != nil {
		return err
	}
	_, err = w.Write(msgContent)
	if err != nil {
		return err
	}

	err = w.Close()
	if err != nil {
		return err
	}

	return client.Quit()
}
|
|
||||||
|
|
||||||
// Format renders lm using the legacy single-line layout.
func (s *SMTPWriter) Format(lm *LogMsg) string {
	return lm.OldStyleFormat()
}
|
|
||||||
|
|
||||||
// WriteMsg sends the formatted record as an email with the configured
// subject. Records above the configured level are dropped.
func (s *SMTPWriter) WriteMsg(lm *LogMsg) error {
	if lm.Level > s.Level {
		return nil
	}

	// Auth is keyed by the host name without the port.
	hp := strings.Split(s.Host, ":")

	// Set up authentication information.
	auth := s.getSMTPAuth(hp[0])

	msg := s.Format(lm)

	// Connect to the server, authenticate, set the sender and recipient,
	// and send the email all in one step.
	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
	mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress +
		">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", lm.When.Format("2006-01-02 15:04:05")) + msg)

	return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg)
}
|
|
||||||
|
|
||||||
// Flush implementing method. empty.
func (s *SMTPWriter) Flush() {
}

// Destroy implementing method. empty.
func (s *SMTPWriter) Destroy() {
}

// Register the SMTP mail adapter on package load.
func init() {
	Register(AdapterMail, newSMTPWriter)
}
|
|
@ -1,25 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFuncName get function name
|
|
||||||
func GetFuncName(i interface{}) string {
|
|
||||||
return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
|
|
||||||
}
|
|
@ -1,478 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Byte fragments used when rendering symbol names in debug output.
var (
	dunno     = []byte("???") // placeholder for an unresolvable symbol
	centerDot = []byte("·")
	dot       = []byte(".")
)

// pointerInfo is a linked-list node recording a pointer seen while
// dumping a value; it supports cycle/back-reference detection.
type pointerInfo struct {
	prev *pointerInfo // previously recorded pointer
	n    int
	addr uintptr // address of the pointee
	pos  int     // output-buffer offset where this pointer was first printed
	used []int   // buffer offsets where the pointer was referenced again
}
|
|
||||||
|
|
||||||
// Display print the data in console.
// Arguments alternate name/value pairs, e.g. Display("x", x, "y", y).
func Display(data ...interface{}) {
	display(true, data...)
}

// GetDisplayString return data print string without logging it.
func GetDisplayString(data ...interface{}) string {
	return display(false, data...)
}
|
|
||||||
|
|
||||||
// display formats the given name/value pairs, prefixed with the caller's
// function, file, and line, optionally logs the result, and returns the
// formatted string.
// NOTE(review): data is assumed to alternate string names with values;
// an odd-length slice or a non-string name will panic on the assertion.
func display(displayed bool, data ...interface{}) string {
	// Caller(2) skips display itself plus the Display/GetDisplayString wrapper.
	pc, file, line, ok := runtime.Caller(2)

	if !ok {
		return ""
	}

	buf := new(bytes.Buffer)

	fmt.Fprintf(buf, "[Debug] at %s() [%s:%d]\n", function(pc), file, line)

	fmt.Fprintf(buf, "\n[Variables]\n")

	for i := 0; i < len(data); i += 2 {
		// len(name)+3 is passed as fomateinfo's headlen argument.
		output := fomateinfo(len(data[i].(string))+3, data[i+1])
		fmt.Fprintf(buf, "%s = %s", data[i], output)
	}

	if displayed {
		log.Print(buf)
	}
	return buf.String()
}
|
|
||||||
|
|
||||||
// fomateinfo (sic) dumps each value in data via printKeyValue and
// returns the formatted bytes; multiple values are wrapped in "[ ... ]".
// NOTE(review): the headlen parameter is unused in this body — confirm
// whether it was meant to drive indentation.
func fomateinfo(headlen int, data ...interface{}) []byte {
	buf := new(bytes.Buffer)

	if len(data) > 1 {
		fmt.Fprint(buf, " ")

		fmt.Fprint(buf, "[")

		fmt.Fprintln(buf)
	}

	for k, v := range data {
		// Each value gets its own scratch buffer plus fresh pointer/interface
		// tracking state for cycle detection.
		buf2 := new(bytes.Buffer)
		var pointers *pointerInfo
		interfaces := make([]reflect.Value, 0, 10)

		printKeyValue(buf2, reflect.ValueOf(v), &pointers, &interfaces, nil, true, " ", 1)

		if k < len(data)-1 {
			fmt.Fprint(buf2, ", ")
		}

		fmt.Fprintln(buf2)

		buf.Write(buf2.Bytes())
	}

	if len(data) > 1 {
		fmt.Fprintln(buf)

		fmt.Fprint(buf, " ")

		fmt.Fprint(buf, "]")
	}

	return buf.Bytes()
}
|
|
||||||
|
|
||||||
// isSimpleType reports whether val can be printed without recursing into
// a composite structure: booleans, numbers, strings, channels, invalid
// values, interfaces already seen (cycle stop), and pointers that are
// nil, point at a simple value, or were visited before.
func isSimpleType(val reflect.Value, kind reflect.Kind, pointers **pointerInfo, interfaces *[]reflect.Value) bool {
	switch kind {
	case reflect.Bool:
		return true
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return true
	case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:
		return true
	case reflect.Float32, reflect.Float64:
		return true
	case reflect.Complex64, reflect.Complex128:
		return true
	case reflect.String:
		return true
	case reflect.Chan:
		return true
	case reflect.Invalid:
		return true
	case reflect.Interface:
		// An interface already recorded in *interfaces is treated as
		// simple to stop infinite recursion.
		for _, in := range *interfaces {
			if reflect.DeepEqual(in, val) {
				return true
			}
		}
		return false
	case reflect.UnsafePointer:
		// NOTE(review): this branch performs Elem()/UnsafeAddr() pointer
		// operations, which look written for reflect.Ptr rather than
		// reflect.UnsafePointer — confirm the intended kind.
		if val.IsNil() {
			return true
		}

		elem := val.Elem()

		if isSimpleType(elem, elem.Kind(), pointers, interfaces) {
			return true
		}

		// A pointer already on the visited list is treated as simple.
		addr := val.Elem().UnsafeAddr()

		for p := *pointers; p != nil; p = p.prev {
			if addr == p.addr {
				return true
			}
		}

		return false
	}

	return false
}
|
|
||||||
|
|
||||||
// printKeyValue recursively renders val into buf in a Go-literal-like syntax.
// pointers accumulates every pointer target dumped (so cycles print as bare
// addresses and PrintPointerInfo can later draw reference lines); interfaces
// accumulates interface values (cycles print as "repeat"). structFilter, when
// non-nil, suppresses a struct field (printed as "ignore") when it returns
// true for (typeName, fieldName). formatOutput selects multi-line output with
// `indent` repeated `level` times per nesting level.
func printKeyValue(buf *bytes.Buffer, val reflect.Value, pointers **pointerInfo, interfaces *[]reflect.Value, structFilter func(string, string) bool, formatOutput bool, indent string, level int) {
	t := val.Kind()

	switch t {
	case reflect.Bool:
		fmt.Fprint(buf, val.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		fmt.Fprint(buf, val.Int())
	case reflect.Uint8, reflect.Uint16, reflect.Uint, reflect.Uint32, reflect.Uint64:
		fmt.Fprint(buf, val.Uint())
	case reflect.Float32, reflect.Float64:
		fmt.Fprint(buf, val.Float())
	case reflect.Complex64, reflect.Complex128:
		fmt.Fprint(buf, val.Complex())
	case reflect.UnsafePointer:
		fmt.Fprintf(buf, "unsafe.Pointer(0x%X)", val.Pointer())
	case reflect.Ptr:
		if val.IsNil() {
			fmt.Fprint(buf, "nil")
			return
		}

		addr := val.Elem().UnsafeAddr()

		// If this target was already dumped, print its address and record
		// where in the buffer the reference occurs (p.used), so
		// PrintPointerInfo can draw a line back to the definition.
		for p := *pointers; p != nil; p = p.prev {
			if addr == p.addr {
				p.used = append(p.used, buf.Len())
				fmt.Fprintf(buf, "0x%X", addr)
				return
			}
		}

		// First encounter: push the target onto the pointer list, noting
		// the buffer position of its definition (p.pos).
		*pointers = &pointerInfo{
			prev: *pointers,
			addr: addr,
			pos:  buf.Len(),
			used: make([]int, 0),
		}

		fmt.Fprint(buf, "&")

		// Same level: the "&" prefix does not add nesting.
		printKeyValue(buf, val.Elem(), pointers, interfaces, structFilter, formatOutput, indent, level)
	case reflect.String:
		fmt.Fprint(buf, "\"", val.String(), "\"")
	case reflect.Interface:
		value := val.Elem()

		if !value.IsValid() {
			fmt.Fprint(buf, "nil")
		} else {
			// An interface value seen before marks a cycle.
			for _, in := range *interfaces {
				if reflect.DeepEqual(in, val) {
					fmt.Fprint(buf, "repeat")
					return
				}
			}

			*interfaces = append(*interfaces, val)

			printKeyValue(buf, value, pointers, interfaces, structFilter, formatOutput, indent, level+1)
		}
	case reflect.Struct:
		t := val.Type()

		// "pkg.Type{" header, then one "Name: value," entry per field.
		fmt.Fprint(buf, t)
		fmt.Fprint(buf, "{")

		for i := 0; i < val.NumField(); i++ {
			if formatOutput {
				fmt.Fprintln(buf)
			} else {
				fmt.Fprint(buf, " ")
			}

			name := t.Field(i).Name

			if formatOutput {
				for ind := 0; ind < level; ind++ {
					fmt.Fprint(buf, indent)
				}
			}

			fmt.Fprint(buf, name)
			fmt.Fprint(buf, ": ")

			// structFilter lets callers hide noisy/secret fields.
			if structFilter != nil && structFilter(t.String(), name) {
				fmt.Fprint(buf, "ignore")
			} else {
				printKeyValue(buf, val.Field(i), pointers, interfaces, structFilter, formatOutput, indent, level+1)
			}

			fmt.Fprint(buf, ",")
		}

		// Closing brace is indented one level less than the fields.
		if formatOutput {
			fmt.Fprintln(buf)

			for ind := 0; ind < level-1; ind++ {
				fmt.Fprint(buf, indent)
			}
		} else {
			fmt.Fprint(buf, " ")
		}

		fmt.Fprint(buf, "}")
	case reflect.Array, reflect.Slice:
		fmt.Fprint(buf, val.Type())
		fmt.Fprint(buf, "{")

		// allSimple tracks whether every element fit on one line; it
		// controls trailing-comma and closing-brace layout below.
		allSimple := true

		for i := 0; i < val.Len(); i++ {
			elem := val.Index(i)

			isSimple := isSimpleType(elem, elem.Kind(), pointers, interfaces)

			if !isSimple {
				allSimple = false
			}

			if formatOutput && !isSimple {
				fmt.Fprintln(buf)
			} else {
				fmt.Fprint(buf, " ")
			}

			if formatOutput && !isSimple {
				for ind := 0; ind < level; ind++ {
					fmt.Fprint(buf, indent)
				}
			}

			printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1)

			// No comma after the last element unless multi-line layout
			// was used.
			if i != val.Len()-1 || !allSimple {
				fmt.Fprint(buf, ",")
			}
		}

		if formatOutput && !allSimple {
			fmt.Fprintln(buf)

			for ind := 0; ind < level-1; ind++ {
				fmt.Fprint(buf, indent)
			}
		} else {
			fmt.Fprint(buf, " ")
		}

		fmt.Fprint(buf, "}")
	case reflect.Map:
		t := val.Type()
		keys := val.MapKeys()

		fmt.Fprint(buf, t)
		fmt.Fprint(buf, "{")

		// Same layout bookkeeping as the slice case. Note: map iteration
		// order is random, so repeated dumps of the same map may differ.
		allSimple := true

		for i := 0; i < len(keys); i++ {
			elem := val.MapIndex(keys[i])

			isSimple := isSimpleType(elem, elem.Kind(), pointers, interfaces)

			if !isSimple {
				allSimple = false
			}

			if formatOutput && !isSimple {
				fmt.Fprintln(buf)
			} else {
				fmt.Fprint(buf, " ")
			}

			if formatOutput && !isSimple {
				for ind := 0; ind <= level; ind++ {
					fmt.Fprint(buf, indent)
				}
			}

			printKeyValue(buf, keys[i], pointers, interfaces, structFilter, formatOutput, indent, level+1)
			fmt.Fprint(buf, ": ")
			printKeyValue(buf, elem, pointers, interfaces, structFilter, formatOutput, indent, level+1)

			if i != val.Len()-1 || !allSimple {
				fmt.Fprint(buf, ",")
			}
		}

		if formatOutput && !allSimple {
			fmt.Fprintln(buf)

			for ind := 0; ind < level-1; ind++ {
				fmt.Fprint(buf, indent)
			}
		} else {
			fmt.Fprint(buf, " ")
		}

		fmt.Fprint(buf, "}")
	case reflect.Chan:
		// Channels have no printable contents; show the type only.
		fmt.Fprint(buf, val.Type())
	case reflect.Invalid:
		fmt.Fprint(buf, "invalid")
	default:
		// Func and any future kinds fall through here.
		fmt.Fprint(buf, "unknow")
	}
}
|
|
||||||
|
|
||||||
// PrintPointerInfo appends an ASCII-art diagram under the dump already in buf,
// drawing a line from each pointer definition (└) to every place it was
// referenced (┘ / ┴ / ┼ / │ / ─). headlen is the number of columns the dump's
// prefix occupies, so drawing positions line up with the text above. If no
// pointer was ever referenced twice, nothing is appended.
func PrintPointerInfo(buf *bytes.Buffer, headlen int, pointers *pointerInfo) {
	anyused := false
	pointerNum := 0

	// First pass: number the pointers (p.n) and check whether any of them
	// was actually referenced after its definition.
	for p := pointers; p != nil; p = p.prev {
		if len(p.used) > 0 {
			anyused = true
		}
		pointerNum++
		p.n = pointerNum
	}

	if anyused {
		// One rune row per pointer (plus one), each as wide as the dump.
		pointerBufs := make([][]rune, pointerNum+1)

		for i := 0; i < len(pointerBufs); i++ {
			pointerBuf := make([]rune, buf.Len()+headlen)

			for j := 0; j < len(pointerBuf); j++ {
				pointerBuf[j] = ' '
			}

			pointerBufs[i] = pointerBuf
		}

		// Row pn carries the horizontal connector for pointer number pn;
		// rows above it carry the vertical drop lines (│) passing through.
		for pn := 0; pn <= pointerNum; pn++ {
			for p := pointers; p != nil; p = p.prev {
				if len(p.used) > 0 && p.n >= pn {
					if pn == p.n {
						// This pointer's own row: corner at the
						// definition, tees/corner at each use, and a
						// horizontal line joining them.
						pointerBufs[pn][p.pos+headlen] = '└'

						maxpos := 0

						for i, pos := range p.used {
							if i < len(p.used)-1 {
								pointerBufs[pn][pos+headlen] = '┴'
							} else {
								pointerBufs[pn][pos+headlen] = '┘'
							}

							maxpos = pos
						}

						// Fill the span between definition and last use,
						// without overwriting glyphs from other pointers.
						for i := 0; i < maxpos-p.pos-1; i++ {
							if pointerBufs[pn][i+p.pos+headlen+1] == ' ' {
								pointerBufs[pn][i+p.pos+headlen+1] = '─'
							}
						}
					} else {
						// A row above this pointer's own: draw the
						// vertical drops, crossing (┼) where they meet
						// another pointer's horizontal line.
						pointerBufs[pn][p.pos+headlen] = '│'

						for _, pos := range p.used {
							if pointerBufs[pn][pos+headlen] == ' ' {
								pointerBufs[pn][pos+headlen] = '│'
							} else {
								pointerBufs[pn][pos+headlen] = '┼'
							}
						}
					}
				}
			}

			buf.WriteString(string(pointerBufs[pn]) + "\n")
		}
	}
}
|
|
||||||
|
|
||||||
// Stack get stack bytes
|
|
||||||
func Stack(skip int, indent string) []byte {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
for i := skip; ; i++ {
|
|
||||||
pc, file, line, ok := runtime.Caller(i)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
buf.WriteString(indent)
|
|
||||||
|
|
||||||
fmt.Fprintf(buf, "at %s() [%s:%d]\n", function(pc), file, line)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes()
|
|
||||||
}
|
|
||||||
|
|
||||||
// return the name of the function containing the PC if possible,
|
|
||||||
func function(pc uintptr) []byte {
|
|
||||||
fn := runtime.FuncForPC(pc)
|
|
||||||
if fn == nil {
|
|
||||||
return dunno
|
|
||||||
}
|
|
||||||
name := []byte(fn.Name())
|
|
||||||
// The name includes the path name to the package, which is unnecessary
|
|
||||||
// since the file name is already included. Plus, it has center dots.
|
|
||||||
// That is, we see
|
|
||||||
// runtime/debug.*T·ptrmethod
|
|
||||||
// and want
|
|
||||||
// *T.ptrmethod
|
|
||||||
if period := bytes.Index(name, dot); period >= 0 {
|
|
||||||
name = name[period+1:]
|
|
||||||
}
|
|
||||||
name = bytes.Replace(name, centerDot, dot, -1)
|
|
||||||
return name
|
|
||||||
}
|
|
@ -1,101 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SelfPath gets compiled executable file absolute path
|
|
||||||
func SelfPath() string {
|
|
||||||
path, _ := filepath.Abs(os.Args[0])
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
|
|
||||||
// SelfDir gets compiled executable file directory
|
|
||||||
func SelfDir() string {
|
|
||||||
return filepath.Dir(SelfPath())
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileExists reports whether the named file or directory exists.
|
|
||||||
func FileExists(name string) bool {
|
|
||||||
if _, err := os.Stat(name); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// SearchFile Search a file in paths.
|
|
||||||
// this is often used in search config file in /etc ~/
|
|
||||||
func SearchFile(filename string, paths ...string) (fullpath string, err error) {
|
|
||||||
for _, path := range paths {
|
|
||||||
if fullpath = filepath.Join(path, filename); FileExists(fullpath) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = errors.New(fullpath + " not found in paths")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// GrepFile like command grep -E
|
|
||||||
// for example: GrepFile(`^hello`, "hello.txt")
|
|
||||||
// \n is striped while read
|
|
||||||
func GrepFile(patten string, filename string) (lines []string, err error) {
|
|
||||||
re, err := regexp.Compile(patten)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
fd, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
lines = make([]string, 0)
|
|
||||||
reader := bufio.NewReader(fd)
|
|
||||||
prefix := ""
|
|
||||||
var isLongLine bool
|
|
||||||
for {
|
|
||||||
byteLine, isPrefix, er := reader.ReadLine()
|
|
||||||
if er != nil && er != io.EOF {
|
|
||||||
return nil, er
|
|
||||||
}
|
|
||||||
if er == io.EOF {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
line := string(byteLine)
|
|
||||||
if isPrefix {
|
|
||||||
prefix += line
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
isLongLine = true
|
|
||||||
}
|
|
||||||
|
|
||||||
line = prefix + line
|
|
||||||
if isLongLine {
|
|
||||||
prefix = ""
|
|
||||||
}
|
|
||||||
if re.MatchString(line) {
|
|
||||||
lines = append(lines, line)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lines, nil
|
|
||||||
}
|
|
@ -1,87 +0,0 @@
|
|||||||
// Copyright 2020 beego-dev
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
// KV is the minimal key-value abstraction: anything that can expose a key
// and a value.
type KV interface {
	GetKey() interface{}
	GetValue() interface{}
}

// SimpleKV is common structure to store key-value pairs.
// When you need something like Pair, you can use this
type SimpleKV struct {
	Key   interface{}
	Value interface{}
}

// Compile-time check that SimpleKV satisfies KV.
var _ KV = new(SimpleKV)

// GetKey returns the stored key.
func (s *SimpleKV) GetKey() interface{} {
	return s.Key
}

// GetValue returns the stored value.
func (s *SimpleKV) GetValue() interface{} {
	return s.Value
}

// KVs interface
type KVs interface {
	GetValueOr(key interface{}, defValue interface{}) interface{}
	Contains(key interface{}) bool
	IfContains(key interface{}, action func(value interface{})) KVs
}

// SimpleKVs will store SimpleKV collection as map
type SimpleKVs struct {
	kvs map[interface{}]interface{}
}

// Compile-time check that SimpleKVs satisfies KVs.
var _ KVs = new(SimpleKVs)

// GetValueOr returns the value for a given key, if non-existent
// it returns defValue
func (kvs *SimpleKVs) GetValueOr(key interface{}, defValue interface{}) interface{} {
	if v, found := kvs.kvs[key]; found {
		return v
	}
	return defValue
}

// Contains checks if a key exists
func (kvs *SimpleKVs) Contains(key interface{}) bool {
	_, found := kvs.kvs[key]
	return found
}

// IfContains invokes the action on a key if it exists
func (kvs *SimpleKVs) IfContains(key interface{}, action func(value interface{})) KVs {
	if v, found := kvs.kvs[key]; found {
		action(v)
	}
	return kvs
}

// NewKVs creates the *KVs instance
func NewKVs(kvs ...KV) KVs {
	store := &SimpleKVs{
		kvs: make(map[interface{}]interface{}, len(kvs)),
	}
	for _, pair := range kvs {
		store.kvs[pair.GetKey()] = pair.GetValue()
	}
	return store
}
|
|
@ -1,424 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime"
|
|
||||||
"mime/multipart"
|
|
||||||
"net/mail"
|
|
||||||
"net/smtp"
|
|
||||||
"net/textproto"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// maxLineLength is the maximum encoded line length mandated by RFC 2045
	// for quoted-printable and base64 message bodies (76 characters).
	maxLineLength = 76

	// upperhex is the uppercase hexadecimal digit set used for "=XX" escapes.
	upperhex = "0123456789ABCDEF"
)
|
|
||||||
|
|
||||||
// Email is the type used for email messages
type Email struct {
	Auth        smtp.Auth
	Identity    string `json:"identity"`
	Username    string `json:"username"`
	Password    string `json:"password"`
	Host        string `json:"host"`
	Port        int    `json:"port"`
	From        string `json:"from"`
	To          []string
	Bcc         []string
	Cc          []string
	Subject     string
	Text        string // Plaintext message (optional)
	HTML        string // Html message (optional)
	Headers     textproto.MIMEHeader
	Attachments []*Attachment
	ReadReceipt []string
}

// Attachment is a struct representing an email attachment.
// Based on the mime/multipart.FileHeader struct, Attachment contains the name, MIMEHeader, and content of the attachment in question
type Attachment struct {
	Filename string
	Header   textproto.MIMEHeader
	Content  []byte
}

// NewEMail create new Email struct with config json.
// config json is followed from Email struct fields.
func NewEMail(config string) *Email {
	e := &Email{Headers: textproto.MIMEHeader{}}
	if err := json.Unmarshal([]byte(config), e); err != nil {
		// NOTE(review): the parse error is swallowed; callers only see nil.
		return nil
	}
	return e
}
|
|
||||||
|
|
||||||
// Bytes renders the complete MIME message (headers, multipart/alternative
// text/html bodies, and base64-encoded attachments) as a byte slice ready for
// smtp.SendMail. It mutates e.Headers as a side effect.
func (e *Email) Bytes() ([]byte, error) {
	buff := &bytes.Buffer{}
	w := multipart.NewWriter(buff)
	// Set the appropriate headers (overwriting any conflicts)
	// Leave out Bcc (only included in envelope headers)
	e.Headers.Set("To", strings.Join(e.To, ","))
	if e.Cc != nil {
		e.Headers.Set("Cc", strings.Join(e.Cc, ","))
	}
	e.Headers.Set("From", e.From)
	e.Headers.Set("Subject", e.Subject)
	if len(e.ReadReceipt) != 0 {
		e.Headers.Set("Disposition-Notification-To", strings.Join(e.ReadReceipt, ","))
	}
	e.Headers.Set("MIME-Version", "1.0")

	// Write the envelope headers (including any custom headers)
	if err := headerToBytes(buff, e.Headers); err != nil {
		return nil, fmt.Errorf("Failed to render message headers: %s", err)
	}

	// Content-Type is set AFTER the envelope headers were rendered, then
	// written manually, so that it is the last header emitted regardless of
	// map iteration order in headerToBytes.
	e.Headers.Set("Content-Type", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary()))
	fmt.Fprintf(buff, "%s:", "Content-Type")
	fmt.Fprintf(buff, " %s\r\n", fmt.Sprintf("multipart/mixed;\r\n boundary=%s\r\n", w.Boundary()))

	// Start the multipart/mixed part
	fmt.Fprintf(buff, "--%s\r\n", w.Boundary())
	header := textproto.MIMEHeader{}
	// Check to see if there is a Text or HTML field
	if e.Text != "" || e.HTML != "" {
		subWriter := multipart.NewWriter(buff)
		// Create the multipart alternative part
		header.Set("Content-Type", fmt.Sprintf("multipart/alternative;\r\n boundary=%s\r\n", subWriter.Boundary()))
		// Write the header
		if err := headerToBytes(buff, header); err != nil {
			return nil, fmt.Errorf("Failed to render multipart message headers: %s", err)
		}
		// Create the body sections. The same `header` map is reused for
		// each part; Set overwrites the previous part's values.
		if e.Text != "" {
			header.Set("Content-Type", fmt.Sprintf("text/plain; charset=UTF-8"))
			header.Set("Content-Transfer-Encoding", "quoted-printable")
			if _, err := subWriter.CreatePart(header); err != nil {
				return nil, err
			}
			// Write the text
			if err := quotePrintEncode(buff, e.Text); err != nil {
				return nil, err
			}
		}
		if e.HTML != "" {
			header.Set("Content-Type", fmt.Sprintf("text/html; charset=UTF-8"))
			header.Set("Content-Transfer-Encoding", "quoted-printable")
			if _, err := subWriter.CreatePart(header); err != nil {
				return nil, err
			}
			// Write the text
			if err := quotePrintEncode(buff, e.HTML); err != nil {
				return nil, err
			}
		}
		if err := subWriter.Close(); err != nil {
			return nil, err
		}
	}
	// Create attachment part, if necessary
	for _, a := range e.Attachments {
		ap, err := w.CreatePart(a.Header)
		if err != nil {
			return nil, err
		}
		// Write the base64Wrapped content to the part
		base64Wrap(ap, a.Content)
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buff.Bytes(), nil
}
|
|
||||||
|
|
||||||
// AttachFile Add attach file to the send mail
|
|
||||||
func (e *Email) AttachFile(args ...string) (a *Attachment, err error) {
|
|
||||||
if len(args) < 1 || len(args) > 2 { // change && to ||
|
|
||||||
err = errors.New("Must specify a file name and number of parameters can not exceed at least two")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
filename := args[0]
|
|
||||||
id := ""
|
|
||||||
if len(args) > 1 {
|
|
||||||
id = args[1]
|
|
||||||
}
|
|
||||||
f, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
ct := mime.TypeByExtension(filepath.Ext(filename))
|
|
||||||
basename := path.Base(filename)
|
|
||||||
return e.Attach(f, basename, ct, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Attach is used to attach content from an io.Reader to the email.
|
|
||||||
// Parameters include an io.Reader, the desired filename for the attachment, and the Content-Type.
|
|
||||||
func (e *Email) Attach(r io.Reader, filename string, args ...string) (a *Attachment, err error) {
|
|
||||||
if len(args) < 1 || len(args) > 2 { // change && to ||
|
|
||||||
err = errors.New("Must specify the file type and number of parameters can not exceed at least two")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c := args[0] // Content-Type
|
|
||||||
id := ""
|
|
||||||
if len(args) > 1 {
|
|
||||||
id = args[1] // Content-ID
|
|
||||||
}
|
|
||||||
var buffer bytes.Buffer
|
|
||||||
if _, err = io.Copy(&buffer, r); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
at := &Attachment{
|
|
||||||
Filename: filename,
|
|
||||||
Header: textproto.MIMEHeader{},
|
|
||||||
Content: buffer.Bytes(),
|
|
||||||
}
|
|
||||||
// Get the Content-Type to be used in the MIMEHeader
|
|
||||||
if c != "" {
|
|
||||||
at.Header.Set("Content-Type", c)
|
|
||||||
} else {
|
|
||||||
// If the Content-Type is blank, set the Content-Type to "application/octet-stream"
|
|
||||||
at.Header.Set("Content-Type", "application/octet-stream")
|
|
||||||
}
|
|
||||||
if id != "" {
|
|
||||||
at.Header.Set("Content-Disposition", fmt.Sprintf("inline;\r\n filename=\"%s\"", filename))
|
|
||||||
at.Header.Set("Content-ID", fmt.Sprintf("<%s>", id))
|
|
||||||
} else {
|
|
||||||
at.Header.Set("Content-Disposition", fmt.Sprintf("attachment;\r\n filename=\"%s\"", filename))
|
|
||||||
}
|
|
||||||
at.Header.Set("Content-Transfer-Encoding", "base64")
|
|
||||||
e.Attachments = append(e.Attachments, at)
|
|
||||||
return at, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Send renders the message and delivers it via SMTP to Host:Port. It mutates
// the receiver: Auth is lazily built from the credentials, From defaults to
// Username, and Subject is replaced by its RFC 2047 encoded form.
func (e *Email) Send() error {
	if e.Auth == nil {
		e.Auth = smtp.PlainAuth(e.Identity, e.Username, e.Password, e.Host)
	}
	// Merge the To, Cc, and Bcc fields into the envelope recipient list.
	to := make([]string, 0, len(e.To)+len(e.Cc)+len(e.Bcc))
	to = append(append(append(to, e.To...), e.Cc...), e.Bcc...)
	// Check to make sure there is at least one recipient and one "From" address
	if len(to) == 0 {
		return errors.New("Must specify at least one To address")
	}

	// Use the username if no From is provided
	if len(e.From) == 0 {
		e.From = e.Username
	}

	from, err := mail.ParseAddress(e.From)
	if err != nil {
		return err
	}

	// use mail's RFC 2047 to encode any string
	e.Subject = qEncode("utf-8", e.Subject)

	raw, err := e.Bytes()
	if err != nil {
		return err
	}
	return smtp.SendMail(e.Host+":"+strconv.Itoa(e.Port), e.Auth, from.Address, to, raw)
}
|
|
||||||
|
|
||||||
// quotePrintEncode writes the quoted-printable text to the IO Writer (according to RFC 2045)
|
|
||||||
func quotePrintEncode(w io.Writer, s string) error {
|
|
||||||
var buf [3]byte
|
|
||||||
mc := 0
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
c := s[i]
|
|
||||||
// We're assuming Unix style text formats as input (LF line break), and
|
|
||||||
// quoted-printble uses CRLF line breaks. (Literal CRs will become
|
|
||||||
// "=0D", but probably shouldn't be there to begin with!)
|
|
||||||
if c == '\n' {
|
|
||||||
io.WriteString(w, "\r\n")
|
|
||||||
mc = 0
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var nextOut []byte
|
|
||||||
if isPrintable(c) {
|
|
||||||
nextOut = append(buf[:0], c)
|
|
||||||
} else {
|
|
||||||
nextOut = buf[:]
|
|
||||||
qpEscape(nextOut, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a soft line break if the next (encoded) byte would push this line
|
|
||||||
// to or past the limit.
|
|
||||||
if mc+len(nextOut) >= maxLineLength {
|
|
||||||
if _, err := io.WriteString(w, "=\r\n"); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
mc = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := w.Write(nextOut); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
mc += len(nextOut)
|
|
||||||
}
|
|
||||||
// No trailing end-of-line?? Soft line break, then. TODO: is this sane?
|
|
||||||
if mc > 0 {
|
|
||||||
io.WriteString(w, "=\r\n")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// isPrintable returns true if the rune given is "printable" according to RFC 2045, false otherwise.
// Printable means space, tab, newline, or visible ASCII excluding '=' (the
// quoted-printable escape character).
func isPrintable(c byte) bool {
	switch {
	case c == ' ', c == '\n', c == '\t':
		return true
	case c >= '!' && c <= '<':
		return true
	case c >= '>' && c <= '~':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// qpEscape writes the quoted-printable "=XX" escape for the non-printable
// byte c into dest. Expects len(dest) == 3.
func qpEscape(dest []byte, c byte) {
	const hexDigits = "0123456789ABCDEF"
	dest[0], dest[1], dest[2] = '=', hexDigits[c>>4], hexDigits[c&0x0f]
}
|
|
||||||
|
|
||||||
// headerToBytes enumerates the key and values in the header, and writes the results to the IO Writer
|
|
||||||
func headerToBytes(w io.Writer, t textproto.MIMEHeader) error {
|
|
||||||
for k, v := range t {
|
|
||||||
// Write the header key
|
|
||||||
_, err := fmt.Fprintf(w, "%s:", k)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// Write each value in the header
|
|
||||||
for _, c := range v {
|
|
||||||
_, err := fmt.Fprintf(w, " %s\r\n", c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// base64Wrap encodes the attachment content, and wraps it according to RFC 2045 standards (every 76 chars)
|
|
||||||
// The output is then written to the specified io.Writer
|
|
||||||
func base64Wrap(w io.Writer, b []byte) {
|
|
||||||
// 57 raw bytes per 76-byte base64 line.
|
|
||||||
const maxRaw = 57
|
|
||||||
// Buffer for each line, including trailing CRLF.
|
|
||||||
var buffer [maxLineLength + len("\r\n")]byte
|
|
||||||
copy(buffer[maxLineLength:], "\r\n")
|
|
||||||
// Process raw chunks until there's no longer enough to fill a line.
|
|
||||||
for len(b) >= maxRaw {
|
|
||||||
base64.StdEncoding.Encode(buffer[:], b[:maxRaw])
|
|
||||||
w.Write(buffer[:])
|
|
||||||
b = b[maxRaw:]
|
|
||||||
}
|
|
||||||
// Handle the last chunk of bytes.
|
|
||||||
if len(b) > 0 {
|
|
||||||
out := buffer[:base64.StdEncoding.EncodedLen(len(b))]
|
|
||||||
base64.StdEncoding.Encode(out, b)
|
|
||||||
out = append(out, "\r\n"...)
|
|
||||||
w.Write(out)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode returns the encoded-word form of s. If s is ASCII without special
|
|
||||||
// characters, it is returned unchanged. The provided charset is the IANA
|
|
||||||
// charset name of s. It is case insensitive.
|
|
||||||
// RFC 2047 encoded-word
|
|
||||||
func qEncode(charset, s string) string {
|
|
||||||
if !needsEncoding(s) {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return encodeWord(charset, s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// needsEncoding reports whether s contains any rune outside the printable
// ASCII range (tab excepted) and therefore requires RFC 2047 encoding.
func needsEncoding(s string) bool {
	for _, r := range s {
		if r >= ' ' && r <= '~' {
			continue
		}
		if r == '\t' {
			continue
		}
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// encodeWord encodes a string into an encoded-word.
|
|
||||||
func encodeWord(charset, s string) string {
|
|
||||||
buf := getBuffer()
|
|
||||||
|
|
||||||
buf.WriteString("=?")
|
|
||||||
buf.WriteString(charset)
|
|
||||||
buf.WriteByte('?')
|
|
||||||
buf.WriteByte('q')
|
|
||||||
buf.WriteByte('?')
|
|
||||||
|
|
||||||
enc := make([]byte, 3)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
b := s[i]
|
|
||||||
switch {
|
|
||||||
case b == ' ':
|
|
||||||
buf.WriteByte('_')
|
|
||||||
case b <= '~' && b >= '!' && b != '=' && b != '?' && b != '_':
|
|
||||||
buf.WriteByte(b)
|
|
||||||
default:
|
|
||||||
enc[0] = '='
|
|
||||||
enc[1] = upperhex[b>>4]
|
|
||||||
enc[2] = upperhex[b&0x0f]
|
|
||||||
buf.Write(enc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
buf.WriteString("?=")
|
|
||||||
|
|
||||||
es := buf.String()
|
|
||||||
putBuffer(buf)
|
|
||||||
return es
|
|
||||||
}
|
|
||||||
|
|
||||||
// bufPool recycles the scratch buffers used while building encoded-words,
// avoiding an allocation per encoded header.
var bufPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}
|
|
||||||
|
|
||||||
func getBuffer() *bytes.Buffer {
|
|
||||||
return bufPool.Get().(*bytes.Buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putBuffer(buf *bytes.Buffer) {
|
|
||||||
if buf.Len() > 1024 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
buf.Reset()
|
|
||||||
bufPool.Put(buf)
|
|
||||||
}
|
|
@ -1,44 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/rand"
|
|
||||||
r "math/rand"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// alphaNum is the default alphabet: digits plus upper- and lower-case
// ASCII letters.
var alphaNum = []byte(`0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`)

// RandomCreateBytes generates n random bytes drawn from the supplied
// alphabet (alphaNum when none is given). It prefers crypto/rand and
// falls back to a time-seeded math/rand if the system entropy source
// fails or returns short.
func RandomCreateBytes(n int, alphabets ...byte) []byte {
	if len(alphabets) == 0 {
		alphabets = alphaNum
	}
	out := make([]byte, n)
	num, err := rand.Read(out)
	fallback := num != n || err != nil
	if fallback {
		r.Seed(time.Now().UnixNano())
	}
	for i := range out {
		if fallback {
			out[i] = alphabets[r.Intn(len(alphabets))]
		} else {
			// Map each entropy byte onto the alphabet.
			out[i] = alphabets[out[i]%byte(len(alphabets))]
		}
	}
	return out
}
|
|
@ -1,91 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BeeMap is a thread-safe map of interface{} keys to values,
// protected by a read/write mutex.
//
// Deprecated: using sync.Map
type BeeMap struct {
	lock *sync.RWMutex
	bm   map[interface{}]interface{}
}

// NewBeeMap allocates and returns an empty, ready-to-use BeeMap.
func NewBeeMap() *BeeMap {
	return &BeeMap{
		lock: new(sync.RWMutex),
		bm:   make(map[interface{}]interface{}),
	}
}

// Get returns the value stored under k, or nil when k is absent.
func (m *BeeMap) Get(k interface{}) interface{} {
	m.lock.RLock()
	defer m.lock.RUnlock()
	// A missing key yields the interface zero value, which is nil.
	return m.bm[k]
}

// Set stores v under k. It returns false only when k already maps to
// exactly v (so nothing changed); otherwise it updates the map and
// returns true.
func (m *BeeMap) Set(k interface{}, v interface{}) bool {
	m.lock.Lock()
	defer m.lock.Unlock()
	old, exists := m.bm[k]
	if exists && old == v {
		return false
	}
	m.bm[k] = v
	return true
}

// Check reports whether k is present in the map.
func (m *BeeMap) Check(k interface{}) bool {
	m.lock.RLock()
	defer m.lock.RUnlock()
	_, found := m.bm[k]
	return found
}

// Delete removes k and its value from the map, if present.
func (m *BeeMap) Delete(k interface{}) {
	m.lock.Lock()
	defer m.lock.Unlock()
	delete(m.bm, k)
}

// Items returns a shallow snapshot of every key/value pair.
func (m *BeeMap) Items() map[interface{}]interface{} {
	m.lock.RLock()
	defer m.lock.RUnlock()
	snapshot := make(map[interface{}]interface{}, len(m.bm))
	for k, v := range m.bm {
		snapshot[k] = v
	}
	return snapshot
}

// Count returns the number of entries currently stored.
func (m *BeeMap) Count() int {
	m.lock.RLock()
	defer m.lock.RUnlock()
	return len(m.bm)
}
|
|
@ -1,171 +0,0 @@
|
|||||||
// Copyright 2014 beego Author. All Rights Reserved.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math/rand"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// reducetype is the transform SliceReduce applies to each element.
type reducetype func(interface{}) interface{}

// filtertype is the predicate SliceFilter uses to decide which
// elements to keep.
type filtertype func(interface{}) bool
|
|
||||||
|
|
||||||
// InSlice reports whether the string v occurs anywhere in sl.
func InSlice(v string, sl []string) bool {
	for i := range sl {
		if sl[i] == v {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// InSliceIface reports whether v occurs (by ==) anywhere in sl.
func InSliceIface(v interface{}, sl []interface{}) bool {
	for i := range sl {
		if sl[i] == v {
			return true
		}
	}
	return false
}
|
|
||||||
|
|
||||||
// SliceRandList returns the integers min..max (inclusive) in a random
// order. The bounds may be given in either order.
func SliceRandList(min, max int) []int {
	if max < min {
		min, max = max, min
	}
	rand.Seed(int64(time.Now().Nanosecond()))
	list := rand.Perm(max - min + 1)
	// Perm yields 0..length-1; shift into the requested range.
	for i := range list {
		list[i] += min
	}
	return list
}
|
|
||||||
|
|
||||||
// SliceMerge returns a new slice containing every element of slice1
// followed by every element of slice2.
//
// The result is built on fresh backing storage: the previous
// implementation used append(slice1, slice2...), which can write
// slice2's elements into slice1's spare capacity and silently corrupt
// other slices sharing that backing array.
func SliceMerge(slice1, slice2 []interface{}) (c []interface{}) {
	c = make([]interface{}, 0, len(slice1)+len(slice2))
	c = append(c, slice1...)
	c = append(c, slice2...)
	return
}
|
|
||||||
|
|
||||||
// SliceReduce generates a new slice after parsing every value by reduce function
|
|
||||||
func SliceReduce(slice []interface{}, a reducetype) (dslice []interface{}) {
|
|
||||||
for _, v := range slice {
|
|
||||||
dslice = append(dslice, a(v))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceRand returns one element of a chosen uniformly at random.
// For an empty (or nil) slice it returns nil: rand.Intn panics when
// its argument is 0, so the previous implementation crashed here.
func SliceRand(a []interface{}) (b interface{}) {
	if len(a) == 0 {
		return nil
	}
	return a[rand.Intn(len(a))]
}
|
|
||||||
|
|
||||||
// SliceSum returns the sum of every value in intslice (0 for an empty
// or nil slice).
func SliceSum(intslice []int64) (sum int64) {
	for i := range intslice {
		sum += intslice[i]
	}
	return sum
}
|
|
||||||
|
|
||||||
// SliceFilter generates a new slice after filter function.
|
|
||||||
func SliceFilter(slice []interface{}, a filtertype) (ftslice []interface{}) {
|
|
||||||
for _, v := range slice {
|
|
||||||
if a(v) {
|
|
||||||
ftslice = append(ftslice, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceDiff returns diff slice of slice1 - slice2.
|
|
||||||
func SliceDiff(slice1, slice2 []interface{}) (diffslice []interface{}) {
|
|
||||||
for _, v := range slice1 {
|
|
||||||
if !InSliceIface(v, slice2) {
|
|
||||||
diffslice = append(diffslice, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceIntersect returns slice that are present in all the slice1 and slice2.
|
|
||||||
func SliceIntersect(slice1, slice2 []interface{}) (diffslice []interface{}) {
|
|
||||||
for _, v := range slice1 {
|
|
||||||
if InSliceIface(v, slice2) {
|
|
||||||
diffslice = append(diffslice, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceChunk splits slice into consecutive sub-slices of at most size
// elements; the final chunk carries any remainder. A non-positive
// size, or a size >= len(slice), yields the whole slice as a single
// chunk.
//
// Fixes two defects in the previous implementation: elements past the
// last full chunk were silently dropped (e.g. 5 elements chunked by 2
// lost the 5th), and size <= 0 looped forever.
func SliceChunk(slice []interface{}, size int) (chunkslice [][]interface{}) {
	if size <= 0 || size >= len(slice) {
		chunkslice = append(chunkslice, slice)
		return
	}
	for start := 0; start < len(slice); start += size {
		end := start + size
		if end > len(slice) {
			end = len(slice)
		}
		chunkslice = append(chunkslice, slice[start:end])
	}
	return
}
|
|
||||||
|
|
||||||
// SliceRange returns the arithmetic progression start, start+step, ...
// bounded above by end (inclusive).
//
// A non-positive step with start <= end would loop forever in the
// previous implementation; it now returns nil instead.
func SliceRange(start, end, step int64) (intslice []int64) {
	if step <= 0 {
		return nil
	}
	for v := start; v <= end; v += step {
		intslice = append(intslice, v)
	}
	return
}
|
|
||||||
|
|
||||||
// SlicePad appends copies of val to slice until it reaches length
// size; a slice already size long (or longer) is returned unchanged.
// Note the padding goes at the end, not the front.
func SlicePad(slice []interface{}, size int, val interface{}) []interface{} {
	for len(slice) < size {
		slice = append(slice, val)
	}
	return slice
}
|
|
||||||
|
|
||||||
// SliceUnique cleans repeated values in slice.
|
|
||||||
func SliceUnique(slice []interface{}) (uniqueslice []interface{}) {
|
|
||||||
for _, v := range slice {
|
|
||||||
if !InSliceIface(v, uniqueslice) {
|
|
||||||
uniqueslice = append(uniqueslice, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceShuffle randomly permutes slice in place via len(slice) random
// pair swaps, then returns it.
func SliceShuffle(slice []interface{}) []interface{} {
	n := len(slice)
	for range slice {
		i, j := rand.Intn(n), rand.Intn(n)
		slice[i], slice[j] = slice[j], slice[i]
	}
	return slice
}
|
|
@ -1,46 +0,0 @@
|
|||||||
// Copyright 2020
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ToShortTimeFormat renders d as a compact human-readable string
// using the largest suitable unit (ns, us, ms, s, m or h), always
// with two decimal places; a zero duration renders as "0".
func ToShortTimeFormat(d time.Duration) string {
	u := uint64(d)
	switch {
	case u == 0:
		return "0"
	case u < uint64(time.Microsecond):
		return fmt.Sprintf("%.2fns", float64(u))
	case u < uint64(time.Millisecond):
		return fmt.Sprintf("%.2fus", float64(u)/1000)
	case u < uint64(time.Second):
		return fmt.Sprintf("%.2fms", float64(u)/1000/1000)
	case u < uint64(time.Minute):
		return fmt.Sprintf("%.2fs", float64(u)/1000/1000/1000)
	case u < uint64(time.Hour):
		return fmt.Sprintf("%.2fm", float64(u)/1000/1000/1000/60)
	default:
		return fmt.Sprintf("%.2fh", float64(u)/1000/1000/1000/60/60)
	}
}
|
|
@ -1,89 +0,0 @@
|
|||||||
package utils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetGOPATHs returns all paths in GOPATH variable.
|
|
||||||
func GetGOPATHs() []string {
|
|
||||||
gopath := os.Getenv("GOPATH")
|
|
||||||
if gopath == "" && compareGoVersion(runtime.Version(), "go1.8") >= 0 {
|
|
||||||
gopath = defaultGOPATH()
|
|
||||||
}
|
|
||||||
return filepath.SplitList(gopath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// compareGoVersion compares two Go toolchain version strings such as
// "go1.8.1" and reports -1, 0 or 1 when a is respectively older than,
// equal to, or newer than b. Components are compared numerically; a
// component with a non-numeric suffix (e.g. "21rc1") falls back to its
// leading digits, and fully non-numeric components compare lexically.
// A longer version with an equal prefix compares as newer.
func compareGoVersion(a, b string) int {
	leading := regexp.MustCompile(`^\d*`)

	// parse extracts the numeric value of one version component,
	// returning -1 when the component has no leading digits.
	parse := func(comp string) int {
		if n, err := strconv.Atoi(comp); err == nil {
			return n
		}
		if digits := leading.FindString(comp); digits != "" {
			n, _ := strconv.Atoi(digits)
			return n
		}
		return -1
	}

	partsA := strings.Split(strings.TrimPrefix(a, "go"), ".")
	partsB := strings.Split(strings.TrimPrefix(b, "go"), ".")

	for i := 0; i < len(partsA) && i < len(partsB); i++ {
		vA := parse(partsA[i])
		vB := parse(partsB[i])

		switch {
		case vA > vB:
			return 1
		case vA < vB:
			return -1
		case vA == -1:
			// Both components are non-numeric (e.g. "rc1" vs "rc3");
			// compare them lexically.
			return strings.Compare(partsA[i], partsB[i])
		}
		// Equal components: continue with the next pair.
	}

	switch {
	case len(partsA) > len(partsB):
		return 1
	case len(partsA) < len(partsB):
		return -1
	default:
		return 0
	}
}
|
|
||||||
|
|
||||||
// defaultGOPATH reproduces the Go toolchain's default GOPATH:
// <home>/go, where the home directory comes from the per-OS
// environment variable; it returns "" when home cannot be found.
func defaultGOPATH() string {
	var env string
	switch runtime.GOOS {
	case "windows":
		env = "USERPROFILE"
	case "plan9":
		env = "home"
	default:
		env = "HOME"
	}
	home := os.Getenv(env)
	if home == "" {
		return ""
	}
	return filepath.Join(home, "go")
}
|
|
@ -1,23 +0,0 @@
|
|||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
@ -1,223 +0,0 @@
|
|||||||
package lru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// Default2QRecentRatio is the ratio of the 2Q cache dedicated
	// to recently added entries that have only been accessed once.
	Default2QRecentRatio = 0.25

	// Default2QGhostEntries is the default ratio of ghost
	// entries kept to track entries recently evicted.
	Default2QGhostEntries = 0.50
)

// TwoQueueCache is a thread-safe fixed size 2Q cache.
// 2Q is an enhancement over the standard LRU cache
// in that it tracks both frequently and recently used
// entries separately. This avoids a burst in access to new
// entries from evicting frequently used entries. It adds some
// additional tracking overhead to the standard LRU cache, and is
// computationally about 2x the cost, and adds some metadata over
// head. The ARCCache is similar, but does not require setting any
// parameters.
type TwoQueueCache struct {
	size       int // total capacity shared by recent + frequent
	recentSize int // target maximum size of the recent queue

	recent      simplelru.LRUCache // entries seen exactly once
	frequent    simplelru.LRUCache // entries accessed more than once
	recentEvict simplelru.LRUCache // "ghost" keys recently evicted from recent (values are nil)
	lock        sync.RWMutex       // guards all three LRUs
}

// New2Q creates a new TwoQueueCache using the default
// values for the parameters.
func New2Q(size int) (*TwoQueueCache, error) {
	return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
}

// New2QParams creates a new TwoQueueCache using the provided
// parameter values. recentRatio sets the fraction of capacity
// reserved for once-seen entries; ghostRatio sizes the ghost list of
// recently evicted keys. Both must lie in [0, 1].
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
	if size <= 0 {
		return nil, fmt.Errorf("invalid size")
	}
	if recentRatio < 0.0 || recentRatio > 1.0 {
		return nil, fmt.Errorf("invalid recent ratio")
	}
	if ghostRatio < 0.0 || ghostRatio > 1.0 {
		return nil, fmt.Errorf("invalid ghost ratio")
	}

	// Determine the sub-sizes
	recentSize := int(float64(size) * recentRatio)
	evictSize := int(float64(size) * ghostRatio)

	// Allocate the LRUs. Note recent and frequent are each given the
	// full size; the combined occupancy is bounded by ensureSpace.
	recent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	frequent, err := simplelru.NewLRU(size, nil)
	if err != nil {
		return nil, err
	}
	recentEvict, err := simplelru.NewLRU(evictSize, nil)
	if err != nil {
		return nil, err
	}

	// Initialize the cache
	c := &TwoQueueCache{
		size:        size,
		recentSize:  recentSize,
		recent:      recent,
		frequent:    frequent,
		recentEvict: recentEvict,
	}
	return c, nil
}

// Get looks up a key's value from the cache. A hit in the recent
// queue promotes the entry into the frequent queue.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Check if this is a frequent value
	if val, ok := c.frequent.Get(key); ok {
		return val, ok
	}

	// If the value is contained in recent, then we
	// promote it to frequent
	if val, ok := c.recent.Peek(key); ok {
		c.recent.Remove(key)
		c.frequent.Add(key, val)
		return val, ok
	}

	// No hit
	return nil, false
}

// Add adds a value to the cache. Keys already known (in frequent,
// recent, or the ghost list) go to the frequent queue; brand-new keys
// start in the recent queue.
func (c *TwoQueueCache) Add(key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()

	// Check if the value is frequently used already,
	// and just update the value
	if c.frequent.Contains(key) {
		c.frequent.Add(key, value)
		return
	}

	// Check if the value is recently used, and promote
	// the value into the frequent list
	if c.recent.Contains(key) {
		c.recent.Remove(key)
		c.frequent.Add(key, value)
		return
	}

	// If the value was recently evicted, add it to the
	// frequently used list
	if c.recentEvict.Contains(key) {
		c.ensureSpace(true)
		c.recentEvict.Remove(key)
		c.frequent.Add(key, value)
		return
	}

	// Add to the recently seen list
	c.ensureSpace(false)
	c.recent.Add(key, value)
	return
}

// ensureSpace is used to ensure we have space in the cache.
// recentEvict is true when the entry being inserted was found on the
// ghost list (and so will go to the frequent queue).
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
	// If we have space, nothing to do
	recentLen := c.recent.Len()
	freqLen := c.frequent.Len()
	if recentLen+freqLen < c.size {
		return
	}

	// If the recent buffer is larger than
	// the target, evict from there; the evicted key is remembered on
	// the ghost list so a re-add can be promoted straight to frequent.
	if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
		k, _, _ := c.recent.RemoveOldest()
		c.recentEvict.Add(k, nil)
		return
	}

	// Remove from the frequent list otherwise
	c.frequent.RemoveOldest()
}

// Len returns the number of items in the cache (ghost entries are not
// counted).
func (c *TwoQueueCache) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.recent.Len() + c.frequent.Len()
}

// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache) Keys() []interface{} {
	c.lock.RLock()
	defer c.lock.RUnlock()
	k1 := c.frequent.Keys()
	k2 := c.recent.Keys()
	return append(k1, k2...)
}

// Remove removes the provided key from the cache, whichever internal
// list it lives on.
func (c *TwoQueueCache) Remove(key interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.frequent.Remove(key) {
		return
	}
	if c.recent.Remove(key) {
		return
	}
	if c.recentEvict.Remove(key) {
		return
	}
}

// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.recent.Purge()
	c.frequent.Purge()
	c.recentEvict.Purge()
}

// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
	c.lock.RLock()
	defer c.lock.RUnlock()
	return c.frequent.Contains(key) || c.recent.Contains(key)
}

// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	if val, ok := c.frequent.Peek(key); ok {
		return val, ok
	}
	return c.recent.Peek(key)
}
|
|
@ -1,362 +0,0 @@
|
|||||||
Mozilla Public License, version 2.0
|
|
||||||
|
|
||||||
1. Definitions
|
|
||||||
|
|
||||||
1.1. "Contributor"
|
|
||||||
|
|
||||||
means each individual or legal entity that creates, contributes to the
|
|
||||||
creation of, or owns Covered Software.
|
|
||||||
|
|
||||||
1.2. "Contributor Version"
|
|
||||||
|
|
||||||
means the combination of the Contributions of others (if any) used by a
|
|
||||||
Contributor and that particular Contributor's Contribution.
|
|
||||||
|
|
||||||
1.3. "Contribution"
|
|
||||||
|
|
||||||
means Covered Software of a particular Contributor.
|
|
||||||
|
|
||||||
1.4. "Covered Software"
|
|
||||||
|
|
||||||
means Source Code Form to which the initial Contributor has attached the
|
|
||||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
|
||||||
Modifications of such Source Code Form, in each case including portions
|
|
||||||
thereof.
|
|
||||||
|
|
||||||
1.5. "Incompatible With Secondary Licenses"
|
|
||||||
means
|
|
||||||
|
|
||||||
a. that the initial Contributor has attached the notice described in
|
|
||||||
Exhibit B to the Covered Software; or
|
|
||||||
|
|
||||||
b. that the Covered Software was made available under the terms of
|
|
||||||
version 1.1 or earlier of the License, but not also under the terms of
|
|
||||||
a Secondary License.
|
|
||||||
|
|
||||||
1.6. "Executable Form"
|
|
||||||
|
|
||||||
means any form of the work other than Source Code Form.
|
|
||||||
|
|
||||||
1.7. "Larger Work"
|
|
||||||
|
|
||||||
means a work that combines Covered Software with other material, in a
|
|
||||||
separate file or files, that is not Covered Software.
|
|
||||||
|
|
||||||
1.8. "License"
|
|
||||||
|
|
||||||
means this document.
|
|
||||||
|
|
||||||
1.9. "Licensable"
|
|
||||||
|
|
||||||
means having the right to grant, to the maximum extent possible, whether
|
|
||||||
at the time of the initial grant or subsequently, any and all of the
|
|
||||||
rights conveyed by this License.
|
|
||||||
|
|
||||||
1.10. "Modifications"
|
|
||||||
|
|
||||||
means any of the following:
|
|
||||||
|
|
||||||
a. any file in Source Code Form that results from an addition to,
|
|
||||||
deletion from, or modification of the contents of Covered Software; or
|
|
||||||
|
|
||||||
b. any new file in Source Code Form that contains any Covered Software.
|
|
||||||
|
|
||||||
1.11. "Patent Claims" of a Contributor
|
|
||||||
|
|
||||||
means any patent claim(s), including without limitation, method,
|
|
||||||
process, and apparatus claims, in any patent Licensable by such
|
|
||||||
Contributor that would be infringed, but for the grant of the License,
|
|
||||||
by the making, using, selling, offering for sale, having made, import,
|
|
||||||
or transfer of either its Contributions or its Contributor Version.
|
|
||||||
|
|
||||||
1.12. "Secondary License"
|
|
||||||
|
|
||||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
|
||||||
General Public License, Version 2.1, the GNU Affero General Public
|
|
||||||
License, Version 3.0, or any later versions of those licenses.
|
|
||||||
|
|
||||||
1.13. "Source Code Form"
|
|
||||||
|
|
||||||
means the form of the work preferred for making modifications.
|
|
||||||
|
|
||||||
1.14. "You" (or "Your")
|
|
||||||
|
|
||||||
means an individual or a legal entity exercising rights under this
|
|
||||||
License. For legal entities, "You" includes any entity that controls, is
|
|
||||||
controlled by, or is under common control with You. For purposes of this
|
|
||||||
definition, "control" means (a) the power, direct or indirect, to cause
|
|
||||||
the direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
|
||||||
outstanding shares or beneficial ownership of such entity.
|
|
||||||
|
|
||||||
|
|
||||||
2. License Grants and Conditions
|
|
||||||
|
|
||||||
2.1. Grants
|
|
||||||
|
|
||||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
|
||||||
non-exclusive license:
|
|
||||||
|
|
||||||
a. under intellectual property rights (other than patent or trademark)
|
|
||||||
Licensable by such Contributor to use, reproduce, make available,
|
|
||||||
modify, display, perform, distribute, and otherwise exploit its
|
|
||||||
Contributions, either on an unmodified basis, with Modifications, or
|
|
||||||
as part of a Larger Work; and
|
|
||||||
|
|
||||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
|
||||||
sale, have made, import, and otherwise transfer either its
|
|
||||||
Contributions or its Contributor Version.
|
|
||||||
|
|
||||||
2.2. Effective Date
|
|
||||||
|
|
||||||
The licenses granted in Section 2.1 with respect to any Contribution
|
|
||||||
become effective for each Contribution on the date the Contributor first
|
|
||||||
distributes such Contribution.
|
|
||||||
|
|
||||||
2.3. Limitations on Grant Scope
|
|
||||||
|
|
||||||
The licenses granted in this Section 2 are the only rights granted under
|
|
||||||
this License. No additional rights or licenses will be implied from the
|
|
||||||
distribution or licensing of Covered Software under this License.
|
|
||||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
|
||||||
Contributor:
|
|
||||||
|
|
||||||
a. for any code that a Contributor has removed from Covered Software; or
|
|
||||||
|
|
||||||
b. for infringements caused by: (i) Your and any other third party's
|
|
||||||
modifications of Covered Software, or (ii) the combination of its
|
|
||||||
Contributions with other software (except as part of its Contributor
|
|
||||||
Version); or
|
|
||||||
|
|
||||||
c. under Patent Claims infringed by Covered Software in the absence of
|
|
||||||
its Contributions.
|
|
||||||
|
|
||||||
This License does not grant any rights in the trademarks, service marks,
|
|
||||||
or logos of any Contributor (except as may be necessary to comply with
|
|
||||||
the notice requirements in Section 3.4).
|
|
||||||
|
|
||||||
2.4. Subsequent Licenses
|
|
||||||
|
|
||||||
No Contributor makes additional grants as a result of Your choice to
|
|
||||||
distribute the Covered Software under a subsequent version of this
|
|
||||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
|
||||||
permitted under the terms of Section 3.3).
|
|
||||||
|
|
||||||
2.5. Representation
|
|
||||||
|
|
||||||
Each Contributor represents that the Contributor believes its
|
|
||||||
Contributions are its original creation(s) or it has sufficient rights to
|
|
||||||
grant the rights to its Contributions conveyed by this License.
|
|
||||||
|
|
||||||
2.6. Fair Use
|
|
||||||
|
|
||||||
This License is not intended to limit any rights You have under
|
|
||||||
applicable copyright doctrines of fair use, fair dealing, or other
|
|
||||||
equivalents.
|
|
||||||
|
|
||||||
2.7. Conditions
|
|
||||||
|
|
||||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
|
||||||
Section 2.1.
|
|
||||||
|
|
||||||
|
|
||||||
3. Responsibilities
|
|
||||||
|
|
||||||
3.1. Distribution of Source Form
|
|
||||||
|
|
||||||
All distribution of Covered Software in Source Code Form, including any
|
|
||||||
Modifications that You create or to which You contribute, must be under
|
|
||||||
the terms of this License. You must inform recipients that the Source
|
|
||||||
Code Form of the Covered Software is governed by the terms of this
|
|
||||||
License, and how they can obtain a copy of this License. You may not
|
|
||||||
attempt to alter or restrict the recipients' rights in the Source Code
|
|
||||||
Form.
|
|
||||||
|
|
||||||
3.2. Distribution of Executable Form
|
|
||||||
|
|
||||||
If You distribute Covered Software in Executable Form then:
|
|
||||||
|
|
||||||
a. such Covered Software must also be made available in Source Code Form,
|
|
||||||
as described in Section 3.1, and You must inform recipients of the
|
|
||||||
Executable Form how they can obtain a copy of such Source Code Form by
|
|
||||||
reasonable means in a timely manner, at a charge no more than the cost
|
|
||||||
of distribution to the recipient; and
|
|
||||||
|
|
||||||
b. You may distribute such Executable Form under the terms of this
|
|
||||||
License, or sublicense it under different terms, provided that the
|
|
||||||
license for the Executable Form does not attempt to limit or alter the
|
|
||||||
recipients' rights in the Source Code Form under this License.
|
|
||||||
|
|
||||||
3.3. Distribution of a Larger Work
|
|
||||||
|
|
||||||
You may create and distribute a Larger Work under terms of Your choice,
|
|
||||||
provided that You also comply with the requirements of this License for
|
|
||||||
the Covered Software. If the Larger Work is a combination of Covered
|
|
||||||
Software with a work governed by one or more Secondary Licenses, and the
|
|
||||||
Covered Software is not Incompatible With Secondary Licenses, this
|
|
||||||
License permits You to additionally distribute such Covered Software
|
|
||||||
under the terms of such Secondary License(s), so that the recipient of
|
|
||||||
the Larger Work may, at their option, further distribute the Covered
|
|
||||||
Software under the terms of either this License or such Secondary
|
|
||||||
License(s).
|
|
||||||
|
|
||||||
3.4. Notices
|
|
||||||
|
|
||||||
You may not remove or alter the substance of any license notices
|
|
||||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
|
||||||
limitations of liability) contained within the Source Code Form of the
|
|
||||||
Covered Software, except that You may alter any license notices to the
|
|
||||||
extent required to remedy known factual inaccuracies.
|
|
||||||
|
|
||||||
3.5. Application of Additional Terms
|
|
||||||
|
|
||||||
You may choose to offer, and to charge a fee for, warranty, support,
|
|
||||||
indemnity or liability obligations to one or more recipients of Covered
|
|
||||||
Software. However, You may do so only on Your own behalf, and not on
|
|
||||||
behalf of any Contributor. You must make it absolutely clear that any
|
|
||||||
such warranty, support, indemnity, or liability obligation is offered by
|
|
||||||
You alone, and You hereby agree to indemnify every Contributor for any
|
|
||||||
liability incurred by such Contributor as a result of warranty, support,
|
|
||||||
indemnity or liability terms You offer. You may include additional
|
|
||||||
disclaimers of warranty and limitations of liability specific to any
|
|
||||||
jurisdiction.
|
|
||||||
|
|
||||||
4. Inability to Comply Due to Statute or Regulation
|
|
||||||
|
|
||||||
If it is impossible for You to comply with any of the terms of this License
|
|
||||||
with respect to some or all of the Covered Software due to statute,
|
|
||||||
judicial order, or regulation then You must: (a) comply with the terms of
|
|
||||||
this License to the maximum extent possible; and (b) describe the
|
|
||||||
limitations and the code they affect. Such description must be placed in a
|
|
||||||
text file included with all distributions of the Covered Software under
|
|
||||||
this License. Except to the extent prohibited by statute or regulation,
|
|
||||||
such description must be sufficiently detailed for a recipient of ordinary
|
|
||||||
skill to be able to understand it.
|
|
||||||
|
|
||||||
5. Termination
|
|
||||||
|
|
||||||
5.1. The rights granted under this License will terminate automatically if You
|
|
||||||
fail to comply with any of its terms. However, if You become compliant,
|
|
||||||
then the rights granted under this License from a particular Contributor
|
|
||||||
are reinstated (a) provisionally, unless and until such Contributor
|
|
||||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
|
||||||
basis, if such Contributor fails to notify You of the non-compliance by
|
|
||||||
some reasonable means prior to 60 days after You have come back into
|
|
||||||
compliance. Moreover, Your grants from a particular Contributor are
|
|
||||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
|
||||||
non-compliance by some reasonable means, this is the first time You have
|
|
||||||
received notice of non-compliance with this License from such
|
|
||||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
|
||||||
of the notice.
|
|
||||||
|
|
||||||
5.2. If You initiate litigation against any entity by asserting a patent
|
|
||||||
infringement claim (excluding declaratory judgment actions,
|
|
||||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
|
||||||
directly or indirectly infringes any patent, then the rights granted to
|
|
||||||
You by any and all Contributors for the Covered Software under Section
|
|
||||||
2.1 of this License shall terminate.
|
|
||||||
|
|
||||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
|
||||||
license agreements (excluding distributors and resellers) which have been
|
|
||||||
validly granted by You or Your distributors under this License prior to
|
|
||||||
termination shall survive termination.
|
|
||||||
|
|
||||||
6. Disclaimer of Warranty
|
|
||||||
|
|
||||||
Covered Software is provided under this License on an "as is" basis,
|
|
||||||
without warranty of any kind, either expressed, implied, or statutory,
|
|
||||||
including, without limitation, warranties that the Covered Software is free
|
|
||||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
|
||||||
The entire risk as to the quality and performance of the Covered Software
|
|
||||||
is with You. Should any Covered Software prove defective in any respect,
|
|
||||||
You (not any Contributor) assume the cost of any necessary servicing,
|
|
||||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
|
||||||
part of this License. No use of any Covered Software is authorized under
|
|
||||||
this License except under this disclaimer.
|
|
||||||
|
|
||||||
7. Limitation of Liability
|
|
||||||
|
|
||||||
Under no circumstances and under no legal theory, whether tort (including
|
|
||||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
|
||||||
distributes Covered Software as permitted above, be liable to You for any
|
|
||||||
direct, indirect, special, incidental, or consequential damages of any
|
|
||||||
character including, without limitation, damages for lost profits, loss of
|
|
||||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses, even if such party shall have been
|
|
||||||
informed of the possibility of such damages. This limitation of liability
|
|
||||||
shall not apply to liability for death or personal injury resulting from
|
|
||||||
such party's negligence to the extent applicable law prohibits such
|
|
||||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
|
||||||
incidental or consequential damages, so this exclusion and limitation may
|
|
||||||
not apply to You.
|
|
||||||
|
|
||||||
8. Litigation
|
|
||||||
|
|
||||||
Any litigation relating to this License may be brought only in the courts
|
|
||||||
of a jurisdiction where the defendant maintains its principal place of
|
|
||||||
business and such litigation shall be governed by laws of that
|
|
||||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
|
||||||
in this Section shall prevent a party's ability to bring cross-claims or
|
|
||||||
counter-claims.
|
|
||||||
|
|
||||||
9. Miscellaneous
|
|
||||||
|
|
||||||
This License represents the complete agreement concerning the subject
|
|
||||||
matter hereof. If any provision of this License is held to be
|
|
||||||
unenforceable, such provision shall be reformed only to the extent
|
|
||||||
necessary to make it enforceable. Any law or regulation which provides that
|
|
||||||
the language of a contract shall be construed against the drafter shall not
|
|
||||||
be used to construe this License against a Contributor.
|
|
||||||
|
|
||||||
|
|
||||||
10. Versions of the License
|
|
||||||
|
|
||||||
10.1. New Versions
|
|
||||||
|
|
||||||
Mozilla Foundation is the license steward. Except as provided in Section
|
|
||||||
10.3, no one other than the license steward has the right to modify or
|
|
||||||
publish new versions of this License. Each version will be given a
|
|
||||||
distinguishing version number.
|
|
||||||
|
|
||||||
10.2. Effect of New Versions
|
|
||||||
|
|
||||||
You may distribute the Covered Software under the terms of the version
|
|
||||||
of the License under which You originally received the Covered Software,
|
|
||||||
or under the terms of any subsequent version published by the license
|
|
||||||
steward.
|
|
||||||
|
|
||||||
10.3. Modified Versions
|
|
||||||
|
|
||||||
If you create software not governed by this License, and you want to
|
|
||||||
create a new license for such software, you may create and use a
|
|
||||||
modified version of this License if you rename the license and remove
|
|
||||||
any references to the name of the license steward (except to note that
|
|
||||||
such modified license differs from this License).
|
|
||||||
|
|
||||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
|
||||||
Licenses If You choose to distribute Source Code Form that is
|
|
||||||
Incompatible With Secondary Licenses under the terms of this version of
|
|
||||||
the License, the notice described in Exhibit B of this License must be
|
|
||||||
attached.
|
|
||||||
|
|
||||||
Exhibit A - Source Code Form License Notice
|
|
||||||
|
|
||||||
This Source Code Form is subject to the
|
|
||||||
terms of the Mozilla Public License, v.
|
|
||||||
2.0. If a copy of the MPL was not
|
|
||||||
distributed with this file, You can
|
|
||||||
obtain one at
|
|
||||||
http://mozilla.org/MPL/2.0/.
|
|
||||||
|
|
||||||
If it is not possible or desirable to put the notice in a particular file,
|
|
||||||
then You may include the notice in a location (such as a LICENSE file in a
|
|
||||||
relevant directory) where a recipient would be likely to look for such a
|
|
||||||
notice.
|
|
||||||
|
|
||||||
You may add additional accurate notices of copyright ownership.
|
|
||||||
|
|
||||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
|
||||||
|
|
||||||
This Source Code Form is "Incompatible
|
|
||||||
With Secondary Licenses", as defined by
|
|
||||||
the Mozilla Public License, v. 2.0.
|
|
@ -1,25 +0,0 @@
|
|||||||
golang-lru
|
|
||||||
==========
|
|
||||||
|
|
||||||
This provides the `lru` package which implements a fixed-size
|
|
||||||
thread safe LRU cache. It is based on the cache in Groupcache.
|
|
||||||
|
|
||||||
Documentation
|
|
||||||
=============
|
|
||||||
|
|
||||||
Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
|
|
||||||
|
|
||||||
Example
|
|
||||||
=======
|
|
||||||
|
|
||||||
Using the LRU is very simple:
|
|
||||||
|
|
||||||
```go
|
|
||||||
l, _ := New(128)
|
|
||||||
for i := 0; i < 256; i++ {
|
|
||||||
l.Add(i, nil)
|
|
||||||
}
|
|
||||||
if l.Len() != 128 {
|
|
||||||
panic(fmt.Sprintf("bad len: %v", l.Len()))
|
|
||||||
}
|
|
||||||
```
|
|
@ -1,257 +0,0 @@
|
|||||||
package lru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
|
|
||||||
// ARC is an enhancement over the standard LRU cache in that tracks both
|
|
||||||
// frequency and recency of use. This avoids a burst in access to new
|
|
||||||
// entries from evicting the frequently used older entries. It adds some
|
|
||||||
// additional tracking overhead to a standard LRU cache, computationally
|
|
||||||
// it is roughly 2x the cost, and the extra memory overhead is linear
|
|
||||||
// with the size of the cache. ARC has been patented by IBM, but is
|
|
||||||
// similar to the TwoQueueCache (2Q) which requires setting parameters.
|
|
||||||
type ARCCache struct {
|
|
||||||
size int // Size is the total capacity of the cache
|
|
||||||
p int // P is the dynamic preference towards T1 or T2
|
|
||||||
|
|
||||||
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
|
|
||||||
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
|
|
||||||
|
|
||||||
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
|
|
||||||
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
|
|
||||||
|
|
||||||
lock sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewARC creates an ARC of the given size
|
|
||||||
func NewARC(size int) (*ARCCache, error) {
|
|
||||||
// Create the sub LRUs
|
|
||||||
b1, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
b2, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
t1, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
t2, err := simplelru.NewLRU(size, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Initialize the ARC
|
|
||||||
c := &ARCCache{
|
|
||||||
size: size,
|
|
||||||
p: 0,
|
|
||||||
t1: t1,
|
|
||||||
b1: b1,
|
|
||||||
t2: t2,
|
|
||||||
b2: b2,
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get looks up a key's value from the cache.
|
|
||||||
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
// If the value is contained in T1 (recent), then
|
|
||||||
// promote it to T2 (frequent)
|
|
||||||
if val, ok := c.t1.Peek(key); ok {
|
|
||||||
c.t1.Remove(key)
|
|
||||||
c.t2.Add(key, val)
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the value is contained in T2 (frequent)
|
|
||||||
if val, ok := c.t2.Get(key); ok {
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// No hit
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a value to the cache.
|
|
||||||
func (c *ARCCache) Add(key, value interface{}) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
// Check if the value is contained in T1 (recent), and potentially
|
|
||||||
// promote it to frequent T2
|
|
||||||
if c.t1.Contains(key) {
|
|
||||||
c.t1.Remove(key)
|
|
||||||
c.t2.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if the value is already in T2 (frequent) and update it
|
|
||||||
if c.t2.Contains(key) {
|
|
||||||
c.t2.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this value was recently evicted as part of the
|
|
||||||
// recently used list
|
|
||||||
if c.b1.Contains(key) {
|
|
||||||
// T1 set is too small, increase P appropriately
|
|
||||||
delta := 1
|
|
||||||
b1Len := c.b1.Len()
|
|
||||||
b2Len := c.b2.Len()
|
|
||||||
if b2Len > b1Len {
|
|
||||||
delta = b2Len / b1Len
|
|
||||||
}
|
|
||||||
if c.p+delta >= c.size {
|
|
||||||
c.p = c.size
|
|
||||||
} else {
|
|
||||||
c.p += delta
|
|
||||||
}
|
|
||||||
|
|
||||||
// Potentially need to make room in the cache
|
|
||||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
|
||||||
c.replace(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove from B1
|
|
||||||
c.b1.Remove(key)
|
|
||||||
|
|
||||||
// Add the key to the frequently used list
|
|
||||||
c.t2.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if this value was recently evicted as part of the
|
|
||||||
// frequently used list
|
|
||||||
if c.b2.Contains(key) {
|
|
||||||
// T2 set is too small, decrease P appropriately
|
|
||||||
delta := 1
|
|
||||||
b1Len := c.b1.Len()
|
|
||||||
b2Len := c.b2.Len()
|
|
||||||
if b1Len > b2Len {
|
|
||||||
delta = b1Len / b2Len
|
|
||||||
}
|
|
||||||
if delta >= c.p {
|
|
||||||
c.p = 0
|
|
||||||
} else {
|
|
||||||
c.p -= delta
|
|
||||||
}
|
|
||||||
|
|
||||||
// Potentially need to make room in the cache
|
|
||||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
|
||||||
c.replace(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove from B2
|
|
||||||
c.b2.Remove(key)
|
|
||||||
|
|
||||||
// Add the key to the frequently used list
|
|
||||||
c.t2.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Potentially need to make room in the cache
|
|
||||||
if c.t1.Len()+c.t2.Len() >= c.size {
|
|
||||||
c.replace(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keep the size of the ghost buffers trim
|
|
||||||
if c.b1.Len() > c.size-c.p {
|
|
||||||
c.b1.RemoveOldest()
|
|
||||||
}
|
|
||||||
if c.b2.Len() > c.p {
|
|
||||||
c.b2.RemoveOldest()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add to the recently seen list
|
|
||||||
c.t1.Add(key, value)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// replace is used to adaptively evict from either T1 or T2
|
|
||||||
// based on the current learned value of P
|
|
||||||
func (c *ARCCache) replace(b2ContainsKey bool) {
|
|
||||||
t1Len := c.t1.Len()
|
|
||||||
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
|
|
||||||
k, _, ok := c.t1.RemoveOldest()
|
|
||||||
if ok {
|
|
||||||
c.b1.Add(k, nil)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
k, _, ok := c.t2.RemoveOldest()
|
|
||||||
if ok {
|
|
||||||
c.b2.Add(k, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of cached entries
|
|
||||||
func (c *ARCCache) Len() int {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
return c.t1.Len() + c.t2.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns all the cached keys
|
|
||||||
func (c *ARCCache) Keys() []interface{} {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
k1 := c.t1.Keys()
|
|
||||||
k2 := c.t2.Keys()
|
|
||||||
return append(k1, k2...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove is used to purge a key from the cache
|
|
||||||
func (c *ARCCache) Remove(key interface{}) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
if c.t1.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.t2.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.b1.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if c.b2.Remove(key) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge is used to clear the cache
|
|
||||||
func (c *ARCCache) Purge() {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
c.t1.Purge()
|
|
||||||
c.t2.Purge()
|
|
||||||
c.b1.Purge()
|
|
||||||
c.b2.Purge()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains is used to check if the cache contains a key
|
|
||||||
// without updating recency or frequency.
|
|
||||||
func (c *ARCCache) Contains(key interface{}) bool {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
return c.t1.Contains(key) || c.t2.Contains(key)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek is used to inspect the cache value of a key
|
|
||||||
// without updating recency or frequency.
|
|
||||||
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.RLock()
|
|
||||||
defer c.lock.RUnlock()
|
|
||||||
if val, ok := c.t1.Peek(key); ok {
|
|
||||||
return val, ok
|
|
||||||
}
|
|
||||||
return c.t2.Peek(key)
|
|
||||||
}
|
|
@ -1,21 +0,0 @@
|
|||||||
// Package lru provides three different LRU caches of varying sophistication.
|
|
||||||
//
|
|
||||||
// Cache is a simple LRU cache. It is based on the
|
|
||||||
// LRU implementation in groupcache:
|
|
||||||
// https://github.com/golang/groupcache/tree/master/lru
|
|
||||||
//
|
|
||||||
// TwoQueueCache tracks frequently used and recently used entries separately.
|
|
||||||
// This avoids a burst of accesses from taking out frequently used entries,
|
|
||||||
// at the cost of about 2x computational overhead and some extra bookkeeping.
|
|
||||||
//
|
|
||||||
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
|
|
||||||
// well as recent usage in both the frequent and recent caches. Its
|
|
||||||
// computational overhead is comparable to TwoQueueCache, but the memory
|
|
||||||
// overhead is linear with the size of the cache.
|
|
||||||
//
|
|
||||||
// ARC has been patented by IBM, so do not use it if that is problematic for
|
|
||||||
// your program.
|
|
||||||
//
|
|
||||||
// All caches in this package take locks while operating, and are therefore
|
|
||||||
// thread-safe for consumers.
|
|
||||||
package lru
|
|
@ -1,150 +0,0 @@
|
|||||||
package lru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/hashicorp/golang-lru/simplelru"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Cache is a thread-safe fixed size LRU cache.
|
|
||||||
type Cache struct {
|
|
||||||
lru simplelru.LRUCache
|
|
||||||
lock sync.RWMutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates an LRU of the given size.
|
|
||||||
func New(size int) (*Cache, error) {
|
|
||||||
return NewWithEvict(size, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWithEvict constructs a fixed size cache with the given eviction
|
|
||||||
// callback.
|
|
||||||
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
|
|
||||||
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c := &Cache{
|
|
||||||
lru: lru,
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge is used to completely clear the cache.
|
|
||||||
func (c *Cache) Purge() {
|
|
||||||
c.lock.Lock()
|
|
||||||
c.lru.Purge()
|
|
||||||
c.lock.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
|
||||||
func (c *Cache) Add(key, value interface{}) (evicted bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
evicted = c.lru.Add(key, value)
|
|
||||||
c.lock.Unlock()
|
|
||||||
return evicted
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get looks up a key's value from the cache.
|
|
||||||
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
value, ok = c.lru.Get(key)
|
|
||||||
c.lock.Unlock()
|
|
||||||
return value, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains checks if a key is in the cache, without updating the
|
|
||||||
// recent-ness or deleting it for being stale.
|
|
||||||
func (c *Cache) Contains(key interface{}) bool {
|
|
||||||
c.lock.RLock()
|
|
||||||
containKey := c.lru.Contains(key)
|
|
||||||
c.lock.RUnlock()
|
|
||||||
return containKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek returns the key value (or undefined if not found) without updating
|
|
||||||
// the "recently used"-ness of the key.
|
|
||||||
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
|
|
||||||
c.lock.RLock()
|
|
||||||
value, ok = c.lru.Peek(key)
|
|
||||||
c.lock.RUnlock()
|
|
||||||
return value, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainsOrAdd checks if a key is in the cache without updating the
|
|
||||||
// recent-ness or deleting it for being stale, and if not, adds the value.
|
|
||||||
// Returns whether found and whether an eviction occurred.
|
|
||||||
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
if c.lru.Contains(key) {
|
|
||||||
return true, false
|
|
||||||
}
|
|
||||||
evicted = c.lru.Add(key, value)
|
|
||||||
return false, evicted
|
|
||||||
}
|
|
||||||
|
|
||||||
// PeekOrAdd checks if a key is in the cache without updating the
|
|
||||||
// recent-ness or deleting it for being stale, and if not, adds the value.
|
|
||||||
// Returns whether found and whether an eviction occurred.
|
|
||||||
func (c *Cache) PeekOrAdd(key, value interface{}) (previous interface{}, ok, evicted bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
defer c.lock.Unlock()
|
|
||||||
|
|
||||||
previous, ok = c.lru.Peek(key)
|
|
||||||
if ok {
|
|
||||||
return previous, true, false
|
|
||||||
}
|
|
||||||
|
|
||||||
evicted = c.lru.Add(key, value)
|
|
||||||
return nil, false, evicted
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the provided key from the cache.
|
|
||||||
func (c *Cache) Remove(key interface{}) (present bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
present = c.lru.Remove(key)
|
|
||||||
c.lock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resize changes the cache size.
|
|
||||||
func (c *Cache) Resize(size int) (evicted int) {
|
|
||||||
c.lock.Lock()
|
|
||||||
evicted = c.lru.Resize(size)
|
|
||||||
c.lock.Unlock()
|
|
||||||
return evicted
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveOldest removes the oldest item from the cache.
|
|
||||||
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
key, value, ok = c.lru.RemoveOldest()
|
|
||||||
c.lock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOldest returns the oldest entry
|
|
||||||
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
|
|
||||||
c.lock.Lock()
|
|
||||||
key, value, ok = c.lru.GetOldest()
|
|
||||||
c.lock.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
|
||||||
func (c *Cache) Keys() []interface{} {
|
|
||||||
c.lock.RLock()
|
|
||||||
keys := c.lru.Keys()
|
|
||||||
c.lock.RUnlock()
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of items in the cache.
|
|
||||||
func (c *Cache) Len() int {
|
|
||||||
c.lock.RLock()
|
|
||||||
length := c.lru.Len()
|
|
||||||
c.lock.RUnlock()
|
|
||||||
return length
|
|
||||||
}
|
|
@ -1,177 +0,0 @@
|
|||||||
package simplelru
|
|
||||||
|
|
||||||
import (
|
|
||||||
"container/list"
|
|
||||||
"errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// EvictCallback is used to get a callback when a cache entry is evicted
|
|
||||||
type EvictCallback func(key interface{}, value interface{})
|
|
||||||
|
|
||||||
// LRU implements a non-thread safe fixed size LRU cache
|
|
||||||
type LRU struct {
|
|
||||||
size int
|
|
||||||
evictList *list.List
|
|
||||||
items map[interface{}]*list.Element
|
|
||||||
onEvict EvictCallback
|
|
||||||
}
|
|
||||||
|
|
||||||
// entry is used to hold a value in the evictList
|
|
||||||
type entry struct {
|
|
||||||
key interface{}
|
|
||||||
value interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLRU constructs an LRU of the given size
|
|
||||||
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
|
|
||||||
if size <= 0 {
|
|
||||||
return nil, errors.New("Must provide a positive size")
|
|
||||||
}
|
|
||||||
c := &LRU{
|
|
||||||
size: size,
|
|
||||||
evictList: list.New(),
|
|
||||||
items: make(map[interface{}]*list.Element),
|
|
||||||
onEvict: onEvict,
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge is used to completely clear the cache.
|
|
||||||
func (c *LRU) Purge() {
|
|
||||||
for k, v := range c.items {
|
|
||||||
if c.onEvict != nil {
|
|
||||||
c.onEvict(k, v.Value.(*entry).value)
|
|
||||||
}
|
|
||||||
delete(c.items, k)
|
|
||||||
}
|
|
||||||
c.evictList.Init()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add adds a value to the cache. Returns true if an eviction occurred.
|
|
||||||
func (c *LRU) Add(key, value interface{}) (evicted bool) {
|
|
||||||
// Check for existing item
|
|
||||||
if ent, ok := c.items[key]; ok {
|
|
||||||
c.evictList.MoveToFront(ent)
|
|
||||||
ent.Value.(*entry).value = value
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add new item
|
|
||||||
ent := &entry{key, value}
|
|
||||||
entry := c.evictList.PushFront(ent)
|
|
||||||
c.items[key] = entry
|
|
||||||
|
|
||||||
evict := c.evictList.Len() > c.size
|
|
||||||
// Verify size not exceeded
|
|
||||||
if evict {
|
|
||||||
c.removeOldest()
|
|
||||||
}
|
|
||||||
return evict
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get looks up a key's value from the cache.
|
|
||||||
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
|
|
||||||
if ent, ok := c.items[key]; ok {
|
|
||||||
c.evictList.MoveToFront(ent)
|
|
||||||
if ent.Value.(*entry) == nil {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
return ent.Value.(*entry).value, true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Contains checks if a key is in the cache, without updating the recent-ness
|
|
||||||
// or deleting it for being stale.
|
|
||||||
func (c *LRU) Contains(key interface{}) (ok bool) {
|
|
||||||
_, ok = c.items[key]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peek returns the key value (or undefined if not found) without updating
|
|
||||||
// the "recently used"-ness of the key.
|
|
||||||
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
|
|
||||||
var ent *list.Element
|
|
||||||
if ent, ok = c.items[key]; ok {
|
|
||||||
return ent.Value.(*entry).value, true
|
|
||||||
}
|
|
||||||
return nil, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove removes the provided key from the cache, returning if the
|
|
||||||
// key was contained.
|
|
||||||
func (c *LRU) Remove(key interface{}) (present bool) {
|
|
||||||
if ent, ok := c.items[key]; ok {
|
|
||||||
c.removeElement(ent)
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveOldest removes the oldest item from the cache.
|
|
||||||
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
|
|
||||||
ent := c.evictList.Back()
|
|
||||||
if ent != nil {
|
|
||||||
c.removeElement(ent)
|
|
||||||
kv := ent.Value.(*entry)
|
|
||||||
return kv.key, kv.value, true
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOldest returns the oldest entry
|
|
||||||
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
|
|
||||||
ent := c.evictList.Back()
|
|
||||||
if ent != nil {
|
|
||||||
kv := ent.Value.(*entry)
|
|
||||||
return kv.key, kv.value, true
|
|
||||||
}
|
|
||||||
return nil, nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of the keys in the cache, from oldest to newest.
|
|
||||||
func (c *LRU) Keys() []interface{} {
|
|
||||||
keys := make([]interface{}, len(c.items))
|
|
||||||
i := 0
|
|
||||||
for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
|
|
||||||
keys[i] = ent.Value.(*entry).key
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of items in the cache.
|
|
||||||
func (c *LRU) Len() int {
|
|
||||||
return c.evictList.Len()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resize changes the cache size.
|
|
||||||
func (c *LRU) Resize(size int) (evicted int) {
|
|
||||||
diff := c.Len() - size
|
|
||||||
if diff < 0 {
|
|
||||||
diff = 0
|
|
||||||
}
|
|
||||||
for i := 0; i < diff; i++ {
|
|
||||||
c.removeOldest()
|
|
||||||
}
|
|
||||||
c.size = size
|
|
||||||
return diff
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeOldest removes the oldest item from the cache.
|
|
||||||
func (c *LRU) removeOldest() {
|
|
||||||
ent := c.evictList.Back()
|
|
||||||
if ent != nil {
|
|
||||||
c.removeElement(ent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeElement is used to remove a given list element from the cache
|
|
||||||
func (c *LRU) removeElement(e *list.Element) {
|
|
||||||
c.evictList.Remove(e)
|
|
||||||
kv := e.Value.(*entry)
|
|
||||||
delete(c.items, kv.key)
|
|
||||||
if c.onEvict != nil {
|
|
||||||
c.onEvict(kv.key, kv.value)
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,39 +0,0 @@
|
|||||||
package simplelru
|
|
||||||
|
|
||||||
// LRUCache is the interface for simple LRU cache.
|
|
||||||
type LRUCache interface {
|
|
||||||
// Adds a value to the cache, returns true if an eviction occurred and
|
|
||||||
// updates the "recently used"-ness of the key.
|
|
||||||
Add(key, value interface{}) bool
|
|
||||||
|
|
||||||
// Returns key's value from the cache and
|
|
||||||
// updates the "recently used"-ness of the key. #value, isFound
|
|
||||||
Get(key interface{}) (value interface{}, ok bool)
|
|
||||||
|
|
||||||
// Checks if a key exists in cache without updating the recent-ness.
|
|
||||||
Contains(key interface{}) (ok bool)
|
|
||||||
|
|
||||||
// Returns key's value without updating the "recently used"-ness of the key.
|
|
||||||
Peek(key interface{}) (value interface{}, ok bool)
|
|
||||||
|
|
||||||
// Removes a key from the cache.
|
|
||||||
Remove(key interface{}) bool
|
|
||||||
|
|
||||||
// Removes the oldest entry from cache.
|
|
||||||
RemoveOldest() (interface{}, interface{}, bool)
|
|
||||||
|
|
||||||
// Returns the oldest entry from the cache. #key, value, isFound
|
|
||||||
GetOldest() (interface{}, interface{}, bool)
|
|
||||||
|
|
||||||
// Returns a slice of the keys in the cache, from oldest to newest.
|
|
||||||
Keys() []interface{}
|
|
||||||
|
|
||||||
// Returns the number of items in the cache.
|
|
||||||
Len() int
|
|
||||||
|
|
||||||
// Clears all cache entries.
|
|
||||||
Purge()
|
|
||||||
|
|
||||||
// Resizes cache, returning number evicted
|
|
||||||
Resize(int) int
|
|
||||||
}
|
|
@ -1,27 +0,0 @@
|
|||||||
# Created by http://www.gitignore.io
|
|
||||||
|
|
||||||
### Go ###
|
|
||||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
|
||||||
*.o
|
|
||||||
*.a
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Folders
|
|
||||||
_obj
|
|
||||||
_test
|
|
||||||
|
|
||||||
# Architecture specific extensions/prefixes
|
|
||||||
*.[568vq]
|
|
||||||
[568vq].out
|
|
||||||
|
|
||||||
*.cgo1.go
|
|
||||||
*.cgo2.c
|
|
||||||
_cgo_defun.c
|
|
||||||
_cgo_gotypes.go
|
|
||||||
_cgo_export.*
|
|
||||||
|
|
||||||
_testmain.go
|
|
||||||
|
|
||||||
*.exe
|
|
||||||
*.test
|
|
||||||
|
|
@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) [2014] [shiena]
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
@ -1,102 +0,0 @@
|
|||||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/shiena/ansicolor)](https://pkg.go.dev/github.com/shiena/ansicolor)
|
|
||||||
[![Go Report Card](https://goreportcard.com/badge/github.com/shiena/ansicolor)](https://goreportcard.com/report/github.com/shiena/ansicolor)
|
|
||||||
|
|
||||||
# ansicolor
|
|
||||||
|
|
||||||
Ansicolor library provides color console in Windows as ANSICON for Golang.
|
|
||||||
|
|
||||||
## Features
|
|
||||||
|
|
||||||
|Escape sequence|Text attributes|
|
|
||||||
|---------------|----|
|
|
||||||
|\x1b[0m|All attributes off(color at startup)|
|
|
||||||
|\x1b[1m|Bold on(enable foreground intensity)|
|
|
||||||
|\x1b[4m|Underline on|
|
|
||||||
|\x1b[5m|Blink on(enable background intensity)|
|
|
||||||
|\x1b[21m|Bold off(disable foreground intensity)|
|
|
||||||
|\x1b[24m|Underline off|
|
|
||||||
|\x1b[25m|Blink off(disable background intensity)|
|
|
||||||
|
|
||||||
|Escape sequence|Foreground colors|
|
|
||||||
|---------------|----|
|
|
||||||
|\x1b[30m|Black|
|
|
||||||
|\x1b[31m|Red|
|
|
||||||
|\x1b[32m|Green|
|
|
||||||
|\x1b[33m|Yellow|
|
|
||||||
|\x1b[34m|Blue|
|
|
||||||
|\x1b[35m|Magenta|
|
|
||||||
|\x1b[36m|Cyan|
|
|
||||||
|\x1b[37m|White|
|
|
||||||
|\x1b[39m|Default(foreground color at startup)|
|
|
||||||
|\x1b[90m|Light Gray|
|
|
||||||
|\x1b[91m|Light Red|
|
|
||||||
|\x1b[92m|Light Green|
|
|
||||||
|\x1b[93m|Light Yellow|
|
|
||||||
|\x1b[94m|Light Blue|
|
|
||||||
|\x1b[95m|Light Magenta|
|
|
||||||
|\x1b[96m|Light Cyan|
|
|
||||||
|\x1b[97m|Light White|
|
|
||||||
|
|
||||||
|Escape sequence|Background colors|
|
|
||||||
|---------------|----|
|
|
||||||
|\x1b[40m|Black|
|
|
||||||
|\x1b[41m|Red|
|
|
||||||
|\x1b[42m|Green|
|
|
||||||
|\x1b[43m|Yellow|
|
|
||||||
|\x1b[44m|Blue|
|
|
||||||
|\x1b[45m|Magenta|
|
|
||||||
|\x1b[46m|Cyan|
|
|
||||||
|\x1b[47m|White|
|
|
||||||
|\x1b[49m|Default(background color at startup)|
|
|
||||||
|\x1b[100m|Light Gray|
|
|
||||||
|\x1b[101m|Light Red|
|
|
||||||
|\x1b[102m|Light Green|
|
|
||||||
|\x1b[103m|Light Yellow|
|
|
||||||
|\x1b[104m|Light Blue|
|
|
||||||
|\x1b[105m|Light Magenta|
|
|
||||||
|\x1b[106m|Light Cyan|
|
|
||||||
|\x1b[107m|Light White|
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/shiena/ansicolor"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
w := ansicolor.NewAnsiColorWriter(os.Stdout)
|
|
||||||
text := "%sforeground %sbold%s %sbackground%s\n"
|
|
||||||
fmt.Fprintf(w, text, "\x1b[31m", "\x1b[1m", "\x1b[21m", "\x1b[41;32m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[32m", "\x1b[1m", "\x1b[21m", "\x1b[42;31m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[33m", "\x1b[1m", "\x1b[21m", "\x1b[43;34m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[34m", "\x1b[1m", "\x1b[21m", "\x1b[44;33m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[35m", "\x1b[1m", "\x1b[21m", "\x1b[45;36m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[36m", "\x1b[1m", "\x1b[21m", "\x1b[46;35m", "\x1b[0m")
|
|
||||||
fmt.Fprintf(w, text, "\x1b[37m", "\x1b[1m", "\x1b[21m", "\x1b[47;30m", "\x1b[0m")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
![screenshot](https://gist.githubusercontent.com/shiena/a1bada24b525314a7d5e/raw/c763aa7cda6e4fefaccf831e2617adc40b6151c7/main.png)
|
|
||||||
|
|
||||||
## See also:
|
|
||||||
|
|
||||||
- https://github.com/daviddengcn/go-colortext
|
|
||||||
- https://github.com/adoxa/ansicon
|
|
||||||
- https://github.com/aslakhellesoy/wac
|
|
||||||
- https://github.com/wsxiaoys/terminal
|
|
||||||
- https://github.com/mattn/go-colorable
|
|
||||||
|
|
||||||
## Contributing
|
|
||||||
|
|
||||||
1. Fork it
|
|
||||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
|
||||||
3. Commit your changes (`git commit -am 'Add some feature'`)
|
|
||||||
4. Push to the branch (`git push origin my-new-feature`)
|
|
||||||
5. Create new Pull Request
|
|
||||||
|
|
@ -1,42 +0,0 @@
|
|||||||
// Copyright 2014 shiena Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package ansicolor provides color console in Windows as ANSICON.
|
|
||||||
package ansicolor
|
|
||||||
|
|
||||||
import "io"
|
|
||||||
|
|
||||||
type outputMode int
|
|
||||||
|
|
||||||
// DiscardNonColorEscSeq supports the divided color escape sequence.
|
|
||||||
// But non-color escape sequence is not output.
|
|
||||||
// Please use the OutputNonColorEscSeq If you want to output a non-color
|
|
||||||
// escape sequences such as ncurses. However, it does not support the divided
|
|
||||||
// color escape sequence.
|
|
||||||
const (
|
|
||||||
_ outputMode = iota
|
|
||||||
DiscardNonColorEscSeq
|
|
||||||
OutputNonColorEscSeq
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewAnsiColorWriter creates and initializes a new ansiColorWriter
|
|
||||||
// using io.Writer w as its initial contents.
|
|
||||||
// In the console of Windows, which change the foreground and background
|
|
||||||
// colors of the text by the escape sequence.
|
|
||||||
// In the console of other systems, which writes to w all text.
|
|
||||||
func NewAnsiColorWriter(w io.Writer) io.Writer {
|
|
||||||
return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewModeAnsiColorWriter create and initializes a new ansiColorWriter
|
|
||||||
// by specifying the outputMode.
|
|
||||||
func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
|
|
||||||
if _, ok := w.(*ansiColorWriter); !ok {
|
|
||||||
return &ansiColorWriter{
|
|
||||||
w: w,
|
|
||||||
mode: mode,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return w
|
|
||||||
}
|
|
@ -1,18 +0,0 @@
|
|||||||
// Copyright 2014 shiena Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
package ansicolor
|
|
||||||
|
|
||||||
import "io"
|
|
||||||
|
|
||||||
type ansiColorWriter struct {
|
|
||||||
w io.Writer
|
|
||||||
mode outputMode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
|
|
||||||
return cw.w.Write(p)
|
|
||||||
}
|
|
@ -1,417 +0,0 @@
|
|||||||
// Copyright 2014 shiena Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a MIT-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package ansicolor
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"io"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
type csiState int
|
|
||||||
|
|
||||||
const (
|
|
||||||
outsideCsiCode csiState = iota
|
|
||||||
firstCsiCode
|
|
||||||
secondCsiCode
|
|
||||||
)
|
|
||||||
|
|
||||||
type parseResult int
|
|
||||||
|
|
||||||
const (
|
|
||||||
noConsole parseResult = iota
|
|
||||||
changedColor
|
|
||||||
unknown
|
|
||||||
)
|
|
||||||
|
|
||||||
type ansiColorWriter struct {
|
|
||||||
w io.Writer
|
|
||||||
mode outputMode
|
|
||||||
state csiState
|
|
||||||
paramStartBuf bytes.Buffer
|
|
||||||
paramBuf bytes.Buffer
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
firstCsiChar byte = '\x1b'
|
|
||||||
secondeCsiChar byte = '['
|
|
||||||
separatorChar byte = ';'
|
|
||||||
sgrCode byte = 'm'
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
foregroundBlue = uint16(0x0001)
|
|
||||||
foregroundGreen = uint16(0x0002)
|
|
||||||
foregroundRed = uint16(0x0004)
|
|
||||||
foregroundIntensity = uint16(0x0008)
|
|
||||||
backgroundBlue = uint16(0x0010)
|
|
||||||
backgroundGreen = uint16(0x0020)
|
|
||||||
backgroundRed = uint16(0x0040)
|
|
||||||
backgroundIntensity = uint16(0x0080)
|
|
||||||
underscore = uint16(0x8000)
|
|
||||||
|
|
||||||
foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
|
|
||||||
backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
ansiReset = "0"
|
|
||||||
ansiIntensityOn = "1"
|
|
||||||
ansiIntensityOff = "21"
|
|
||||||
ansiUnderlineOn = "4"
|
|
||||||
ansiUnderlineOff = "24"
|
|
||||||
ansiBlinkOn = "5"
|
|
||||||
ansiBlinkOff = "25"
|
|
||||||
|
|
||||||
ansiForegroundBlack = "30"
|
|
||||||
ansiForegroundRed = "31"
|
|
||||||
ansiForegroundGreen = "32"
|
|
||||||
ansiForegroundYellow = "33"
|
|
||||||
ansiForegroundBlue = "34"
|
|
||||||
ansiForegroundMagenta = "35"
|
|
||||||
ansiForegroundCyan = "36"
|
|
||||||
ansiForegroundWhite = "37"
|
|
||||||
ansiForegroundDefault = "39"
|
|
||||||
|
|
||||||
ansiBackgroundBlack = "40"
|
|
||||||
ansiBackgroundRed = "41"
|
|
||||||
ansiBackgroundGreen = "42"
|
|
||||||
ansiBackgroundYellow = "43"
|
|
||||||
ansiBackgroundBlue = "44"
|
|
||||||
ansiBackgroundMagenta = "45"
|
|
||||||
ansiBackgroundCyan = "46"
|
|
||||||
ansiBackgroundWhite = "47"
|
|
||||||
ansiBackgroundDefault = "49"
|
|
||||||
|
|
||||||
ansiLightForegroundGray = "90"
|
|
||||||
ansiLightForegroundRed = "91"
|
|
||||||
ansiLightForegroundGreen = "92"
|
|
||||||
ansiLightForegroundYellow = "93"
|
|
||||||
ansiLightForegroundBlue = "94"
|
|
||||||
ansiLightForegroundMagenta = "95"
|
|
||||||
ansiLightForegroundCyan = "96"
|
|
||||||
ansiLightForegroundWhite = "97"
|
|
||||||
|
|
||||||
ansiLightBackgroundGray = "100"
|
|
||||||
ansiLightBackgroundRed = "101"
|
|
||||||
ansiLightBackgroundGreen = "102"
|
|
||||||
ansiLightBackgroundYellow = "103"
|
|
||||||
ansiLightBackgroundBlue = "104"
|
|
||||||
ansiLightBackgroundMagenta = "105"
|
|
||||||
ansiLightBackgroundCyan = "106"
|
|
||||||
ansiLightBackgroundWhite = "107"
|
|
||||||
)
|
|
||||||
|
|
||||||
type drawType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
foreground drawType = iota
|
|
||||||
background
|
|
||||||
)
|
|
||||||
|
|
||||||
type winColor struct {
|
|
||||||
code uint16
|
|
||||||
drawType drawType
|
|
||||||
}
|
|
||||||
|
|
||||||
var colorMap = map[string]winColor{
|
|
||||||
ansiForegroundBlack: {0, foreground},
|
|
||||||
ansiForegroundRed: {foregroundRed, foreground},
|
|
||||||
ansiForegroundGreen: {foregroundGreen, foreground},
|
|
||||||
ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground},
|
|
||||||
ansiForegroundBlue: {foregroundBlue, foreground},
|
|
||||||
ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
|
|
||||||
ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground},
|
|
||||||
ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
|
||||||
ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
|
||||||
|
|
||||||
ansiBackgroundBlack: {0, background},
|
|
||||||
ansiBackgroundRed: {backgroundRed, background},
|
|
||||||
ansiBackgroundGreen: {backgroundGreen, background},
|
|
||||||
ansiBackgroundYellow: {backgroundRed | backgroundGreen, background},
|
|
||||||
ansiBackgroundBlue: {backgroundBlue, background},
|
|
||||||
ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
|
|
||||||
ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background},
|
|
||||||
ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background},
|
|
||||||
ansiBackgroundDefault: {0, background},
|
|
||||||
|
|
||||||
ansiLightForegroundGray: {foregroundIntensity, foreground},
|
|
||||||
ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground},
|
|
||||||
ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground},
|
|
||||||
ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
|
|
||||||
ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground},
|
|
||||||
ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
|
|
||||||
ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
|
|
||||||
ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
|
||||||
|
|
||||||
ansiLightBackgroundGray: {backgroundIntensity, background},
|
|
||||||
ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background},
|
|
||||||
ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background},
|
|
||||||
ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background},
|
|
||||||
ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background},
|
|
||||||
ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
|
|
||||||
ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background},
|
|
||||||
ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
|
||||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
|
||||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
|
||||||
defaultAttr *textAttributes
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
|
|
||||||
if screenInfo != nil {
|
|
||||||
colorMap[ansiForegroundDefault] = winColor{
|
|
||||||
screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
|
|
||||||
foreground,
|
|
||||||
}
|
|
||||||
colorMap[ansiBackgroundDefault] = winColor{
|
|
||||||
screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
|
|
||||||
background,
|
|
||||||
}
|
|
||||||
defaultAttr = convertTextAttr(screenInfo.WAttributes)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type coord struct {
|
|
||||||
X, Y int16
|
|
||||||
}
|
|
||||||
|
|
||||||
type smallRect struct {
|
|
||||||
Left, Top, Right, Bottom int16
|
|
||||||
}
|
|
||||||
|
|
||||||
type consoleScreenBufferInfo struct {
|
|
||||||
DwSize coord
|
|
||||||
DwCursorPosition coord
|
|
||||||
WAttributes uint16
|
|
||||||
SrWindow smallRect
|
|
||||||
DwMaximumWindowSize coord
|
|
||||||
}
|
|
||||||
|
|
||||||
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
|
|
||||||
var csbi consoleScreenBufferInfo
|
|
||||||
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
|
|
||||||
hConsoleOutput,
|
|
||||||
uintptr(unsafe.Pointer(&csbi)))
|
|
||||||
if ret == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &csbi
|
|
||||||
}
|
|
||||||
|
|
||||||
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
|
|
||||||
ret, _, _ := procSetConsoleTextAttribute.Call(
|
|
||||||
hConsoleOutput,
|
|
||||||
uintptr(wAttributes))
|
|
||||||
return ret != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type textAttributes struct {
|
|
||||||
foregroundColor uint16
|
|
||||||
backgroundColor uint16
|
|
||||||
foregroundIntensity uint16
|
|
||||||
backgroundIntensity uint16
|
|
||||||
underscore uint16
|
|
||||||
otherAttributes uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertTextAttr(winAttr uint16) *textAttributes {
|
|
||||||
fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
|
|
||||||
bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
|
|
||||||
fgIntensity := winAttr & foregroundIntensity
|
|
||||||
bgIntensity := winAttr & backgroundIntensity
|
|
||||||
underline := winAttr & underscore
|
|
||||||
otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
|
|
||||||
return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
|
|
||||||
}
|
|
||||||
|
|
||||||
func convertWinAttr(textAttr *textAttributes) uint16 {
|
|
||||||
var winAttr uint16
|
|
||||||
winAttr |= textAttr.foregroundColor
|
|
||||||
winAttr |= textAttr.backgroundColor
|
|
||||||
winAttr |= textAttr.foregroundIntensity
|
|
||||||
winAttr |= textAttr.backgroundIntensity
|
|
||||||
winAttr |= textAttr.underscore
|
|
||||||
winAttr |= textAttr.otherAttributes
|
|
||||||
return winAttr
|
|
||||||
}
|
|
||||||
|
|
||||||
func changeColor(param []byte) parseResult {
|
|
||||||
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
|
|
||||||
if screenInfo == nil {
|
|
||||||
return noConsole
|
|
||||||
}
|
|
||||||
|
|
||||||
winAttr := convertTextAttr(screenInfo.WAttributes)
|
|
||||||
strParam := string(param)
|
|
||||||
if len(strParam) <= 0 {
|
|
||||||
strParam = "0"
|
|
||||||
}
|
|
||||||
csiParam := strings.Split(strParam, string(separatorChar))
|
|
||||||
for _, p := range csiParam {
|
|
||||||
c, ok := colorMap[p]
|
|
||||||
switch {
|
|
||||||
case !ok:
|
|
||||||
switch p {
|
|
||||||
case ansiReset:
|
|
||||||
winAttr.foregroundColor = defaultAttr.foregroundColor
|
|
||||||
winAttr.backgroundColor = defaultAttr.backgroundColor
|
|
||||||
winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
|
|
||||||
winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
|
|
||||||
winAttr.underscore = 0
|
|
||||||
winAttr.otherAttributes = 0
|
|
||||||
case ansiIntensityOn:
|
|
||||||
winAttr.foregroundIntensity = foregroundIntensity
|
|
||||||
case ansiIntensityOff:
|
|
||||||
winAttr.foregroundIntensity = 0
|
|
||||||
case ansiUnderlineOn:
|
|
||||||
winAttr.underscore = underscore
|
|
||||||
case ansiUnderlineOff:
|
|
||||||
winAttr.underscore = 0
|
|
||||||
case ansiBlinkOn:
|
|
||||||
winAttr.backgroundIntensity = backgroundIntensity
|
|
||||||
case ansiBlinkOff:
|
|
||||||
winAttr.backgroundIntensity = 0
|
|
||||||
default:
|
|
||||||
// unknown code
|
|
||||||
}
|
|
||||||
case c.drawType == foreground:
|
|
||||||
winAttr.foregroundColor = c.code
|
|
||||||
case c.drawType == background:
|
|
||||||
winAttr.backgroundColor = c.code
|
|
||||||
}
|
|
||||||
}
|
|
||||||
winTextAttribute := convertWinAttr(winAttr)
|
|
||||||
setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
|
|
||||||
|
|
||||||
return changedColor
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseEscapeSequence(command byte, param []byte) parseResult {
|
|
||||||
if defaultAttr == nil {
|
|
||||||
return noConsole
|
|
||||||
}
|
|
||||||
|
|
||||||
switch command {
|
|
||||||
case sgrCode:
|
|
||||||
return changeColor(param)
|
|
||||||
default:
|
|
||||||
return unknown
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cw *ansiColorWriter) flushBuffer() (int, error) {
|
|
||||||
return cw.flushTo(cw.w)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cw *ansiColorWriter) resetBuffer() (int, error) {
|
|
||||||
return cw.flushTo(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) {
|
|
||||||
var n1, n2 int
|
|
||||||
var err error
|
|
||||||
|
|
||||||
startBytes := cw.paramStartBuf.Bytes()
|
|
||||||
cw.paramStartBuf.Reset()
|
|
||||||
if w != nil {
|
|
||||||
n1, err = cw.w.Write(startBytes)
|
|
||||||
if err != nil {
|
|
||||||
return n1, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
n1 = len(startBytes)
|
|
||||||
}
|
|
||||||
paramBytes := cw.paramBuf.Bytes()
|
|
||||||
cw.paramBuf.Reset()
|
|
||||||
if w != nil {
|
|
||||||
n2, err = cw.w.Write(paramBytes)
|
|
||||||
if err != nil {
|
|
||||||
return n1 + n2, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
n2 = len(paramBytes)
|
|
||||||
}
|
|
||||||
return n1 + n2, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func isParameterChar(b byte) bool {
|
|
||||||
return ('0' <= b && b <= '9') || b == separatorChar
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
|
|
||||||
r, nw, first, last := 0, 0, 0, 0
|
|
||||||
if cw.mode != DiscardNonColorEscSeq {
|
|
||||||
cw.state = outsideCsiCode
|
|
||||||
cw.resetBuffer()
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
for i, ch := range p {
|
|
||||||
switch cw.state {
|
|
||||||
case outsideCsiCode:
|
|
||||||
if ch == firstCsiChar {
|
|
||||||
cw.paramStartBuf.WriteByte(ch)
|
|
||||||
cw.state = firstCsiCode
|
|
||||||
}
|
|
||||||
case firstCsiCode:
|
|
||||||
switch ch {
|
|
||||||
case firstCsiChar:
|
|
||||||
cw.paramStartBuf.WriteByte(ch)
|
|
||||||
break
|
|
||||||
case secondeCsiChar:
|
|
||||||
cw.paramStartBuf.WriteByte(ch)
|
|
||||||
cw.state = secondCsiCode
|
|
||||||
last = i - 1
|
|
||||||
default:
|
|
||||||
cw.resetBuffer()
|
|
||||||
cw.state = outsideCsiCode
|
|
||||||
}
|
|
||||||
case secondCsiCode:
|
|
||||||
if isParameterChar(ch) {
|
|
||||||
cw.paramBuf.WriteByte(ch)
|
|
||||||
} else {
|
|
||||||
nw, err = cw.w.Write(p[first:last])
|
|
||||||
r += nw
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
first = i + 1
|
|
||||||
result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
|
|
||||||
if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) {
|
|
||||||
cw.paramBuf.WriteByte(ch)
|
|
||||||
nw, err := cw.flushBuffer()
|
|
||||||
if err != nil {
|
|
||||||
return r, err
|
|
||||||
}
|
|
||||||
r += nw
|
|
||||||
} else {
|
|
||||||
n, _ := cw.resetBuffer()
|
|
||||||
// Add one more to the size of the buffer for the last ch
|
|
||||||
r += n + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
cw.state = outsideCsiCode
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
cw.state = outsideCsiCode
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode {
|
|
||||||
nw, err = cw.w.Write(p[first:])
|
|
||||||
r += nw
|
|
||||||
}
|
|
||||||
|
|
||||||
return r, err
|
|
||||||
}
|
|
@ -0,0 +1,11 @@
|
|||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.10.x
|
||||||
|
- 1.11.x
|
||||||
|
- 1.12.x
|
||||||
|
- 1.13.x
|
||||||
|
- tip
|
||||||
|
matrix:
|
||||||
|
fast_finish: true
|
||||||
|
allow_failures:
|
||||||
|
- go: tip
|
@ -0,0 +1,82 @@
|
|||||||
|
Copyright (c) 2016, Tom Thorogood.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are met:
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
* Neither the name of the Tom Thorogood nor the
|
||||||
|
names of its contributors may be used to endorse or promote products
|
||||||
|
derived from this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||||
|
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
---- Portions of the source code are also covered by the following license: ----
|
||||||
|
|
||||||
|
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
---- Portions of the source code are also covered by the following license: ----
|
||||||
|
|
||||||
|
Copyright (c) 2005-2016, Wojciech Muła
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
1. Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
2. Redistributions in binary form must reproduce the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer in the
|
||||||
|
documentation and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||||
|
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||||
|
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||||
|
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||||
|
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||||
|
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||||
|
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,108 @@
|
|||||||
|
# go-hex
|
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/tmthrgd/go-hex?status.svg)](https://godoc.org/github.com/tmthrgd/go-hex)
|
||||||
|
[![Build Status](https://travis-ci.org/tmthrgd/go-hex.svg?branch=master)](https://travis-ci.org/tmthrgd/go-hex)
|
||||||
|
|
||||||
|
An efficient hexadecimal implementation for Golang.
|
||||||
|
|
||||||
|
go-hex provides hex encoding and decoding using SSE/AVX instructions on x86-64.
|
||||||
|
|
||||||
|
## Download
|
||||||
|
|
||||||
|
```
|
||||||
|
go get github.com/tmthrgd/go-hex
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benchmark
|
||||||
|
|
||||||
|
go-hex:
|
||||||
|
```
|
||||||
|
BenchmarkEncode/15-8 100000000 17.4 ns/op 863.43 MB/s
|
||||||
|
BenchmarkEncode/32-8 100000000 11.9 ns/op 2690.43 MB/s
|
||||||
|
BenchmarkEncode/128-8 100000000 21.4 ns/op 5982.92 MB/s
|
||||||
|
BenchmarkEncode/1k-8 20000000 88.5 ns/op 11572.80 MB/s
|
||||||
|
BenchmarkEncode/16k-8 1000000 1254 ns/op 13058.10 MB/s
|
||||||
|
BenchmarkEncode/128k-8 100000 12965 ns/op 10109.53 MB/s
|
||||||
|
BenchmarkEncode/1M-8 10000 119465 ns/op 8777.23 MB/s
|
||||||
|
BenchmarkEncode/16M-8 500 3530380 ns/op 4752.24 MB/s
|
||||||
|
BenchmarkEncode/128M-8 50 28001913 ns/op 4793.16 MB/s
|
||||||
|
BenchmarkDecode/14-8 100000000 12.6 ns/op 1110.01 MB/s
|
||||||
|
BenchmarkDecode/32-8 100000000 12.5 ns/op 2558.10 MB/s
|
||||||
|
BenchmarkDecode/128-8 50000000 27.2 ns/op 4697.66 MB/s
|
||||||
|
BenchmarkDecode/1k-8 10000000 168 ns/op 6093.43 MB/s
|
||||||
|
BenchmarkDecode/16k-8 500000 2543 ns/op 6442.09 MB/s
|
||||||
|
BenchmarkDecode/128k-8 100000 20339 ns/op 6444.24 MB/s
|
||||||
|
BenchmarkDecode/1M-8 10000 164313 ns/op 6381.57 MB/s
|
||||||
|
BenchmarkDecode/16M-8 500 3099822 ns/op 5412.31 MB/s
|
||||||
|
BenchmarkDecode/128M-8 50 24865822 ns/op 5397.68 MB/s
|
||||||
|
```
|
||||||
|
|
||||||
|
[encoding/hex](https://golang.org/pkg/encoding/hex/):
|
||||||
|
```
|
||||||
|
BenchmarkRefEncode/15-8 50000000 36.1 ns/op 415.07 MB/s
|
||||||
|
BenchmarkRefEncode/32-8 20000000 72.9 ns/op 439.14 MB/s
|
||||||
|
BenchmarkRefEncode/128-8 5000000 289 ns/op 441.54 MB/s
|
||||||
|
BenchmarkRefEncode/1k-8 1000000 2268 ns/op 451.49 MB/s
|
||||||
|
BenchmarkRefEncode/16k-8 30000 39110 ns/op 418.91 MB/s
|
||||||
|
BenchmarkRefEncode/128k-8 5000 291260 ns/op 450.02 MB/s
|
||||||
|
BenchmarkRefEncode/1M-8 1000 2277578 ns/op 460.39 MB/s
|
||||||
|
BenchmarkRefEncode/16M-8 30 37087543 ns/op 452.37 MB/s
|
||||||
|
BenchmarkRefEncode/128M-8 5 293611713 ns/op 457.13 MB/s
|
||||||
|
BenchmarkRefDecode/14-8 30000000 53.7 ns/op 260.49 MB/s
|
||||||
|
BenchmarkRefDecode/32-8 10000000 128 ns/op 248.44 MB/s
|
||||||
|
BenchmarkRefDecode/128-8 3000000 481 ns/op 265.95 MB/s
|
||||||
|
BenchmarkRefDecode/1k-8 300000 4172 ns/op 245.43 MB/s
|
||||||
|
BenchmarkRefDecode/16k-8 10000 111989 ns/op 146.30 MB/s
|
||||||
|
BenchmarkRefDecode/128k-8 2000 909077 ns/op 144.18 MB/s
|
||||||
|
BenchmarkRefDecode/1M-8 200 7275779 ns/op 144.12 MB/s
|
||||||
|
BenchmarkRefDecode/16M-8 10 116574839 ns/op 143.92 MB/s
|
||||||
|
BenchmarkRefDecode/128M-8 2 933871637 ns/op 143.72 MB/s
|
||||||
|
```
|
||||||
|
|
||||||
|
[encoding/hex](https://golang.org/pkg/encoding/hex/) -> go-hex:
|
||||||
|
```
|
||||||
|
benchmark old ns/op new ns/op delta
|
||||||
|
BenchmarkEncode/15-8 36.1 17.4 -51.80%
|
||||||
|
BenchmarkEncode/32-8 72.9 11.9 -83.68%
|
||||||
|
BenchmarkEncode/128-8 289 21.4 -92.60%
|
||||||
|
BenchmarkEncode/1k-8 2268 88.5 -96.10%
|
||||||
|
BenchmarkEncode/16k-8 39110 1254 -96.79%
|
||||||
|
BenchmarkEncode/128k-8 291260 12965 -95.55%
|
||||||
|
BenchmarkEncode/1M-8 2277578 119465 -94.75%
|
||||||
|
BenchmarkEncode/16M-8 37087543 3530380 -90.48%
|
||||||
|
BenchmarkEncode/128M-8 293611713 28001913 -90.46%
|
||||||
|
BenchmarkDecode/14-8 53.7 12.6 -76.54%
|
||||||
|
BenchmarkDecode/32-8 128 12.5 -90.23%
|
||||||
|
BenchmarkDecode/128-8 481 27.2 -94.35%
|
||||||
|
BenchmarkDecode/1k-8 4172 168 -95.97%
|
||||||
|
BenchmarkDecode/16k-8 111989 2543 -97.73%
|
||||||
|
BenchmarkDecode/128k-8 909077 20339 -97.76%
|
||||||
|
BenchmarkDecode/1M-8 7275779 164313 -97.74%
|
||||||
|
BenchmarkDecode/16M-8 116574839 3099822 -97.34%
|
||||||
|
BenchmarkDecode/128M-8 933871637 24865822 -97.34%
|
||||||
|
|
||||||
|
benchmark old MB/s new MB/s speedup
|
||||||
|
BenchmarkEncode/15-8 415.07 863.43 2.08x
|
||||||
|
BenchmarkEncode/32-8 439.14 2690.43 6.13x
|
||||||
|
BenchmarkEncode/128-8 441.54 5982.92 13.55x
|
||||||
|
BenchmarkEncode/1k-8 451.49 11572.80 25.63x
|
||||||
|
BenchmarkEncode/16k-8 418.91 13058.10 31.17x
|
||||||
|
BenchmarkEncode/128k-8 450.02 10109.53 22.46x
|
||||||
|
BenchmarkEncode/1M-8 460.39 8777.23 19.06x
|
||||||
|
BenchmarkEncode/16M-8 452.37 4752.24 10.51x
|
||||||
|
BenchmarkEncode/128M-8 457.13 4793.16 10.49x
|
||||||
|
BenchmarkDecode/14-8 260.49 1110.01 4.26x
|
||||||
|
BenchmarkDecode/32-8 248.44 2558.10 10.30x
|
||||||
|
BenchmarkDecode/128-8 265.95 4697.66 17.66x
|
||||||
|
BenchmarkDecode/1k-8 245.43 6093.43 24.83x
|
||||||
|
BenchmarkDecode/16k-8 146.30 6442.09 44.03x
|
||||||
|
BenchmarkDecode/128k-8 144.18 6444.24 44.70x
|
||||||
|
BenchmarkDecode/1M-8 144.12 6381.57 44.28x
|
||||||
|
BenchmarkDecode/16M-8 143.92 5412.31 37.61x
|
||||||
|
BenchmarkDecode/128M-8 143.72 5397.68 37.56x
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
Unless otherwise noted, the go-hex source files are distributed under the Modified BSD License
|
||||||
|
found in the LICENSE file.
|
@ -0,0 +1,137 @@
|
|||||||
|
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Modified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
//
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package hex is an efficient hexadecimal implementation for Golang.
|
||||||
|
package hex
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errLength = errors.New("go-hex: odd length hex string")
|
||||||
|
|
||||||
|
var (
|
||||||
|
lower = []byte("0123456789abcdef")
|
||||||
|
upper = []byte("0123456789ABCDEF")
|
||||||
|
)
|
||||||
|
|
||||||
|
// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
|
||||||
|
type InvalidByteError byte
|
||||||
|
|
||||||
|
func (e InvalidByteError) Error() string {
|
||||||
|
return fmt.Sprintf("go-hex: invalid byte: %#U", rune(e))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodedLen returns the length of an encoding of n source bytes.
|
||||||
|
func EncodedLen(n int) int {
|
||||||
|
return n * 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodedLen returns the length of a decoding of n source bytes.
|
||||||
|
func DecodedLen(n int) int {
|
||||||
|
return n / 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode encodes src into EncodedLen(len(src))
|
||||||
|
// bytes of dst. As a convenience, it returns the number
|
||||||
|
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||||
|
// Encode implements lowercase hexadecimal encoding.
|
||||||
|
func Encode(dst, src []byte) int {
|
||||||
|
return RawEncode(dst, src, lower)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeUpper encodes src into EncodedLen(len(src))
|
||||||
|
// bytes of dst. As a convenience, it returns the number
|
||||||
|
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||||
|
// EncodeUpper implements uppercase hexadecimal encoding.
|
||||||
|
func EncodeUpper(dst, src []byte) int {
|
||||||
|
return RawEncode(dst, src, upper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeToString returns the lowercase hexadecimal encoding of src.
|
||||||
|
func EncodeToString(src []byte) string {
|
||||||
|
return RawEncodeToString(src, lower)
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncodeUpperToString returns the uppercase hexadecimal encoding of src.
|
||||||
|
func EncodeUpperToString(src []byte) string {
|
||||||
|
return RawEncodeToString(src, upper)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RawEncodeToString returns the hexadecimal encoding of src for a given
|
||||||
|
// alphabet.
|
||||||
|
func RawEncodeToString(src, alpha []byte) string {
|
||||||
|
dst := make([]byte, EncodedLen(len(src)))
|
||||||
|
RawEncode(dst, src, alpha)
|
||||||
|
return string(dst)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DecodeString returns the bytes represented by the hexadecimal string s.
|
||||||
|
func DecodeString(s string) ([]byte, error) {
|
||||||
|
src := []byte(s)
|
||||||
|
dst := make([]byte, DecodedLen(len(src)))
|
||||||
|
|
||||||
|
if _, err := Decode(dst, src); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustDecodeString is like DecodeString but panics if the string cannot be
|
||||||
|
// parsed. It simplifies safe initialization of global variables holding
|
||||||
|
// binary data.
|
||||||
|
func MustDecodeString(str string) []byte {
|
||||||
|
dst, err := DecodeString(str)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeGeneric(dst, src, alpha []byte) {
|
||||||
|
for i, v := range src {
|
||||||
|
dst[i*2] = alpha[v>>4]
|
||||||
|
dst[i*2+1] = alpha[v&0x0f]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeGeneric(dst, src []byte) (uint64, bool) {
|
||||||
|
for i := 0; i < len(src)/2; i++ {
|
||||||
|
a, ok := fromHexChar(src[i*2])
|
||||||
|
if !ok {
|
||||||
|
return uint64(i * 2), false
|
||||||
|
}
|
||||||
|
|
||||||
|
b, ok := fromHexChar(src[i*2+1])
|
||||||
|
if !ok {
|
||||||
|
return uint64(i*2 + 1), false
|
||||||
|
}
|
||||||
|
|
||||||
|
dst[i] = (a << 4) | b
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, true
|
||||||
|
}
|
||||||
|
|
||||||
|
// fromHexChar converts a hex character into its value and a success flag.
|
||||||
|
func fromHexChar(c byte) (byte, bool) {
|
||||||
|
switch {
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return c - '0', true
|
||||||
|
case 'a' <= c && c <= 'f':
|
||||||
|
return c - 'a' + 10, true
|
||||||
|
case 'A' <= c && c <= 'F':
|
||||||
|
return c - 'A' + 10, true
|
||||||
|
}
|
||||||
|
|
||||||
|
return 0, false
|
||||||
|
}
|
@ -0,0 +1,94 @@
|
|||||||
|
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Modified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64,!gccgo,!appengine
|
||||||
|
|
||||||
|
package hex
|
||||||
|
|
||||||
|
import "golang.org/x/sys/cpu"
|
||||||
|
|
||||||
|
// RawEncode encodes src into EncodedLen(len(src))
|
||||||
|
// bytes of dst. As a convenience, it returns the number
|
||||||
|
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||||
|
// RawEncode implements hexadecimal encoding for a given alphabet.
|
||||||
|
func RawEncode(dst, src, alpha []byte) int {
|
||||||
|
if len(alpha) != 16 {
|
||||||
|
panic("invalid alphabet")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dst) < len(src)*2 {
|
||||||
|
panic("dst buffer is too small")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(src) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
switch {
|
||||||
|
case cpu.X86.HasAVX:
|
||||||
|
encodeAVX(&dst[0], &src[0], uint64(len(src)), &alpha[0])
|
||||||
|
case cpu.X86.HasSSE41:
|
||||||
|
encodeSSE(&dst[0], &src[0], uint64(len(src)), &alpha[0])
|
||||||
|
default:
|
||||||
|
encodeGeneric(dst, src, alpha)
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(src) * 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
|
||||||
|
// number of bytes written to dst.
|
||||||
|
//
|
||||||
|
// If Decode encounters invalid input, it returns an error describing the failure.
|
||||||
|
func Decode(dst, src []byte) (int, error) {
|
||||||
|
if len(src)%2 != 0 {
|
||||||
|
return 0, errLength
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dst) < len(src)/2 {
|
||||||
|
panic("dst buffer is too small")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(src) == 0 {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
n uint64
|
||||||
|
ok bool
|
||||||
|
)
|
||||||
|
switch {
|
||||||
|
case cpu.X86.HasAVX:
|
||||||
|
n, ok = decodeAVX(&dst[0], &src[0], uint64(len(src)))
|
||||||
|
case cpu.X86.HasSSE41:
|
||||||
|
n, ok = decodeSSE(&dst[0], &src[0], uint64(len(src)))
|
||||||
|
default:
|
||||||
|
n, ok = decodeGeneric(dst, src)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
return 0, InvalidByteError(src[n])
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(src) / 2, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//go:generate go run asm_gen.go
|
||||||
|
|
||||||
|
// This function is implemented in hex_encode_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte)
|
||||||
|
|
||||||
|
// This function is implemented in hex_encode_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte)
|
||||||
|
|
||||||
|
// This function is implemented in hex_decode_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool)
|
||||||
|
|
||||||
|
// This function is implemented in hex_decode_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool)
|
@ -0,0 +1,303 @@
|
|||||||
|
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Modified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
//
|
||||||
|
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Simplified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
//
|
||||||
|
// This file is auto-generated - do not modify
|
||||||
|
|
||||||
|
// +build amd64,!gccgo,!appengine
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
DATA decodeBase<>+0x00(SB)/8, $0x3030303030303030
|
||||||
|
DATA decodeBase<>+0x08(SB)/8, $0x3030303030303030
|
||||||
|
DATA decodeBase<>+0x10(SB)/8, $0x2727272727272727
|
||||||
|
DATA decodeBase<>+0x18(SB)/8, $0x2727272727272727
|
||||||
|
GLOBL decodeBase<>(SB),RODATA,$32
|
||||||
|
|
||||||
|
DATA decodeToLower<>+0x00(SB)/8, $0x2020202020202020
|
||||||
|
DATA decodeToLower<>+0x08(SB)/8, $0x2020202020202020
|
||||||
|
GLOBL decodeToLower<>(SB),RODATA,$16
|
||||||
|
|
||||||
|
DATA decodeHigh<>+0x00(SB)/8, $0x0e0c0a0806040200
|
||||||
|
DATA decodeHigh<>+0x08(SB)/8, $0xffffffffffffffff
|
||||||
|
GLOBL decodeHigh<>(SB),RODATA,$16
|
||||||
|
|
||||||
|
DATA decodeLow<>+0x00(SB)/8, $0x0f0d0b0907050301
|
||||||
|
DATA decodeLow<>+0x08(SB)/8, $0xffffffffffffffff
|
||||||
|
GLOBL decodeLow<>(SB),RODATA,$16
|
||||||
|
|
||||||
|
DATA decodeValid<>+0x00(SB)/8, $0xb0b0b0b0b0b0b0b0
|
||||||
|
DATA decodeValid<>+0x08(SB)/8, $0xb0b0b0b0b0b0b0b0
|
||||||
|
DATA decodeValid<>+0x10(SB)/8, $0xb9b9b9b9b9b9b9b9
|
||||||
|
DATA decodeValid<>+0x18(SB)/8, $0xb9b9b9b9b9b9b9b9
|
||||||
|
DATA decodeValid<>+0x20(SB)/8, $0xe1e1e1e1e1e1e1e1
|
||||||
|
DATA decodeValid<>+0x28(SB)/8, $0xe1e1e1e1e1e1e1e1
|
||||||
|
DATA decodeValid<>+0x30(SB)/8, $0xe6e6e6e6e6e6e6e6
|
||||||
|
DATA decodeValid<>+0x38(SB)/8, $0xe6e6e6e6e6e6e6e6
|
||||||
|
GLOBL decodeValid<>(SB),RODATA,$64
|
||||||
|
|
||||||
|
DATA decodeToSigned<>+0x00(SB)/8, $0x8080808080808080
|
||||||
|
DATA decodeToSigned<>+0x08(SB)/8, $0x8080808080808080
|
||||||
|
GLOBL decodeToSigned<>(SB),RODATA,$16
|
||||||
|
|
||||||
|
TEXT ·decodeAVX(SB),NOSPLIT,$0
|
||||||
|
MOVQ dst+0(FP), DI
|
||||||
|
MOVQ src+8(FP), SI
|
||||||
|
MOVQ len+16(FP), BX
|
||||||
|
MOVQ SI, R15
|
||||||
|
MOVOU decodeValid<>(SB), X14
|
||||||
|
MOVOU decodeValid<>+0x20(SB), X15
|
||||||
|
MOVW $65535, DX
|
||||||
|
CMPQ BX, $16
|
||||||
|
JB tail
|
||||||
|
bigloop:
|
||||||
|
MOVOU (SI), X0
|
||||||
|
VPXOR decodeToSigned<>(SB), X0, X1
|
||||||
|
POR decodeToLower<>(SB), X0
|
||||||
|
VPXOR decodeToSigned<>(SB), X0, X2
|
||||||
|
VPCMPGTB X1, X14, X3
|
||||||
|
PCMPGTB decodeValid<>+0x10(SB), X1
|
||||||
|
VPCMPGTB X2, X15, X4
|
||||||
|
PCMPGTB decodeValid<>+0x30(SB), X2
|
||||||
|
PAND X4, X1
|
||||||
|
POR X2, X3
|
||||||
|
POR X1, X3
|
||||||
|
PMOVMSKB X3, AX
|
||||||
|
TESTW AX, DX
|
||||||
|
JNZ invalid
|
||||||
|
PSUBB decodeBase<>(SB), X0
|
||||||
|
PANDN decodeBase<>+0x10(SB), X4
|
||||||
|
PSUBB X4, X0
|
||||||
|
VPSHUFB decodeLow<>(SB), X0, X3
|
||||||
|
PSHUFB decodeHigh<>(SB), X0
|
||||||
|
PSLLW $4, X0
|
||||||
|
POR X3, X0
|
||||||
|
MOVQ X0, (DI)
|
||||||
|
SUBQ $16, BX
|
||||||
|
JZ ret
|
||||||
|
ADDQ $16, SI
|
||||||
|
ADDQ $8, DI
|
||||||
|
CMPQ BX, $16
|
||||||
|
JAE bigloop
|
||||||
|
tail:
|
||||||
|
MOVQ $16, CX
|
||||||
|
SUBQ BX, CX
|
||||||
|
SHRW CX, DX
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_in_2
|
||||||
|
JE tail_in_4
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_in_6
|
||||||
|
JE tail_in_8
|
||||||
|
CMPQ BX, $12
|
||||||
|
JB tail_in_10
|
||||||
|
JE tail_in_12
|
||||||
|
tail_in_14:
|
||||||
|
PINSRW $6, 12(SI), X0
|
||||||
|
tail_in_12:
|
||||||
|
PINSRW $5, 10(SI), X0
|
||||||
|
tail_in_10:
|
||||||
|
PINSRW $4, 8(SI), X0
|
||||||
|
tail_in_8:
|
||||||
|
PINSRQ $0, (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_6:
|
||||||
|
PINSRW $2, 4(SI), X0
|
||||||
|
tail_in_4:
|
||||||
|
PINSRW $1, 2(SI), X0
|
||||||
|
tail_in_2:
|
||||||
|
PINSRW $0, (SI), X0
|
||||||
|
tail_conv:
|
||||||
|
VPXOR decodeToSigned<>(SB), X0, X1
|
||||||
|
POR decodeToLower<>(SB), X0
|
||||||
|
VPXOR decodeToSigned<>(SB), X0, X2
|
||||||
|
VPCMPGTB X1, X14, X3
|
||||||
|
PCMPGTB decodeValid<>+0x10(SB), X1
|
||||||
|
VPCMPGTB X2, X15, X4
|
||||||
|
PCMPGTB decodeValid<>+0x30(SB), X2
|
||||||
|
PAND X4, X1
|
||||||
|
POR X2, X3
|
||||||
|
POR X1, X3
|
||||||
|
PMOVMSKB X3, AX
|
||||||
|
TESTW AX, DX
|
||||||
|
JNZ invalid
|
||||||
|
PSUBB decodeBase<>(SB), X0
|
||||||
|
PANDN decodeBase<>+0x10(SB), X4
|
||||||
|
PSUBB X4, X0
|
||||||
|
VPSHUFB decodeLow<>(SB), X0, X3
|
||||||
|
PSHUFB decodeHigh<>(SB), X0
|
||||||
|
PSLLW $4, X0
|
||||||
|
POR X3, X0
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_out_2
|
||||||
|
JE tail_out_4
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_out_6
|
||||||
|
JE tail_out_8
|
||||||
|
CMPQ BX, $12
|
||||||
|
JB tail_out_10
|
||||||
|
JE tail_out_12
|
||||||
|
tail_out_14:
|
||||||
|
PEXTRB $6, X0, 6(DI)
|
||||||
|
tail_out_12:
|
||||||
|
PEXTRB $5, X0, 5(DI)
|
||||||
|
tail_out_10:
|
||||||
|
PEXTRB $4, X0, 4(DI)
|
||||||
|
tail_out_8:
|
||||||
|
MOVL X0, (DI)
|
||||||
|
JMP ret
|
||||||
|
tail_out_6:
|
||||||
|
PEXTRB $2, X0, 2(DI)
|
||||||
|
tail_out_4:
|
||||||
|
PEXTRB $1, X0, 1(DI)
|
||||||
|
tail_out_2:
|
||||||
|
PEXTRB $0, X0, (DI)
|
||||||
|
ret:
|
||||||
|
MOVB $1, ok+32(FP)
|
||||||
|
RET
|
||||||
|
invalid:
|
||||||
|
BSFW AX, AX
|
||||||
|
SUBQ R15, SI
|
||||||
|
ADDQ SI, AX
|
||||||
|
MOVQ AX, n+24(FP)
|
||||||
|
MOVB $0, ok+32(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT ·decodeSSE(SB),NOSPLIT,$0
|
||||||
|
MOVQ dst+0(FP), DI
|
||||||
|
MOVQ src+8(FP), SI
|
||||||
|
MOVQ len+16(FP), BX
|
||||||
|
MOVQ SI, R15
|
||||||
|
MOVOU decodeValid<>(SB), X14
|
||||||
|
MOVOU decodeValid<>+0x20(SB), X15
|
||||||
|
MOVW $65535, DX
|
||||||
|
CMPQ BX, $16
|
||||||
|
JB tail
|
||||||
|
bigloop:
|
||||||
|
MOVOU (SI), X0
|
||||||
|
MOVOU X0, X1
|
||||||
|
PXOR decodeToSigned<>(SB), X1
|
||||||
|
POR decodeToLower<>(SB), X0
|
||||||
|
MOVOU X0, X2
|
||||||
|
PXOR decodeToSigned<>(SB), X2
|
||||||
|
MOVOU X14, X3
|
||||||
|
PCMPGTB X1, X3
|
||||||
|
PCMPGTB decodeValid<>+0x10(SB), X1
|
||||||
|
MOVOU X15, X4
|
||||||
|
PCMPGTB X2, X4
|
||||||
|
PCMPGTB decodeValid<>+0x30(SB), X2
|
||||||
|
PAND X4, X1
|
||||||
|
POR X2, X3
|
||||||
|
POR X1, X3
|
||||||
|
PMOVMSKB X3, AX
|
||||||
|
TESTW AX, DX
|
||||||
|
JNZ invalid
|
||||||
|
PSUBB decodeBase<>(SB), X0
|
||||||
|
PANDN decodeBase<>+0x10(SB), X4
|
||||||
|
PSUBB X4, X0
|
||||||
|
MOVOU X0, X3
|
||||||
|
PSHUFB decodeLow<>(SB), X3
|
||||||
|
PSHUFB decodeHigh<>(SB), X0
|
||||||
|
PSLLW $4, X0
|
||||||
|
POR X3, X0
|
||||||
|
MOVQ X0, (DI)
|
||||||
|
SUBQ $16, BX
|
||||||
|
JZ ret
|
||||||
|
ADDQ $16, SI
|
||||||
|
ADDQ $8, DI
|
||||||
|
CMPQ BX, $16
|
||||||
|
JAE bigloop
|
||||||
|
tail:
|
||||||
|
MOVQ $16, CX
|
||||||
|
SUBQ BX, CX
|
||||||
|
SHRW CX, DX
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_in_2
|
||||||
|
JE tail_in_4
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_in_6
|
||||||
|
JE tail_in_8
|
||||||
|
CMPQ BX, $12
|
||||||
|
JB tail_in_10
|
||||||
|
JE tail_in_12
|
||||||
|
tail_in_14:
|
||||||
|
PINSRW $6, 12(SI), X0
|
||||||
|
tail_in_12:
|
||||||
|
PINSRW $5, 10(SI), X0
|
||||||
|
tail_in_10:
|
||||||
|
PINSRW $4, 8(SI), X0
|
||||||
|
tail_in_8:
|
||||||
|
PINSRQ $0, (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_6:
|
||||||
|
PINSRW $2, 4(SI), X0
|
||||||
|
tail_in_4:
|
||||||
|
PINSRW $1, 2(SI), X0
|
||||||
|
tail_in_2:
|
||||||
|
PINSRW $0, (SI), X0
|
||||||
|
tail_conv:
|
||||||
|
MOVOU X0, X1
|
||||||
|
PXOR decodeToSigned<>(SB), X1
|
||||||
|
POR decodeToLower<>(SB), X0
|
||||||
|
MOVOU X0, X2
|
||||||
|
PXOR decodeToSigned<>(SB), X2
|
||||||
|
MOVOU X14, X3
|
||||||
|
PCMPGTB X1, X3
|
||||||
|
PCMPGTB decodeValid<>+0x10(SB), X1
|
||||||
|
MOVOU X15, X4
|
||||||
|
PCMPGTB X2, X4
|
||||||
|
PCMPGTB decodeValid<>+0x30(SB), X2
|
||||||
|
PAND X4, X1
|
||||||
|
POR X2, X3
|
||||||
|
POR X1, X3
|
||||||
|
PMOVMSKB X3, AX
|
||||||
|
TESTW AX, DX
|
||||||
|
JNZ invalid
|
||||||
|
PSUBB decodeBase<>(SB), X0
|
||||||
|
PANDN decodeBase<>+0x10(SB), X4
|
||||||
|
PSUBB X4, X0
|
||||||
|
MOVOU X0, X3
|
||||||
|
PSHUFB decodeLow<>(SB), X3
|
||||||
|
PSHUFB decodeHigh<>(SB), X0
|
||||||
|
PSLLW $4, X0
|
||||||
|
POR X3, X0
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_out_2
|
||||||
|
JE tail_out_4
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_out_6
|
||||||
|
JE tail_out_8
|
||||||
|
CMPQ BX, $12
|
||||||
|
JB tail_out_10
|
||||||
|
JE tail_out_12
|
||||||
|
tail_out_14:
|
||||||
|
PEXTRB $6, X0, 6(DI)
|
||||||
|
tail_out_12:
|
||||||
|
PEXTRB $5, X0, 5(DI)
|
||||||
|
tail_out_10:
|
||||||
|
PEXTRB $4, X0, 4(DI)
|
||||||
|
tail_out_8:
|
||||||
|
MOVL X0, (DI)
|
||||||
|
JMP ret
|
||||||
|
tail_out_6:
|
||||||
|
PEXTRB $2, X0, 2(DI)
|
||||||
|
tail_out_4:
|
||||||
|
PEXTRB $1, X0, 1(DI)
|
||||||
|
tail_out_2:
|
||||||
|
PEXTRB $0, X0, (DI)
|
||||||
|
ret:
|
||||||
|
MOVB $1, ok+32(FP)
|
||||||
|
RET
|
||||||
|
invalid:
|
||||||
|
BSFW AX, AX
|
||||||
|
SUBQ R15, SI
|
||||||
|
ADDQ SI, AX
|
||||||
|
MOVQ AX, n+24(FP)
|
||||||
|
MOVB $0, ok+32(FP)
|
||||||
|
RET
|
@ -0,0 +1,227 @@
|
|||||||
|
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Modified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
//
|
||||||
|
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
|
||||||
|
// Use of this source code is governed by a
|
||||||
|
// Simplified BSD License license that can be found in
|
||||||
|
// the LICENSE file.
|
||||||
|
//
|
||||||
|
// This file is auto-generated - do not modify
|
||||||
|
|
||||||
|
// +build amd64,!gccgo,!appengine
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
DATA encodeMask<>+0x00(SB)/8, $0x0f0f0f0f0f0f0f0f
|
||||||
|
DATA encodeMask<>+0x08(SB)/8, $0x0f0f0f0f0f0f0f0f
|
||||||
|
GLOBL encodeMask<>(SB),RODATA,$16
|
||||||
|
|
||||||
|
TEXT ·encodeAVX(SB),NOSPLIT,$0
|
||||||
|
MOVQ dst+0(FP), DI
|
||||||
|
MOVQ src+8(FP), SI
|
||||||
|
MOVQ len+16(FP), BX
|
||||||
|
MOVQ alpha+24(FP), DX
|
||||||
|
MOVOU (DX), X15
|
||||||
|
CMPQ BX, $16
|
||||||
|
JB tail
|
||||||
|
bigloop:
|
||||||
|
MOVOU -16(SI)(BX*1), X0
|
||||||
|
VPAND encodeMask<>(SB), X0, X1
|
||||||
|
PSRLW $4, X0
|
||||||
|
PAND encodeMask<>(SB), X0
|
||||||
|
VPUNPCKHBW X1, X0, X3
|
||||||
|
PUNPCKLBW X1, X0
|
||||||
|
VPSHUFB X0, X15, X1
|
||||||
|
VPSHUFB X3, X15, X2
|
||||||
|
MOVOU X2, -16(DI)(BX*2)
|
||||||
|
MOVOU X1, -32(DI)(BX*2)
|
||||||
|
SUBQ $16, BX
|
||||||
|
JZ ret
|
||||||
|
CMPQ BX, $16
|
||||||
|
JAE bigloop
|
||||||
|
tail:
|
||||||
|
CMPQ BX, $2
|
||||||
|
JB tail_in_1
|
||||||
|
JE tail_in_2
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_in_3
|
||||||
|
JE tail_in_4
|
||||||
|
CMPQ BX, $6
|
||||||
|
JB tail_in_5
|
||||||
|
JE tail_in_6
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_in_7
|
||||||
|
tail_in_8:
|
||||||
|
MOVQ (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_7:
|
||||||
|
PINSRB $6, 6(SI), X0
|
||||||
|
tail_in_6:
|
||||||
|
PINSRB $5, 5(SI), X0
|
||||||
|
tail_in_5:
|
||||||
|
PINSRB $4, 4(SI), X0
|
||||||
|
tail_in_4:
|
||||||
|
PINSRD $0, (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_3:
|
||||||
|
PINSRB $2, 2(SI), X0
|
||||||
|
tail_in_2:
|
||||||
|
PINSRB $1, 1(SI), X0
|
||||||
|
tail_in_1:
|
||||||
|
PINSRB $0, (SI), X0
|
||||||
|
tail_conv:
|
||||||
|
VPAND encodeMask<>(SB), X0, X1
|
||||||
|
PSRLW $4, X0
|
||||||
|
PAND encodeMask<>(SB), X0
|
||||||
|
PUNPCKLBW X1, X0
|
||||||
|
VPSHUFB X0, X15, X1
|
||||||
|
CMPQ BX, $2
|
||||||
|
JB tail_out_1
|
||||||
|
JE tail_out_2
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_out_3
|
||||||
|
JE tail_out_4
|
||||||
|
CMPQ BX, $6
|
||||||
|
JB tail_out_5
|
||||||
|
JE tail_out_6
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_out_7
|
||||||
|
tail_out_8:
|
||||||
|
MOVOU X1, (DI)
|
||||||
|
SUBQ $8, BX
|
||||||
|
JZ ret
|
||||||
|
ADDQ $8, SI
|
||||||
|
ADDQ $16, DI
|
||||||
|
JMP tail
|
||||||
|
tail_out_7:
|
||||||
|
PEXTRB $13, X1, 13(DI)
|
||||||
|
PEXTRB $12, X1, 12(DI)
|
||||||
|
tail_out_6:
|
||||||
|
PEXTRB $11, X1, 11(DI)
|
||||||
|
PEXTRB $10, X1, 10(DI)
|
||||||
|
tail_out_5:
|
||||||
|
PEXTRB $9, X1, 9(DI)
|
||||||
|
PEXTRB $8, X1, 8(DI)
|
||||||
|
tail_out_4:
|
||||||
|
MOVQ X1, (DI)
|
||||||
|
RET
|
||||||
|
tail_out_3:
|
||||||
|
PEXTRB $5, X1, 5(DI)
|
||||||
|
PEXTRB $4, X1, 4(DI)
|
||||||
|
tail_out_2:
|
||||||
|
PEXTRB $3, X1, 3(DI)
|
||||||
|
PEXTRB $2, X1, 2(DI)
|
||||||
|
tail_out_1:
|
||||||
|
PEXTRB $1, X1, 1(DI)
|
||||||
|
PEXTRB $0, X1, (DI)
|
||||||
|
ret:
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT ·encodeSSE(SB),NOSPLIT,$0
|
||||||
|
MOVQ dst+0(FP), DI
|
||||||
|
MOVQ src+8(FP), SI
|
||||||
|
MOVQ len+16(FP), BX
|
||||||
|
MOVQ alpha+24(FP), DX
|
||||||
|
MOVOU (DX), X15
|
||||||
|
CMPQ BX, $16
|
||||||
|
JB tail
|
||||||
|
bigloop:
|
||||||
|
MOVOU -16(SI)(BX*1), X0
|
||||||
|
MOVOU X0, X1
|
||||||
|
PAND encodeMask<>(SB), X1
|
||||||
|
PSRLW $4, X0
|
||||||
|
PAND encodeMask<>(SB), X0
|
||||||
|
MOVOU X0, X3
|
||||||
|
PUNPCKHBW X1, X3
|
||||||
|
PUNPCKLBW X1, X0
|
||||||
|
MOVOU X15, X1
|
||||||
|
PSHUFB X0, X1
|
||||||
|
MOVOU X15, X2
|
||||||
|
PSHUFB X3, X2
|
||||||
|
MOVOU X2, -16(DI)(BX*2)
|
||||||
|
MOVOU X1, -32(DI)(BX*2)
|
||||||
|
SUBQ $16, BX
|
||||||
|
JZ ret
|
||||||
|
CMPQ BX, $16
|
||||||
|
JAE bigloop
|
||||||
|
tail:
|
||||||
|
CMPQ BX, $2
|
||||||
|
JB tail_in_1
|
||||||
|
JE tail_in_2
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_in_3
|
||||||
|
JE tail_in_4
|
||||||
|
CMPQ BX, $6
|
||||||
|
JB tail_in_5
|
||||||
|
JE tail_in_6
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_in_7
|
||||||
|
tail_in_8:
|
||||||
|
MOVQ (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_7:
|
||||||
|
PINSRB $6, 6(SI), X0
|
||||||
|
tail_in_6:
|
||||||
|
PINSRB $5, 5(SI), X0
|
||||||
|
tail_in_5:
|
||||||
|
PINSRB $4, 4(SI), X0
|
||||||
|
tail_in_4:
|
||||||
|
PINSRD $0, (SI), X0
|
||||||
|
JMP tail_conv
|
||||||
|
tail_in_3:
|
||||||
|
PINSRB $2, 2(SI), X0
|
||||||
|
tail_in_2:
|
||||||
|
PINSRB $1, 1(SI), X0
|
||||||
|
tail_in_1:
|
||||||
|
PINSRB $0, (SI), X0
|
||||||
|
tail_conv:
|
||||||
|
MOVOU X0, X1
|
||||||
|
PAND encodeMask<>(SB), X1
|
||||||
|
PSRLW $4, X0
|
||||||
|
PAND encodeMask<>(SB), X0
|
||||||
|
PUNPCKLBW X1, X0
|
||||||
|
MOVOU X15, X1
|
||||||
|
PSHUFB X0, X1
|
||||||
|
CMPQ BX, $2
|
||||||
|
JB tail_out_1
|
||||||
|
JE tail_out_2
|
||||||
|
CMPQ BX, $4
|
||||||
|
JB tail_out_3
|
||||||
|
JE tail_out_4
|
||||||
|
CMPQ BX, $6
|
||||||
|
JB tail_out_5
|
||||||
|
JE tail_out_6
|
||||||
|
CMPQ BX, $8
|
||||||
|
JB tail_out_7
|
||||||
|
tail_out_8:
|
||||||
|
MOVOU X1, (DI)
|
||||||
|
SUBQ $8, BX
|
||||||
|
JZ ret
|
||||||
|
ADDQ $8, SI
|
||||||
|
ADDQ $16, DI
|
||||||
|
JMP tail
|
||||||
|
tail_out_7:
|
||||||
|
PEXTRB $13, X1, 13(DI)
|
||||||
|
PEXTRB $12, X1, 12(DI)
|
||||||
|
tail_out_6:
|
||||||
|
PEXTRB $11, X1, 11(DI)
|
||||||
|
PEXTRB $10, X1, 10(DI)
|
||||||
|
tail_out_5:
|
||||||
|
PEXTRB $9, X1, 9(DI)
|
||||||
|
PEXTRB $8, X1, 8(DI)
|
||||||
|
tail_out_4:
|
||||||
|
MOVQ X1, (DI)
|
||||||
|
RET
|
||||||
|
tail_out_3:
|
||||||
|
PEXTRB $5, X1, 5(DI)
|
||||||
|
PEXTRB $4, X1, 4(DI)
|
||||||
|
tail_out_2:
|
||||||
|
PEXTRB $3, X1, 3(DI)
|
||||||
|
PEXTRB $2, X1, 2(DI)
|
||||||
|
tail_out_1:
|
||||||
|
PEXTRB $1, X1, 1(DI)
|
||||||
|
PEXTRB $0, X1, (DI)
|
||||||
|
ret:
|
||||||
|
RET
|
@ -0,0 +1,36 @@
|
|||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !amd64 gccgo appengine
|
||||||
|
|
||||||
|
package hex
|
||||||
|
|
||||||
|
// RawEncode encodes src into EncodedLen(len(src))
|
||||||
|
// bytes of dst. As a convenience, it returns the number
|
||||||
|
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||||
|
// RawEncode implements hexadecimal encoding for a given alphabet.
|
||||||
|
func RawEncode(dst, src, alpha []byte) int {
|
||||||
|
if len(alpha) != 16 {
|
||||||
|
panic("invalid alphabet")
|
||||||
|
}
|
||||||
|
|
||||||
|
encodeGeneric(dst, src, alpha)
|
||||||
|
return len(src) * 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
|
||||||
|
// number of bytes written to dst.
|
||||||
|
//
|
||||||
|
// If Decode encounters invalid input, it returns an error describing the failure.
|
||||||
|
func Decode(dst, src []byte) (int, error) {
|
||||||
|
if len(src)%2 == 1 {
|
||||||
|
return 0, errLength
|
||||||
|
}
|
||||||
|
|
||||||
|
if n, ok := decodeGeneric(dst, src); !ok {
|
||||||
|
return 0, InvalidByteError(src[n])
|
||||||
|
}
|
||||||
|
|
||||||
|
return len(src) / 2, nil
|
||||||
|
}
|
@ -0,0 +1,3 @@
|
|||||||
|
# Patterns for files created by this project.
|
||||||
|
# For other files, use global gitignore.
|
||||||
|
*.s3db
|
@ -0,0 +1,6 @@
|
|||||||
|
trailingComma: all
|
||||||
|
tabWidth: 2
|
||||||
|
semi: false
|
||||||
|
singleQuote: true
|
||||||
|
proseWrap: always
|
||||||
|
printWidth: 100
|
@ -0,0 +1,687 @@
|
|||||||
|
## [1.1.12](https://github.com/uptrace/bun/compare/v1.1.11...v1.1.12) (2023-02-20)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.11](https://github.com/uptrace/bun/compare/v1.1.10...v1.1.11) (2023-02-01)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* add support for inserting values with unicode encoding for mssql dialect ([e98c6c0](https://github.com/uptrace/bun/commit/e98c6c0f033b553bea3bbc783aa56c2eaa17718f))
|
||||||
|
* fix relation tag ([a3eedff](https://github.com/uptrace/bun/commit/a3eedff49700490d4998dcdcdc04f554d8f17166))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.10](https://github.com/uptrace/bun/compare/v1.1.9...v1.1.10) (2023-01-16)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* allow QueryEvent to better detect operations in raw queries ([8e44735](https://github.com/uptrace/bun/commit/8e4473538364bae6562055d35e94c3e9c0b77691))
|
||||||
|
* append default VARCHAR length instead of hardcoding it in the type definition ([e5079c7](https://github.com/uptrace/bun/commit/e5079c70343ba8c8b410aed23ac1d1ae5a2c9ff6))
|
||||||
|
* prevent panic when use pg array with custom database type ([67e4412](https://github.com/uptrace/bun/commit/67e4412a972a9ed5f3a1d07c66957beedbc8a8a3))
|
||||||
|
* properly return sql.ErrNoRows when scanning []byte ([996fead](https://github.com/uptrace/bun/commit/996fead2595fbcaff4878b77befe6709a54b3a4d))
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* mssql output support for update or delete query ([#718](https://github.com/uptrace/bun/issues/718)) ([08876b4](https://github.com/uptrace/bun/commit/08876b4d420e761cbfa658aa6bb89b3f7c62c240))
|
||||||
|
* add Err method to query builder ([c722c90](https://github.com/uptrace/bun/commit/c722c90f3dce2642ca4f4c2ab3f9a35cd496b557))
|
||||||
|
* add support for time.Time array in Postgres ([3dd6f3b](https://github.com/uptrace/bun/commit/3dd6f3b2ac1bfbcda08240dc1676647b61715a9c))
|
||||||
|
* mssql and pg merge query ([#723](https://github.com/uptrace/bun/issues/723)) ([deea764](https://github.com/uptrace/bun/commit/deea764d9380b16aad34228aa32717d10f2a4bab))
|
||||||
|
* setError on attempt to set non-positive .Varchar() ([3335e0b](https://github.com/uptrace/bun/commit/3335e0b9d6d3f424145e1f715223a0fffe773d9a))
|
||||||
|
|
||||||
|
|
||||||
|
### Reverts
|
||||||
|
|
||||||
|
* go 1.18 ([67a4488](https://github.com/uptrace/bun/commit/67a448897eaaf1ebc54d629dfd3b2509b35da352))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.9](https://github.com/uptrace/bun/compare/v1.1.8...v1.1.9) (2022-11-23)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* addng dialect override for append-bool ([#695](https://github.com/uptrace/bun/issues/695)) ([338f2f0](https://github.com/uptrace/bun/commit/338f2f04105ad89e64530db86aeb387e2ad4789e))
|
||||||
|
* don't call hooks twice for whereExists ([9057857](https://github.com/uptrace/bun/commit/90578578e717f248e4b6eb114c5b495fd8d4ed41))
|
||||||
|
* don't lock migrations when running Migrate and Rollback ([69a7354](https://github.com/uptrace/bun/commit/69a7354d987ff2ed5338c9ef5f4ce320724299ab))
|
||||||
|
* **query:** make WhereDeleted compatible with ForceDelete ([299c3fd](https://github.com/uptrace/bun/commit/299c3fd57866aaecd127a8f219c95332898475db)), closes [#673](https://github.com/uptrace/bun/issues/673)
|
||||||
|
* relation join soft delete SQL generate ([a98f4e9](https://github.com/uptrace/bun/commit/a98f4e9f2bbdbc2b81cd13aa228a1a91eb905ba2))
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* add migrate.Exec ([d368bbe](https://github.com/uptrace/bun/commit/d368bbe52bb1ee3dabf0aada190bf967eec10255))
|
||||||
|
* **update:** "skipupdate" while bulk ([1a32b2f](https://github.com/uptrace/bun/commit/1a32b2ffbd5bc9a8d8b5978dd0f16c9fb79242ee))
|
||||||
|
* **zerolog:** added zerolog hook ([9d2267d](https://github.com/uptrace/bun/commit/9d2267d414b47164ab6ceada55bf311ad548a6b0))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.8](https://github.com/uptrace/bun/compare/v1.1.7...v1.1.8) (2022-08-29)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* **bunotel:** handle option attributes ([#656](https://github.com/uptrace/bun/issues/656)) ([9f1e0bd](https://github.com/uptrace/bun/commit/9f1e0bd19fc0300f12996b3e6595f093024e06b6))
|
||||||
|
* driver.Valuer returns itself causes stackoverflow ([c9f51d3](https://github.com/uptrace/bun/commit/c9f51d3e2dabed0c29c26a4221abbc426a7206f3)), closes [#657](https://github.com/uptrace/bun/issues/657)
|
||||||
|
* **pgdriver:** return FATAL and PANIC errors immediately ([4595e38](https://github.com/uptrace/bun/commit/4595e385d3706116e47bf9dc295186ec7a2ab0f9))
|
||||||
|
* quote m2m table name fixes [#649](https://github.com/uptrace/bun/issues/649) ([61a634e](https://github.com/uptrace/bun/commit/61a634e4cd5c18df4b75f756d4b0f06ea94bc3c8))
|
||||||
|
* support multi-level embed column ([177ec4c](https://github.com/uptrace/bun/commit/177ec4c6e04f92957614ad4724bc82c422649a4b)), closes [#643](https://github.com/uptrace/bun/issues/643)
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* conditions not supporting composite in ([e5d78d4](https://github.com/uptrace/bun/commit/e5d78d464b94b78438cf275b4c35f713d129961d))
|
||||||
|
* **idb:** support raw query ([be4e688](https://github.com/uptrace/bun/commit/be4e6886ad94b4b6ca42f24f73d79a15b1ac3188))
|
||||||
|
* **migrate:** add MissingMigrations ([42567d0](https://github.com/uptrace/bun/commit/42567d052280f2c412d4796df7178915e537e6d9))
|
||||||
|
* **pgdriver:** implement database/sql/driver.SessionResetter ([bda298a](https://github.com/uptrace/bun/commit/bda298ac66305e5b00ba67d72d3973625930c6b9))
|
||||||
|
* **pgdriver:** provide access to the underlying net.Conn ([d07ea0e](https://github.com/uptrace/bun/commit/d07ea0ed1541225b5f08e59a4c87383811f7f051))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.7](https://github.com/uptrace/bun/compare/v1.1.6...v1.1.7) (2022-07-29)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* change ScanAndCount without a limit to select all rows ([de5c570](https://github.com/uptrace/bun/commit/de5c5704166563aea41a82f7863f2db88ff108e2))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.6](https://github.com/uptrace/bun/compare/v1.1.5...v1.1.6) (2022-07-10)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* bunotel add set attributes to query metrics ([dae82cc](https://github.com/uptrace/bun/commit/dae82cc0e3af49be1e474027b55c34364676985d))
|
||||||
|
* **db.ScanRows:** ensure rows.Close is called ([9ffbc6a](https://github.com/uptrace/bun/commit/9ffbc6a46e24b908742b6973f33ef8e5b17cc12b))
|
||||||
|
* merge apply ([3081849](https://github.com/uptrace/bun/commit/30818499eacddd3b1a3e749091ba6a1468125641))
|
||||||
|
* **migrate:** close conn/tx on error ([7b168ea](https://github.com/uptrace/bun/commit/7b168eabfe0f844bcbf8dc89629d04c385b9f58c))
|
||||||
|
* **migrate:** type Migration should be used as a value rather than a pointer ([fb43935](https://github.com/uptrace/bun/commit/fb4393582b49fe528800a66aac5fb1c9a6033048))
|
||||||
|
* **migrate:** type MigrationGroup should be used as a value rather than a pointer ([649da1b](https://github.com/uptrace/bun/commit/649da1b3c158060add9b61b32c289260daafa65a))
|
||||||
|
* mssql cursor pagination ([#589](https://github.com/uptrace/bun/issues/589)) ([b34ec97](https://github.com/uptrace/bun/commit/b34ec97ddda95629f73762721d60fd3e00e7e99f))
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* "skipupdate" model field tag ([#565](https://github.com/uptrace/bun/issues/565)) ([9288294](https://github.com/uptrace/bun/commit/928829482c718a0c215aa4f4adfa6f3fb3ed4302))
|
||||||
|
* add pgdriver write error to log ([5ddda3d](https://github.com/uptrace/bun/commit/5ddda3de31cd08ceee4bdea64ceae8d15eace07b))
|
||||||
|
* add query string representation ([520da7e](https://github.com/uptrace/bun/commit/520da7e1d6dbf7b06846f6b39a7f99e8753c1466))
|
||||||
|
* add relation condition with tag ([fe5bbf6](https://github.com/uptrace/bun/commit/fe5bbf64f33d25b310e5510ece7d705b9eb3bfea))
|
||||||
|
* add support for ON UPDATE and ON DELETE rules on belongs-to relationships from struct tags ([#533](https://github.com/uptrace/bun/issues/533)) ([a327b2a](https://github.com/uptrace/bun/commit/a327b2ae216abb55a705626296c0cdbf8d648697))
|
||||||
|
* add tx methods to IDB ([#587](https://github.com/uptrace/bun/issues/587)) ([feab313](https://github.com/uptrace/bun/commit/feab313c0358200b6e270ac70f4551b011ab5276))
|
||||||
|
* added raw query calls ([#596](https://github.com/uptrace/bun/issues/596)) ([127644d](https://github.com/uptrace/bun/commit/127644d2eea443736fbd6bed3417595d439e4639))
|
||||||
|
* **bunotel:** add option to enable formatting of queries ([#547](https://github.com/uptrace/bun/issues/547)) ([b9c768c](https://github.com/uptrace/bun/commit/b9c768cec3b5dea36c3c9c344d1e76e0ffad1369))
|
||||||
|
* **config.go:** add sslrootcert support to DSN parameters ([3bd5d69](https://github.com/uptrace/bun/commit/3bd5d692d7df4f30d07b835d6a46fc7af382489a))
|
||||||
|
* create an extra module for newrelic ([#599](https://github.com/uptrace/bun/issues/599)) ([6c676ce](https://github.com/uptrace/bun/commit/6c676ce13f05fe763471fbec2d5a2db48bc88650))
|
||||||
|
* **migrate:** add WithMarkAppliedOnSuccess ([31b2cc4](https://github.com/uptrace/bun/commit/31b2cc4f5ccd794a436d081073d4974835d3780d))
|
||||||
|
* **pgdialect:** add hstore support ([66b44f7](https://github.com/uptrace/bun/commit/66b44f7c0edc205927fb8be96aaf263b31828fa1))
|
||||||
|
* **pgdialect:** add identity support ([646251e](https://github.com/uptrace/bun/commit/646251ec02a1e2ec717e907e6f128d8b51f17c6d))
|
||||||
|
* **pgdriver:** expose pgdriver.ParseTime ([405a7d7](https://github.com/uptrace/bun/commit/405a7d78d8f60cf27e8f175deaf95db5877d84be))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.5](https://github.com/uptrace/bun/compare/v1.1.4...v1.1.5) (2022-05-12)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* **driver/sqliteshim:** make it work with recent version of modernc sqlite ([2360584](https://github.com/uptrace/bun/commit/23605846c20684e39bf1eaac50a2147a1b68a729))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## [1.1.4](https://github.com/uptrace/bun/compare/v1.1.3...v1.1.4) (2022-04-20)
|
||||||
|
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
* automatically set nullzero when there is default:value option ([72c44ae](https://github.com/uptrace/bun/commit/72c44aebbeec3a83ed97ea25a3262174d744df65))
|
||||||
|
* fix ForceDelete on live/undeleted rows ([1a33250](https://github.com/uptrace/bun/commit/1a33250f27f00e752a735ce10311ac95dcb0c968))
|
||||||
|
* fix OmitZero and value overriding ([087ea07](https://github.com/uptrace/bun/commit/087ea0730551f1e841bacb6ad2fa3afd512a1df8))
|
||||||
|
* rename Query to QueryBuilder ([98d111b](https://github.com/uptrace/bun/commit/98d111b7cc00fa61b6b2cec147f43285f4baadb4))
|
||||||
|
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
* add ApplyQueryBuilder ([582eca0](https://github.com/uptrace/bun/commit/582eca09cf2b59e67c2e4a2ad24f1a74cb53addd))
|
||||||
|
* **config.go:** add connect_timeout to DSN parsable params ([998b04d](https://github.com/uptrace/bun/commit/998b04d51a9a4f182ac3458f90db8dbf9185c4ba)), closes [#505](https://github.com/uptrace/bun/issues/505)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# [1.1.3](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-29)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- fix panic message when has-many encounter an error
|
||||||
|
([cfd2747](https://github.com/uptrace/bun/commit/cfd27475fac89a1c8cf798bfa64898bd77bbba79))
|
||||||
|
- **migrate:** change rollback to match migrate behavior
|
||||||
|
([df5af9c](https://github.com/uptrace/bun/commit/df5af9c9cbdf54ce243e037bbb2c7b154f8422b3))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- added QueryBuilder interface for SelectQuery, UpdateQuery, DeleteQuery
|
||||||
|
([#499](https://github.com/uptrace/bun/issues/499))
|
||||||
|
([59fef48](https://github.com/uptrace/bun/commit/59fef48f6b3ec7f32bdda779b6693c333ff1dfdb))
|
||||||
|
|
||||||
|
# [1.1.2](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-22)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- correctly handle bun.In([][]byte{...})
|
||||||
|
([800616e](https://github.com/uptrace/bun/commit/800616ed28ca600ad676319a10adb970b2b4daf6))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- accept extend option to allow extending existing models
|
||||||
|
([48b80e4](https://github.com/uptrace/bun/commit/48b80e4f7e3ed8a28fd305f7853ebe7ab984a497))
|
||||||
|
|
||||||
|
# [1.1.0](https://github.com/uptrace/bun/compare/v1.1.0-beta.1...v1.1.0) (2022-02-28)
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- Added [MSSQL](https://bun.uptrace.dev/guide/drivers.html#mssql) support as a 4th fully supported
|
||||||
|
DBMS.
|
||||||
|
- Added `SetColumn("col_name", "upper(?)", "hello")` in addition to
|
||||||
|
`Set("col_name = upper(?)", "hello")` which works for all 4 supported DBMS.
|
||||||
|
|
||||||
|
* improve nil ptr values handling
|
||||||
|
([b398e6b](https://github.com/uptrace/bun/commit/b398e6bea840ea2fd3e001b7879c0b00b6dcd6f7))
|
||||||
|
|
||||||
|
### Breaking changes
|
||||||
|
|
||||||
|
- Bun no longer automatically marks some fields like `ID int64` as `pk` and `autoincrement`. You
|
||||||
|
need to manually add those options:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type Model struct {
|
||||||
|
- ID int64
|
||||||
|
+ ID int64 `bun:",pk,autoincrement"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Bun [v1.0.25](#1024-2022-02-22) prints warnings for models with missing options so you are
|
||||||
|
recommended to upgrade to v1.0.24 before upgrading to v1.1.x.
|
||||||
|
|
||||||
|
- Also, Bun no longer adds `nullzero` option to `soft_delete` fields.
|
||||||
|
|
||||||
|
- Removed `nopk` and `allowzero` options.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- append slice values
|
||||||
|
([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
|
||||||
|
- check for nils when appeding driver.Value
|
||||||
|
([7bb1640](https://github.com/uptrace/bun/commit/7bb1640a00fceca1e1075fe6544b9a4842ab2b26))
|
||||||
|
- cleanup soft deletes for mssql
|
||||||
|
([e72e2c5](https://github.com/uptrace/bun/commit/e72e2c5d0a85f3d26c3fa22c7284c2de1dcfda8e))
|
||||||
|
- **dbfixture:** apply cascade option. Fixes [#447](https://github.com/uptrace/bun/issues/447)
|
||||||
|
([d32d988](https://github.com/uptrace/bun/commit/d32d98840bc23e74c836f8192cb4bc9529aa9233))
|
||||||
|
- create table WithForeignKey() and has-many relation
|
||||||
|
([3cf5649](https://github.com/uptrace/bun/commit/3cf56491706b5652c383dbe007ff2389ad64922e))
|
||||||
|
- do not emit m2m relations in WithForeignKeys()
|
||||||
|
([56c8c5e](https://github.com/uptrace/bun/commit/56c8c5ed44c0d6d734c3d3161c642ce8437e2248))
|
||||||
|
- accept dest in select queries
|
||||||
|
([33b5b6f](https://github.com/uptrace/bun/commit/33b5b6ff660b77238a737a543ca12675c7f0c284))
|
||||||
|
|
||||||
|
## [1.0.25](https://github.com/uptrace/bun/compare/v1.0.23...v1.0.25) (2022-02-22)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
### Deprecated
|
||||||
|
|
||||||
|
In the comming v1.1.x release, Bun will stop automatically adding `,pk,autoincrement` options on
|
||||||
|
`ID int64/int32` fields. This version (v1.0.23) only prints a warning when it encounters such
|
||||||
|
fields, but the code will continue working as before.
|
||||||
|
|
||||||
|
To fix warnings, add missing options:
|
||||||
|
|
||||||
|
```diff
|
||||||
|
type Model struct {
|
||||||
|
- ID int64
|
||||||
|
+ ID int64 `bun:",pk,autoincrement"`
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
To silence warnings:
|
||||||
|
|
||||||
|
```go
|
||||||
|
bun.SetWarnLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||||
|
```
|
||||||
|
|
||||||
|
Bun will also print a warning on [soft delete](https://bun.uptrace.dev/guide/soft-deletes.html)
|
||||||
|
fields without a `,nullzero` option. You can fix the warning by adding missing `,nullzero` or
|
||||||
|
`,allowzero` options.
|
||||||
|
|
||||||
|
In v1.1.x, such options as `,nopk` and `,allowzero` will not be necessary and will be removed.
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- fix missing autoincrement warning
|
||||||
|
([3bc9c72](https://github.com/uptrace/bun/commit/3bc9c721e1c1c5104c256a0c01c4525df6ecefc2))
|
||||||
|
|
||||||
|
* append slice values
|
||||||
|
([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
|
||||||
|
* don't automatically set pk, nullzero, and autoincrement options
|
||||||
|
([519a0df](https://github.com/uptrace/bun/commit/519a0df9707de01a418aba0d6b7482cfe4c9a532))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add CreateTableQuery.DetectForeignKeys
|
||||||
|
([a958fcb](https://github.com/uptrace/bun/commit/a958fcbab680b0c5ad7980f369c7b73f7673db87))
|
||||||
|
|
||||||
|
## [1.0.22](https://github.com/uptrace/bun/compare/v1.0.21...v1.0.22) (2022-01-28)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- improve scan error message
|
||||||
|
([54048b2](https://github.com/uptrace/bun/commit/54048b296b9648fd62107ce6fa6fd7e6e2a648c7))
|
||||||
|
- properly discover json.Marshaler on ptr field
|
||||||
|
([3b321b0](https://github.com/uptrace/bun/commit/3b321b08601c4b8dc6bcaa24adea20875883ac14))
|
||||||
|
|
||||||
|
### Breaking (MySQL, MariaDB)
|
||||||
|
|
||||||
|
- **insert:** get last insert id only with pk support auto increment
|
||||||
|
([79e7c79](https://github.com/uptrace/bun/commit/79e7c797beea54bfc9dc1cb0141a7520ff941b4d)). Make
|
||||||
|
sure your MySQL models have `bun:",pk,autoincrement"` options if you are using autoincrements.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- refuse to start when version check does not pass
|
||||||
|
([ff8d767](https://github.com/uptrace/bun/commit/ff8d76794894eeaebede840e5199720f3f5cf531))
|
||||||
|
- support Column in ValuesQuery
|
||||||
|
([0707679](https://github.com/uptrace/bun/commit/0707679b075cac57efa8e6fe9019b57b2da4bcc7))
|
||||||
|
|
||||||
|
## [1.0.21](https://github.com/uptrace/bun/compare/v1.0.20...v1.0.21) (2022-01-06)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- append where to index create
|
||||||
|
([1de6cea](https://github.com/uptrace/bun/commit/1de6ceaa8bba59b69fbe0cc6916d1b27da5586d8))
|
||||||
|
- check if slice is nil when calling BeforeAppendModel
|
||||||
|
([938d9da](https://github.com/uptrace/bun/commit/938d9dadb72ceeeb906064d9575278929d20cbbe))
|
||||||
|
- **dbfixture:** directly set matching types via reflect
|
||||||
|
([780504c](https://github.com/uptrace/bun/commit/780504cf1da687fc51a22d002ea66e2ccc41e1a3))
|
||||||
|
- properly handle driver.Valuer and type:json
|
||||||
|
([a17454a](https://github.com/uptrace/bun/commit/a17454ac6b95b2a2e927d0c4e4aee96494108389))
|
||||||
|
- support scanning string into uint64
|
||||||
|
([73cc117](https://github.com/uptrace/bun/commit/73cc117a9f7a623ced1fdaedb4546e8e7470e4d3))
|
||||||
|
- unique module name for opentelemetry example
|
||||||
|
([f2054fe](https://github.com/uptrace/bun/commit/f2054fe1d11cea3b21d69dab6f6d6d7d97ba06bb))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add anonymous fields with type name
|
||||||
|
([508375b](https://github.com/uptrace/bun/commit/508375b8f2396cb088fd4399a9259584353eb7e5))
|
||||||
|
- add baseQuery.GetConn()
|
||||||
|
([81a9bee](https://github.com/uptrace/bun/commit/81a9beecb74fed7ec3574a1d42acdf10a74e0b00))
|
||||||
|
- create new queries from baseQuery
|
||||||
|
([ae1dd61](https://github.com/uptrace/bun/commit/ae1dd611a91c2b7c79bc2bc12e9a53e857791e71))
|
||||||
|
- support INSERT ... RETURNING for MariaDB >= 10.5.0
|
||||||
|
([b6531c0](https://github.com/uptrace/bun/commit/b6531c00ecbd4c7ec56b4131fab213f9313edc1b))
|
||||||
|
|
||||||
|
## [1.0.20](https://github.com/uptrace/bun/compare/v1.0.19...v1.0.20) (2021-12-19)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- add Event.QueryTemplate and change Event.Query to be always formatted
|
||||||
|
([52b1ccd](https://github.com/uptrace/bun/commit/52b1ccdf3578418aa427adef9dcf942d90ae4fdd))
|
||||||
|
- change GetTableName to return formatted table name in case ModelTableExpr
|
||||||
|
([95144dd](https://github.com/uptrace/bun/commit/95144dde937b4ac88b36b0bd8b01372421069b44))
|
||||||
|
- change ScanAndCount to work with transactions
|
||||||
|
([5b3f2c0](https://github.com/uptrace/bun/commit/5b3f2c021c424da366caffd33589e8adde821403))
|
||||||
|
- **dbfixture:** directly call funcs bypassing template eval
|
||||||
|
([a61974b](https://github.com/uptrace/bun/commit/a61974ba2d24361c5357fb9bda1f3eceec5a45cd))
|
||||||
|
- don't append CASCADE by default in drop table/column queries
|
||||||
|
([26457ea](https://github.com/uptrace/bun/commit/26457ea5cb20862d232e6e5fa4dbdeac5d444bf1))
|
||||||
|
- **migrate:** mark migrations as applied on error so the migration can be rolled back
|
||||||
|
([8ce33fb](https://github.com/uptrace/bun/commit/8ce33fbbac8e33077c20daf19a14c5ff2291bcae))
|
||||||
|
- respect nullzero when appending struct fields. Fixes
|
||||||
|
[#339](https://github.com/uptrace/bun/issues/339)
|
||||||
|
([ffd02f3](https://github.com/uptrace/bun/commit/ffd02f3170b3cccdd670a48d563cfb41094c05d6))
|
||||||
|
- reuse tx for relation join ([#366](https://github.com/uptrace/bun/issues/366))
|
||||||
|
([60bdb1a](https://github.com/uptrace/bun/commit/60bdb1ac84c0a699429eead3b7fdfbf14fe69ac6))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add `Dialect()` to Transaction and IDB interface
|
||||||
|
([693f1e1](https://github.com/uptrace/bun/commit/693f1e135999fc31cf83b99a2530a695b20f4e1b))
|
||||||
|
- add model embedding via embed:prefix\_
|
||||||
|
([9a2cedc](https://github.com/uptrace/bun/commit/9a2cedc8b08fa8585d4bfced338bd0a40d736b1d))
|
||||||
|
- change the default logoutput to stderr
|
||||||
|
([4bf5773](https://github.com/uptrace/bun/commit/4bf577382f19c64457cbf0d64490401450954654)),
|
||||||
|
closes [#349](https://github.com/uptrace/bun/issues/349)
|
||||||
|
|
||||||
|
## [1.0.19](https://github.com/uptrace/bun/compare/v1.0.18...v1.0.19) (2021-11-30)
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add support for column:name to specify column name
|
||||||
|
([e37b460](https://github.com/uptrace/bun/commit/e37b4602823babc8221970e086cfed90c6ad4cf4))
|
||||||
|
|
||||||
|
## [1.0.18](https://github.com/uptrace/bun/compare/v1.0.17...v1.0.18) (2021-11-24)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- use correct operation for UpdateQuery
|
||||||
|
([687a004](https://github.com/uptrace/bun/commit/687a004ef7ec6fe1ef06c394965dd2c2d822fc82))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add pgdriver.Notify
|
||||||
|
([7ee443d](https://github.com/uptrace/bun/commit/7ee443d1b869d8ddc4746850f7425d0a9ccd012b))
|
||||||
|
- CreateTableQuery.PartitionBy and CreateTableQuery.TableSpace
|
||||||
|
([cd3ab4d](https://github.com/uptrace/bun/commit/cd3ab4d8f3682f5a30b87c2ebc2d7e551d739078))
|
||||||
|
- **pgdriver:** add CopyFrom and CopyTo
|
||||||
|
([0b97703](https://github.com/uptrace/bun/commit/0b977030b5c05f509e11d13550b5f99dfd62358d))
|
||||||
|
- support InsertQuery.Ignore on PostgreSQL
|
||||||
|
([1aa9d14](https://github.com/uptrace/bun/commit/1aa9d149da8e46e63ff79192e394fde4d18d9b60))
|
||||||
|
|
||||||
|
## [1.0.17](https://github.com/uptrace/bun/compare/v1.0.16...v1.0.17) (2021-11-11)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- don't call rollback when tx is already done
|
||||||
|
([8246c2a](https://github.com/uptrace/bun/commit/8246c2a63e2e6eba314201c6ba87f094edf098b9))
|
||||||
|
- **mysql:** escape backslash char in strings
|
||||||
|
([fb32029](https://github.com/uptrace/bun/commit/fb32029ea7604d066800b16df21f239b71bf121d))
|
||||||
|
|
||||||
|
## [1.0.16](https://github.com/uptrace/bun/compare/v1.0.15...v1.0.16) (2021-11-07)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- call query hook when tx is started, committed, or rolled back
|
||||||
|
([30e85b5](https://github.com/uptrace/bun/commit/30e85b5366b2e51951ef17a0cf362b58f708dab1))
|
||||||
|
- **pgdialect:** auto-enable array support if the sql type is an array
|
||||||
|
([62c1012](https://github.com/uptrace/bun/commit/62c1012b2482e83969e5c6f5faf89e655ce78138))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- support multiple tag options join:left_col1=right_col1,join:left_col2=right_col2
|
||||||
|
([78cd5aa](https://github.com/uptrace/bun/commit/78cd5aa60a5c7d1323bb89081db2b2b811113052))
|
||||||
|
- **tag:** log with bad tag name
|
||||||
|
([4e82d75](https://github.com/uptrace/bun/commit/4e82d75be2dabdba1a510df4e1fbb86092f92f4c))
|
||||||
|
|
||||||
|
## [1.0.15](https://github.com/uptrace/bun/compare/v1.0.14...v1.0.15) (2021-10-29)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- fixed bug creating table when model has no columns
|
||||||
|
([042c50b](https://github.com/uptrace/bun/commit/042c50bfe41caaa6e279e02c887c3a84a3acd84f))
|
||||||
|
- init table with dialect once
|
||||||
|
([9a1ce1e](https://github.com/uptrace/bun/commit/9a1ce1e492602742bb2f587e9ed24e50d7d07cad))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- accept columns in WherePK
|
||||||
|
([b3e7035](https://github.com/uptrace/bun/commit/b3e70356db1aa4891115a10902316090fccbc8bf))
|
||||||
|
- support ADD COLUMN IF NOT EXISTS
|
||||||
|
([ca7357c](https://github.com/uptrace/bun/commit/ca7357cdfe283e2f0b94eb638372e18401c486e9))
|
||||||
|
|
||||||
|
## [1.0.14](https://github.com/uptrace/bun/compare/v1.0.13...v1.0.14) (2021-10-24)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- correct binary serialization for mysql ([#259](https://github.com/uptrace/bun/issues/259))
|
||||||
|
([e899f50](https://github.com/uptrace/bun/commit/e899f50b22ef6759ef8c029a6cd3f25f2bde17ef))
|
||||||
|
- correctly escape single quotes in pg arrays
|
||||||
|
([3010847](https://github.com/uptrace/bun/commit/3010847f5c2c50bce1969689a0b77fd8a6fb7e55))
|
||||||
|
- use BLOB sql type to encode []byte in MySQL and SQLite
|
||||||
|
([725ec88](https://github.com/uptrace/bun/commit/725ec8843824a7fc8f4058ead75ab0e62a78192a))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- warn when there are args but no placeholders
|
||||||
|
([06dde21](https://github.com/uptrace/bun/commit/06dde215c8d0bde2b2364597190729a160e536a1))
|
||||||
|
|
||||||
|
## [1.0.13](https://github.com/uptrace/bun/compare/v1.0.12...v1.0.13) (2021-10-17)
|
||||||
|
|
||||||
|
### Breaking Change
|
||||||
|
|
||||||
|
- **pgdriver:** enable TLS by default with InsecureSkipVerify=true
|
||||||
|
([15ec635](https://github.com/uptrace/bun/commit/15ec6356a04d5cf62d2efbeb189610532dc5eb31))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add BeforeAppendModelHook
|
||||||
|
([0b55de7](https://github.com/uptrace/bun/commit/0b55de77aaffc1ed0894ef16f45df77bca7d93c1))
|
||||||
|
- **pgdriver:** add support for unix socket DSN
|
||||||
|
([f398cec](https://github.com/uptrace/bun/commit/f398cec1c3873efdf61ac0b94ebe06c657f0cf91))
|
||||||
|
|
||||||
|
## [1.0.12](https://github.com/uptrace/bun/compare/v1.0.11...v1.0.12) (2021-10-14)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- add InsertQuery.ColumnExpr to specify columns
|
||||||
|
([60ffe29](https://github.com/uptrace/bun/commit/60ffe293b37912d95f28e69734ff51edf4b27da7))
|
||||||
|
- **bundebug:** change WithVerbose to accept a bool flag
|
||||||
|
([b2f8b91](https://github.com/uptrace/bun/commit/b2f8b912de1dc29f40c79066de1e9d6379db666c))
|
||||||
|
- **pgdialect:** fix bytea[] handling
|
||||||
|
([a5ca013](https://github.com/uptrace/bun/commit/a5ca013742c5a2e947b43d13f9c2fc0cf6a65d9c))
|
||||||
|
- **pgdriver:** rename DriverOption to Option
|
||||||
|
([51c1702](https://github.com/uptrace/bun/commit/51c1702431787d7369904b2624e346bf3e59c330))
|
||||||
|
- support allowzero on the soft delete field
|
||||||
|
([d0abec7](https://github.com/uptrace/bun/commit/d0abec71a9a546472a83bd70ed4e6a7357659a9b))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- **bundebug:** allow to configure the hook using env var, for example, BUNDEBUG={0,1,2}
|
||||||
|
([ce92852](https://github.com/uptrace/bun/commit/ce928524cab9a83395f3772ae9dd5d7732af281d))
|
||||||
|
- **bunotel:** report DBStats metrics
|
||||||
|
([b9b1575](https://github.com/uptrace/bun/commit/b9b15750f405cdbd345b776f5a56c6f742bc7361))
|
||||||
|
- **pgdriver:** add Error.StatementTimeout
|
||||||
|
([8a7934d](https://github.com/uptrace/bun/commit/8a7934dd788057828bb2b0983732b4394b74e960))
|
||||||
|
- **pgdriver:** allow setting Network in config
|
||||||
|
([b24b5d8](https://github.com/uptrace/bun/commit/b24b5d8014195a56ad7a4c634c10681038e6044d))
|
||||||
|
|
||||||
|
## [1.0.11](https://github.com/uptrace/bun/compare/v1.0.10...v1.0.11) (2021-10-05)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- **mysqldialect:** remove duplicate AppendTime
|
||||||
|
([8d42090](https://github.com/uptrace/bun/commit/8d42090af34a1760004482c7fc0923b114d79937))
|
||||||
|
|
||||||
|
## [1.0.10](https://github.com/uptrace/bun/compare/v1.0.9...v1.0.10) (2021-10-05)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- add UpdateQuery.OmitZero
|
||||||
|
([2294db6](https://github.com/uptrace/bun/commit/2294db61d228711435fff1075409a30086b37555))
|
||||||
|
- make ExcludeColumn work with many-to-many queries
|
||||||
|
([300e12b](https://github.com/uptrace/bun/commit/300e12b993554ff839ec4fa6bbea97e16aca1b55))
|
||||||
|
- **mysqldialect:** append time in local timezone
|
||||||
|
([e763cc8](https://github.com/uptrace/bun/commit/e763cc81eac4b11fff4e074ad3ff6cd970a71697))
|
||||||
|
- **tagparser:** improve parsing options with brackets
|
||||||
|
([0daa61e](https://github.com/uptrace/bun/commit/0daa61edc3c4d927ed260332b99ee09f4bb6b42f))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add timetz parsing
|
||||||
|
([6e415c4](https://github.com/uptrace/bun/commit/6e415c4c5fa2c8caf4bb4aed4e5897fe5676f5a5))
|
||||||
|
|
||||||
|
## [1.0.9](https://github.com/uptrace/bun/compare/v1.0.8...v1.0.9) (2021-09-27)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- change DBStats to use uint32 instead of uint64 to make it work on i386
|
||||||
|
([caca2a7](https://github.com/uptrace/bun/commit/caca2a7130288dec49fa26b49c8550140ee52f4c))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add IQuery and QueryEvent.IQuery
|
||||||
|
([b762942](https://github.com/uptrace/bun/commit/b762942fa3b1d8686d0a559f93f2a6847b83d9c1))
|
||||||
|
- add QueryEvent.Model
|
||||||
|
([7688201](https://github.com/uptrace/bun/commit/7688201b485d14d3e393956f09a3200ea4d4e31d))
|
||||||
|
- **bunotel:** add experimental bun.query.timing metric
|
||||||
|
([2cdb384](https://github.com/uptrace/bun/commit/2cdb384678631ccadac0fb75f524bd5e91e96ee2))
|
||||||
|
- **pgdriver:** add Config.ConnParams to session config params
|
||||||
|
([408caf0](https://github.com/uptrace/bun/commit/408caf0bb579e23e26fc6149efd6851814c22517))
|
||||||
|
- **pgdriver:** allow specifying timeout in DSN
|
||||||
|
([7dbc71b](https://github.com/uptrace/bun/commit/7dbc71b3494caddc2e97d113f00067071b9e19da))
|
||||||
|
|
||||||
|
## [1.0.8](https://github.com/uptrace/bun/compare/v1.0.7...v1.0.8) (2021-09-18)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- don't append soft delete where for insert queries with on conflict clause
|
||||||
|
([27c477c](https://github.com/uptrace/bun/commit/27c477ce071d4c49c99a2531d638ed9f20e33461))
|
||||||
|
- improve bun.NullTime to accept string
|
||||||
|
([73ad6f5](https://github.com/uptrace/bun/commit/73ad6f5640a0a9b09f8df2bc4ab9cb510021c50c))
|
||||||
|
- make allowzero work with auto-detected primary keys
|
||||||
|
([82ca87c](https://github.com/uptrace/bun/commit/82ca87c7c49797d507b31fdaacf8343716d4feff))
|
||||||
|
- support soft deletes on nil model
|
||||||
|
([0556e3c](https://github.com/uptrace/bun/commit/0556e3c63692a7f4e48659d52b55ffd9cca0202a))
|
||||||
|
|
||||||
|
## [1.0.7](https://github.com/uptrace/bun/compare/v1.0.6...v1.0.7) (2021-09-15)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- don't append zero time as NULL without nullzero tag
|
||||||
|
([3b8d9cb](https://github.com/uptrace/bun/commit/3b8d9cb4e39eb17f79a618396bbbe0adbc66b07b))
|
||||||
|
- **pgdriver:** return PostgreSQL DATE as a string
|
||||||
|
([40be0e8](https://github.com/uptrace/bun/commit/40be0e8ea85f8932b7a410a6fc2dd3acd2d18ebc))
|
||||||
|
- specify table alias for soft delete where
|
||||||
|
([5fff1dc](https://github.com/uptrace/bun/commit/5fff1dc1dd74fa48623a24fa79e358a544dfac0b))
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
- add SelectQuery.Exists helper
|
||||||
|
([c3e59c1](https://github.com/uptrace/bun/commit/c3e59c1bc58b43c4b8e33e7d170ad33a08fbc3c7))
|
||||||
|
|
||||||
|
## [1.0.6](https://github.com/uptrace/bun/compare/v1.0.5...v1.0.6) (2021-09-11)
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
|
||||||
|
- change unique tag to create a separate unique constraint
|
||||||
|
([8401615](https://github.com/uptrace/bun/commit/84016155a77ca77613cc054277fefadae3098757))
|
||||||
|
- improve zero checker for ptr values
|
||||||
|
([2b3623d](https://github.com/uptrace/bun/commit/2b3623dd665d873911fd20ca707016929921e862))
|
||||||
|
|
||||||
|
## v1.0.5 - Sep 09 2021
|
||||||
|
|
||||||
|
- chore: tweak bundebug colors
|
||||||
|
- fix: check if table is present when appending columns
|
||||||
|
- fix: copy []byte when scanning
|
||||||
|
|
||||||
|
## v1.0.4 - Sep 08 2021
|
||||||
|
|
||||||
|
- Added support for MariaDB.
|
||||||
|
- Restored default `SET` for `ON CONFLICT DO UPDATE` queries.
|
||||||
|
|
||||||
|
## v1.0.3 - Sep 06 2021
|
||||||
|
|
||||||
|
- Fixed bulk soft deletes.
|
||||||
|
- pgdialect: fixed scanning into an array pointer.
|
||||||
|
|
||||||
|
## v1.0.2 - Sep 04 2021
|
||||||
|
|
||||||
|
- Changed to completely ignore fields marked with `bun:"-"`. If you want to be able to scan into
|
||||||
|
such columns, use `bun:",scanonly"`.
|
||||||
|
- pgdriver: fixed SASL authentication handling.
|
||||||
|
|
||||||
|
## v1.0.1 - Sep 02 2021
|
||||||
|
|
||||||
|
- pgdriver: added erroneous zero writes retry.
|
||||||
|
- Improved column handling in Relation callback.
|
||||||
|
|
||||||
|
## v1.0.0 - Sep 01 2021
|
||||||
|
|
||||||
|
- First stable release.
|
||||||
|
|
||||||
|
## v0.4.1 - Aug 18 2021
|
||||||
|
|
||||||
|
- Fixed migrate package to properly rollback migrations.
|
||||||
|
- Added `allowzero` tag option that undoes `nullzero` option.
|
||||||
|
|
||||||
|
## v0.4.0 - Aug 11 2021
|
||||||
|
|
||||||
|
- Changed `WhereGroup` function to accept `*SelectQuery`.
|
||||||
|
- Fixed query hooks for count queries.
|
||||||
|
|
||||||
|
## v0.3.4 - Jul 19 2021
|
||||||
|
|
||||||
|
- Renamed `migrate.CreateGo` to `CreateGoMigration`.
|
||||||
|
- Added `migrate.WithPackageName` to customize the Go package name in generated migrations.
|
||||||
|
- Renamed `migrate.CreateSQL` to `CreateSQLMigrations` and changed `CreateSQLMigrations` to create
|
||||||
|
both up and down migration files.
|
||||||
|
|
||||||
|
## v0.3.1 - Jul 12 2021
|
||||||
|
|
||||||
|
- Renamed `alias` field struct tag to `alt` so it is not confused with column alias.
|
||||||
|
- Reworked migrate package API. See
|
||||||
|
[migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details.
|
||||||
|
|
||||||
|
## v0.3.0 - Jul 09 2021
|
||||||
|
|
||||||
|
- Changed migrate package to return structured data instead of logging the progress. See
|
||||||
|
[migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details.
|
||||||
|
|
||||||
|
## v0.2.14 - Jul 01 2021
|
||||||
|
|
||||||
|
- Added [sqliteshim](https://pkg.go.dev/github.com/uptrace/bun/driver/sqliteshim) by
|
||||||
|
[Ivan Trubach](https://github.com/tie).
|
||||||
|
- Added support for MySQL 5.7 in addition to MySQL 8.
|
||||||
|
|
||||||
|
## v0.2.12 - Jun 29 2021
|
||||||
|
|
||||||
|
- Fixed scanners for net.IP and net.IPNet.
|
||||||
|
|
||||||
|
## v0.2.10 - Jun 29 2021
|
||||||
|
|
||||||
|
- Fixed pgdriver to format passed query args.
|
||||||
|
|
||||||
|
## v0.2.9 - Jun 27 2021
|
||||||
|
|
||||||
|
- Added support for prepared statements in pgdriver.
|
||||||
|
|
||||||
|
## v0.2.7 - Jun 26 2021
|
||||||
|
|
||||||
|
- Added `UpdateQuery.Bulk` helper to generate bulk-update queries.
|
||||||
|
|
||||||
|
Before:
|
||||||
|
|
||||||
|
```go
|
||||||
|
models := []Model{
|
||||||
|
{42, "hello"},
|
||||||
|
{43, "world"},
|
||||||
|
}
|
||||||
|
return db.NewUpdate().
|
||||||
|
With("_data", db.NewValues(&models)).
|
||||||
|
Model(&models).
|
||||||
|
Table("_data").
|
||||||
|
Set("model.str = _data.str").
|
||||||
|
Where("model.id = _data.id")
|
||||||
|
```
|
||||||
|
|
||||||
|
Now:
|
||||||
|
|
||||||
|
```go
|
||||||
|
db.NewUpdate().
|
||||||
|
Model(&models).
|
||||||
|
Bulk()
|
||||||
|
```
|
||||||
|
|
||||||
|
## v0.2.5 - Jun 25 2021
|
||||||
|
|
||||||
|
- Changed time.Time to always append zero time as `NULL`.
|
||||||
|
- Added `db.RunInTx` helper.
|
||||||
|
|
||||||
|
## v0.2.4 - Jun 21 2021
|
||||||
|
|
||||||
|
- Added SSL support to pgdriver.
|
||||||
|
|
||||||
|
## v0.2.3 - Jun 20 2021
|
||||||
|
|
||||||
|
- Replaced `ForceDelete(ctx)` with `ForceDelete().Exec(ctx)` for soft deletes.
|
||||||
|
|
||||||
|
## v0.2.1 - Jun 17 2021
|
||||||
|
|
||||||
|
- Renamed `DBI` to `IConn`. `IConn` is a common interface for `*sql.DB`, `*sql.Conn`, and `*sql.Tx`.
|
||||||
|
- Added `IDB`. `IDB` is a common interface for `*bun.DB`, `bun.Conn`, and `bun.Tx`.
|
||||||
|
|
||||||
|
## v0.2.0 - Jun 16 2021
|
||||||
|
|
||||||
|
- Changed [model hooks](https://bun.uptrace.dev/guide/hooks.html#model-hooks). See
|
||||||
|
[model-hooks](example/model-hooks) example.
|
||||||
|
- Renamed `has-one` to `belongs-to`. Renamed `belongs-to` to `has-one`. Previously Bun used
|
||||||
|
incorrect names for these relations.
|
@ -0,0 +1,34 @@
|
|||||||
|
## Running tests
|
||||||
|
|
||||||
|
To run tests, you need Docker which starts PostgreSQL and MySQL servers:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
cd internal/dbtest
|
||||||
|
./test.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
To ease debugging, you can run tests and print all executed queries:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
BUNDEBUG=2 TZ= go test -run=TestName
|
||||||
|
```
|
||||||
|
|
||||||
|
## Releasing
|
||||||
|
|
||||||
|
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
TAG=v1.0.0 ./scripts/release.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Open a pull request and wait for the build to finish.
|
||||||
|
|
||||||
|
3. Merge the pull request and run `tag.sh` to create tags for packages:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
TAG=v1.0.0 ./scripts/tag.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
To contribute to the docs visit https://github.com/go-bun/bun-docs
|
@ -0,0 +1,24 @@
|
|||||||
|
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,30 @@
|
|||||||
|
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||||
|
EXAMPLE_GO_MOD_DIRS := $(shell find ./example/ -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||||
|
|
||||||
|
test:
|
||||||
|
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||||
|
echo "go test in $${dir}"; \
|
||||||
|
(cd "$${dir}" && \
|
||||||
|
go test && \
|
||||||
|
env GOOS=linux GOARCH=386 go test && \
|
||||||
|
go vet); \
|
||||||
|
done
|
||||||
|
|
||||||
|
go_mod_tidy:
|
||||||
|
go get -u && go mod tidy -go=1.18
|
||||||
|
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||||
|
echo "go mod tidy in $${dir}"; \
|
||||||
|
(cd "$${dir}" && \
|
||||||
|
go get -u ./... && \
|
||||||
|
go mod tidy -go=1.18); \
|
||||||
|
done
|
||||||
|
|
||||||
|
fmt:
|
||||||
|
gofmt -w -s ./
|
||||||
|
goimports -w -local github.com/uptrace/bun ./
|
||||||
|
|
||||||
|
run-examples:
|
||||||
|
set -e; for dir in $(EXAMPLE_GO_MOD_DIRS); do \
|
||||||
|
echo "go run . in $${dir}"; \
|
||||||
|
(cd "$${dir}" && go run .); \
|
||||||
|
done
|
@ -0,0 +1,144 @@
|
|||||||
|
# SQL-first Golang ORM for PostgreSQL, MySQL, MSSQL, and SQLite
|
||||||
|
|
||||||
|
[![build workflow](https://github.com/uptrace/bun/actions/workflows/build.yml/badge.svg)](https://github.com/uptrace/bun/actions)
|
||||||
|
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun)](https://pkg.go.dev/github.com/uptrace/bun)
|
||||||
|
[![Documentation](https://img.shields.io/badge/bun-documentation-informational)](https://bun.uptrace.dev/)
|
||||||
|
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
|
||||||
|
|
||||||
|
> Bun is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace
|
||||||
|
> is an open-source APM tool that supports distributed tracing, metrics, and logs. You can use it to
|
||||||
|
> monitor applications and set up automatic alerts to receive notifications via email, Slack,
|
||||||
|
> Telegram, and others.
|
||||||
|
>
|
||||||
|
> See [OpenTelemetry](example/opentelemetry) example which demonstrates how you can use Uptrace to
|
||||||
|
> monitor Bun.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- Works with [PostgreSQL](https://bun.uptrace.dev/guide/drivers.html#postgresql),
|
||||||
|
[MySQL](https://bun.uptrace.dev/guide/drivers.html#mysql) (including MariaDB),
|
||||||
|
[MSSQL](https://bun.uptrace.dev/guide/drivers.html#mssql),
|
||||||
|
[SQLite](https://bun.uptrace.dev/guide/drivers.html#sqlite).
|
||||||
|
- [ORM-like](/example/basic/) experience using good old SQL. Bun supports structs, map, scalars, and
|
||||||
|
slices of map/structs/scalars.
|
||||||
|
- [Bulk inserts](https://bun.uptrace.dev/guide/query-insert.html).
|
||||||
|
- [Bulk updates](https://bun.uptrace.dev/guide/query-update.html) using common table expressions.
|
||||||
|
- [Bulk deletes](https://bun.uptrace.dev/guide/query-delete.html).
|
||||||
|
- [Fixtures](https://bun.uptrace.dev/guide/fixtures.html).
|
||||||
|
- [Migrations](https://bun.uptrace.dev/guide/migrations.html).
|
||||||
|
- [Soft deletes](https://bun.uptrace.dev/guide/soft-deletes.html).
|
||||||
|
|
||||||
|
### Resources
|
||||||
|
|
||||||
|
- [**Get started**](https://bun.uptrace.dev/guide/golang-orm.html)
|
||||||
|
- [Examples](https://github.com/uptrace/bun/tree/master/example)
|
||||||
|
- [Discussions](https://github.com/uptrace/bun/discussions)
|
||||||
|
- [Chat](https://discord.gg/rWtp5Aj)
|
||||||
|
- [Reference](https://pkg.go.dev/github.com/uptrace/bun)
|
||||||
|
- [Starter kit](https://github.com/go-bun/bun-starter-kit)
|
||||||
|
|
||||||
|
### Tutorials
|
||||||
|
|
||||||
|
Wrote a tutorial for Bun? Create a PR to add here and on [Bun](https://bun.uptrace.dev/) site.
|
||||||
|
|
||||||
|
### Featured projects using Bun
|
||||||
|
|
||||||
|
- [uptrace](https://github.com/uptrace/uptrace) - Distributed tracing and metrics.
|
||||||
|
- [paralus](https://github.com/paralus/paralus) - All-in-one Kubernetes access manager.
|
||||||
|
- [inovex/scrumlr.io](https://github.com/inovex/scrumlr.io) - Webapp for collaborative online
|
||||||
|
retrospectives.
|
||||||
|
- [gotosocial](https://github.com/superseriousbusiness/gotosocial) - Golang fediverse server.
|
||||||
|
- [lorawan-stack](https://github.com/TheThingsNetwork/lorawan-stack) - The Things Stack, an Open
|
||||||
|
Source LoRaWAN Network Server.
|
||||||
|
- [anti-phishing-bot](https://github.com/Benricheson101/anti-phishing-bot) - Discord bot for
|
||||||
|
deleting Steam/Discord phishing links.
|
||||||
|
- [emerald-web3-gateway](https://github.com/oasisprotocol/emerald-web3-gateway) - Web3 Gateway for
|
||||||
|
the Oasis Emerald paratime.
|
||||||
|
- [lndhub.go](https://github.com/getAlby/lndhub.go) - accounting wrapper for the Lightning Network.
|
||||||
|
- [penguin-statistics](https://github.com/penguin-statistics/backend-next) - Penguin Statistics v3
|
||||||
|
Backend.
|
||||||
|
- And
|
||||||
|
[hundreds more](https://github.com/uptrace/bun/network/dependents?package_id=UGFja2FnZS0yMjkxOTc4OTA4).
|
||||||
|
|
||||||
|
## Why another database client?
|
||||||
|
|
||||||
|
So you can elegantly write complex queries:
|
||||||
|
|
||||||
|
```go
|
||||||
|
regionalSales := db.NewSelect().
|
||||||
|
ColumnExpr("region").
|
||||||
|
ColumnExpr("SUM(amount) AS total_sales").
|
||||||
|
TableExpr("orders").
|
||||||
|
GroupExpr("region")
|
||||||
|
|
||||||
|
topRegions := db.NewSelect().
|
||||||
|
ColumnExpr("region").
|
||||||
|
TableExpr("regional_sales").
|
||||||
|
Where("total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales)")
|
||||||
|
|
||||||
|
var items []map[string]interface{}
|
||||||
|
err := db.NewSelect().
|
||||||
|
With("regional_sales", regionalSales).
|
||||||
|
With("top_regions", topRegions).
|
||||||
|
ColumnExpr("region").
|
||||||
|
ColumnExpr("product").
|
||||||
|
ColumnExpr("SUM(quantity) AS product_units").
|
||||||
|
ColumnExpr("SUM(amount) AS product_sales").
|
||||||
|
TableExpr("orders").
|
||||||
|
Where("region IN (SELECT region FROM top_regions)").
|
||||||
|
GroupExpr("region").
|
||||||
|
GroupExpr("product").
|
||||||
|
Scan(ctx, &items)
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
WITH regional_sales AS (
|
||||||
|
SELECT region, SUM(amount) AS total_sales
|
||||||
|
FROM orders
|
||||||
|
GROUP BY region
|
||||||
|
), top_regions AS (
|
||||||
|
SELECT region
|
||||||
|
FROM regional_sales
|
||||||
|
WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales)
|
||||||
|
)
|
||||||
|
SELECT region,
|
||||||
|
product,
|
||||||
|
SUM(quantity) AS product_units,
|
||||||
|
SUM(amount) AS product_sales
|
||||||
|
FROM orders
|
||||||
|
WHERE region IN (SELECT region FROM top_regions)
|
||||||
|
GROUP BY region, product
|
||||||
|
```
|
||||||
|
|
||||||
|
And scan results into scalars, structs, maps, slices of structs/maps/scalars:
|
||||||
|
|
||||||
|
```go
|
||||||
|
users := make([]User, 0)
|
||||||
|
if err := db.NewSelect().Model(&users).OrderExpr("id ASC").Scan(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
user1 := new(User)
|
||||||
|
if err := db.NewSelect().Model(user1).Where("id = ?", 1).Scan(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
See [**Getting started**](https://bun.uptrace.dev/guide/golang-orm.html) guide and check
|
||||||
|
[examples](example).
|
||||||
|
|
||||||
|
## See also
|
||||||
|
|
||||||
|
- [Golang HTTP router](https://github.com/uptrace/bunrouter)
|
||||||
|
- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
|
||||||
|
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
See [CONTRIBUTING.md](CONTRIBUTING.md) for some hints.
|
||||||
|
|
||||||
|
And thanks to all the people who already contributed!
|
||||||
|
|
||||||
|
<a href="https://github.com/uptrace/bun/graphs/contributors">
|
||||||
|
<img src="https://contributors-img.web.app/image?repo=uptrace/bun" />
|
||||||
|
</a>
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue