parent
8b4b8d3b59
commit
c420ce5f86
@ -0,0 +1,16 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
type ConfigBunClient struct {
|
||||
Dns string // 地址
|
||||
}
|
||||
|
||||
// BunClient
|
||||
// https://bun.uptrace.dev/
|
||||
type BunClient struct {
|
||||
Db *bun.DB // 驱动
|
||||
config *ConfigBunClient // 配置
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
_ "github.com/denisenkom/go-mssqldb"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect/mssqldialect"
|
||||
)
|
||||
|
||||
func NewBunMssqlClient(config *ConfigBunClient) (*BunClient, error) {
|
||||
|
||||
var err error
|
||||
c := &BunClient{config: config}
|
||||
|
||||
sqlDb, err := sql.Open("sqlserver", c.config.Dns)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
||||
}
|
||||
|
||||
c.Db = bun.NewDB(sqlDb, mssqldialect.New())
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
_ "github.com/go-sql-driver/mysql"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect/mysqldialect"
|
||||
)
|
||||
|
||||
func NewBunMysqlClient(config *ConfigBunClient) (*BunClient, error) {
|
||||
|
||||
var err error
|
||||
c := &BunClient{config: config}
|
||||
|
||||
sqlDb, err := sql.Open("mysql", c.config.Dns)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
||||
}
|
||||
|
||||
c.Db = bun.NewDB(sqlDb, mysqldialect.New())
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect/pgdialect"
|
||||
"github.com/uptrace/bun/driver/pgdriver"
|
||||
)
|
||||
|
||||
func NewBunPgsqlClient(config *ConfigBunClient) (*BunClient, error) {
|
||||
|
||||
c := &BunClient{config: config}
|
||||
|
||||
sqlDb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(c.config.Dns)))
|
||||
|
||||
c.Db = bun.NewDB(sqlDb, pgdialect.New())
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect/sqlitedialect"
|
||||
"github.com/uptrace/bun/driver/sqliteshim"
|
||||
)
|
||||
|
||||
func NewBunSqliteClient(config *ConfigBunClient) (*BunClient, error) {
|
||||
|
||||
var err error
|
||||
c := &BunClient{config: config}
|
||||
|
||||
sqlDb, err := sql.Open(sqliteshim.ShimName, c.config.Dns)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("加载驱动失败:%v", err))
|
||||
}
|
||||
|
||||
c.Db = bun.NewDB(sqlDb, sqlitedialect.New())
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/lqs/sqlingo"
|
||||
)
|
||||
|
||||
func NewSqLiNgoPostgresClient(config *ConfigSqLiNgoClient) (*SqLiNgoClient, error) {
|
||||
|
||||
var err error
|
||||
c := &SqLiNgoClient{config: config}
|
||||
|
||||
c.Db, err = sqlingo.Open("postgres", c.config.Dns)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("连接失败:%v", err))
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,21 @@
|
||||
package dorm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/lqs/sqlingo"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
)
|
||||
|
||||
func NewSqLiNgoSqliteClient(config *ConfigSqLiNgoClient) (*SqLiNgoClient, error) {
|
||||
|
||||
var err error
|
||||
c := &SqLiNgoClient{config: config}
|
||||
|
||||
c.Db, err = sqlingo.Open("sqlite", c.config.Dns)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("连接失败:%v", err))
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
Copyright (C) 2014 Kevin Ballard
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the "Software"),
|
||||
to deal in the Software without restriction, including without limitation
|
||||
the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
and/or sell copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
|
||||
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
@ -0,0 +1,36 @@
|
||||
PACKAGE
|
||||
|
||||
package shellquote
|
||||
import "github.com/kballard/go-shellquote"
|
||||
|
||||
Shellquote provides utilities for joining/splitting strings using sh's
|
||||
word-splitting rules.
|
||||
|
||||
VARIABLES
|
||||
|
||||
var (
|
||||
UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string")
|
||||
UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string")
|
||||
UnterminatedEscapeError = errors.New("Unterminated backslash-escape")
|
||||
)
|
||||
|
||||
|
||||
FUNCTIONS
|
||||
|
||||
func Join(args ...string) string
|
||||
Join quotes each argument and joins them with a space. If passed to
|
||||
/bin/sh, the resulting string will be split back into the original
|
||||
arguments.
|
||||
|
||||
func Split(input string) (words []string, err error)
|
||||
Split splits a string according to /bin/sh's word-splitting rules. It
|
||||
supports backslash-escapes, single-quotes, and double-quotes. Notably it
|
||||
does not support the $'' style of quoting. It also doesn't attempt to
|
||||
perform any other sort of expansion, including brace expansion, shell
|
||||
expansion, or pathname expansion.
|
||||
|
||||
If the given input has an unterminated quoted string or ends in a
|
||||
backslash-escape, one of UnterminatedSingleQuoteError,
|
||||
UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned.
|
||||
|
||||
|
@ -0,0 +1,3 @@
|
||||
// Shellquote provides utilities for joining/splitting strings using sh's
|
||||
// word-splitting rules.
|
||||
package shellquote
|
@ -0,0 +1,102 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Join quotes each argument and joins them with a space.
|
||||
// If passed to /bin/sh, the resulting string will be split back into the
|
||||
// original arguments.
|
||||
func Join(args ...string) string {
|
||||
var buf bytes.Buffer
|
||||
for i, arg := range args {
|
||||
if i != 0 {
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
quote(arg, &buf)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
const (
|
||||
specialChars = "\\'\"`${[|&;<>()*?!"
|
||||
extraSpecialChars = " \t\n"
|
||||
prefixChars = "~"
|
||||
)
|
||||
|
||||
func quote(word string, buf *bytes.Buffer) {
|
||||
// We want to try to produce a "nice" output. As such, we will
|
||||
// backslash-escape most characters, but if we encounter a space, or if we
|
||||
// encounter an extra-special char (which doesn't work with
|
||||
// backslash-escaping) we switch over to quoting the whole word. We do this
|
||||
// with a space because it's typically easier for people to read multi-word
|
||||
// arguments when quoted with a space rather than with ugly backslashes
|
||||
// everywhere.
|
||||
origLen := buf.Len()
|
||||
|
||||
if len(word) == 0 {
|
||||
// oops, no content
|
||||
buf.WriteString("''")
|
||||
return
|
||||
}
|
||||
|
||||
cur, prev := word, word
|
||||
atStart := true
|
||||
for len(cur) > 0 {
|
||||
c, l := utf8.DecodeRuneInString(cur)
|
||||
cur = cur[l:]
|
||||
if strings.ContainsRune(specialChars, c) || (atStart && strings.ContainsRune(prefixChars, c)) {
|
||||
// copy the non-special chars up to this point
|
||||
if len(cur) < len(prev) {
|
||||
buf.WriteString(prev[0 : len(prev)-len(cur)-l])
|
||||
}
|
||||
buf.WriteByte('\\')
|
||||
buf.WriteRune(c)
|
||||
prev = cur
|
||||
} else if strings.ContainsRune(extraSpecialChars, c) {
|
||||
// start over in quote mode
|
||||
buf.Truncate(origLen)
|
||||
goto quote
|
||||
}
|
||||
atStart = false
|
||||
}
|
||||
if len(prev) > 0 {
|
||||
buf.WriteString(prev)
|
||||
}
|
||||
return
|
||||
|
||||
quote:
|
||||
// quote mode
|
||||
// Use single-quotes, but if we find a single-quote in the word, we need
|
||||
// to terminate the string, emit an escaped quote, and start the string up
|
||||
// again
|
||||
inQuote := false
|
||||
for len(word) > 0 {
|
||||
i := strings.IndexRune(word, '\'')
|
||||
if i == -1 {
|
||||
break
|
||||
}
|
||||
if i > 0 {
|
||||
if !inQuote {
|
||||
buf.WriteByte('\'')
|
||||
inQuote = true
|
||||
}
|
||||
buf.WriteString(word[0:i])
|
||||
}
|
||||
word = word[i+1:]
|
||||
if inQuote {
|
||||
buf.WriteByte('\'')
|
||||
inQuote = false
|
||||
}
|
||||
buf.WriteString("\\'")
|
||||
}
|
||||
if len(word) > 0 {
|
||||
if !inQuote {
|
||||
buf.WriteByte('\'')
|
||||
}
|
||||
buf.WriteString(word)
|
||||
buf.WriteByte('\'')
|
||||
}
|
||||
}
|
@ -0,0 +1,156 @@
|
||||
package shellquote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
UnterminatedSingleQuoteError = errors.New("Unterminated single-quoted string")
|
||||
UnterminatedDoubleQuoteError = errors.New("Unterminated double-quoted string")
|
||||
UnterminatedEscapeError = errors.New("Unterminated backslash-escape")
|
||||
)
|
||||
|
||||
var (
|
||||
splitChars = " \n\t"
|
||||
singleChar = '\''
|
||||
doubleChar = '"'
|
||||
escapeChar = '\\'
|
||||
doubleEscapeChars = "$`\"\n\\"
|
||||
)
|
||||
|
||||
// Split splits a string according to /bin/sh's word-splitting rules. It
|
||||
// supports backslash-escapes, single-quotes, and double-quotes. Notably it does
|
||||
// not support the $'' style of quoting. It also doesn't attempt to perform any
|
||||
// other sort of expansion, including brace expansion, shell expansion, or
|
||||
// pathname expansion.
|
||||
//
|
||||
// If the given input has an unterminated quoted string or ends in a
|
||||
// backslash-escape, one of UnterminatedSingleQuoteError,
|
||||
// UnterminatedDoubleQuoteError, or UnterminatedEscapeError is returned.
|
||||
func Split(input string) (words []string, err error) {
|
||||
var buf bytes.Buffer
|
||||
words = make([]string, 0)
|
||||
|
||||
for len(input) > 0 {
|
||||
// skip any splitChars at the start
|
||||
c, l := utf8.DecodeRuneInString(input)
|
||||
if strings.ContainsRune(splitChars, c) {
|
||||
input = input[l:]
|
||||
continue
|
||||
} else if c == escapeChar {
|
||||
// Look ahead for escaped newline so we can skip over it
|
||||
next := input[l:]
|
||||
if len(next) == 0 {
|
||||
err = UnterminatedEscapeError
|
||||
return
|
||||
}
|
||||
c2, l2 := utf8.DecodeRuneInString(next)
|
||||
if c2 == '\n' {
|
||||
input = next[l2:]
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var word string
|
||||
word, input, err = splitWord(input, &buf)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
words = append(words, word)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func splitWord(input string, buf *bytes.Buffer) (word string, remainder string, err error) {
|
||||
buf.Reset()
|
||||
|
||||
raw:
|
||||
{
|
||||
cur := input
|
||||
for len(cur) > 0 {
|
||||
c, l := utf8.DecodeRuneInString(cur)
|
||||
cur = cur[l:]
|
||||
if c == singleChar {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
||||
input = cur
|
||||
goto single
|
||||
} else if c == doubleChar {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
||||
input = cur
|
||||
goto double
|
||||
} else if c == escapeChar {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
||||
input = cur
|
||||
goto escape
|
||||
} else if strings.ContainsRune(splitChars, c) {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
||||
return buf.String(), cur, nil
|
||||
}
|
||||
}
|
||||
if len(input) > 0 {
|
||||
buf.WriteString(input)
|
||||
input = ""
|
||||
}
|
||||
goto done
|
||||
}
|
||||
|
||||
escape:
|
||||
{
|
||||
if len(input) == 0 {
|
||||
return "", "", UnterminatedEscapeError
|
||||
}
|
||||
c, l := utf8.DecodeRuneInString(input)
|
||||
if c == '\n' {
|
||||
// a backslash-escaped newline is elided from the output entirely
|
||||
} else {
|
||||
buf.WriteString(input[:l])
|
||||
}
|
||||
input = input[l:]
|
||||
}
|
||||
goto raw
|
||||
|
||||
single:
|
||||
{
|
||||
i := strings.IndexRune(input, singleChar)
|
||||
if i == -1 {
|
||||
return "", "", UnterminatedSingleQuoteError
|
||||
}
|
||||
buf.WriteString(input[0:i])
|
||||
input = input[i+1:]
|
||||
goto raw
|
||||
}
|
||||
|
||||
double:
|
||||
{
|
||||
cur := input
|
||||
for len(cur) > 0 {
|
||||
c, l := utf8.DecodeRuneInString(cur)
|
||||
cur = cur[l:]
|
||||
if c == doubleChar {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l])
|
||||
input = cur
|
||||
goto raw
|
||||
} else if c == escapeChar {
|
||||
// bash only supports certain escapes in double-quoted strings
|
||||
c2, l2 := utf8.DecodeRuneInString(cur)
|
||||
cur = cur[l2:]
|
||||
if strings.ContainsRune(doubleEscapeChars, c2) {
|
||||
buf.WriteString(input[0 : len(input)-len(cur)-l-l2])
|
||||
if c2 == '\n' {
|
||||
// newline is special, skip the backslash entirely
|
||||
} else {
|
||||
buf.WriteRune(c2)
|
||||
}
|
||||
input = cur
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", "", UnterminatedDoubleQuoteError
|
||||
}
|
||||
|
||||
done:
|
||||
return buf.String(), input, nil
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
- 1.12.x
|
||||
- 1.13.x
|
||||
- tip
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
@ -0,0 +1,82 @@
|
||||
Copyright (c) 2016, Tom Thorogood.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
* Neither the name of the Tom Thorogood nor the
|
||||
names of its contributors may be used to endorse or promote products
|
||||
derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
||||
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
---- Portions of the source code are also covered by the following license: ----
|
||||
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
---- Portions of the source code are also covered by the following license: ----
|
||||
|
||||
Copyright (c) 2005-2016, Wojciech Muła
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
||||
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
|
||||
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
|
||||
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,108 @@
|
||||
# go-hex
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/tmthrgd/go-hex?status.svg)](https://godoc.org/github.com/tmthrgd/go-hex)
|
||||
[![Build Status](https://travis-ci.org/tmthrgd/go-hex.svg?branch=master)](https://travis-ci.org/tmthrgd/go-hex)
|
||||
|
||||
An efficient hexadecimal implementation for Golang.
|
||||
|
||||
go-hex provides hex encoding and decoding using SSE/AVX instructions on x86-64.
|
||||
|
||||
## Download
|
||||
|
||||
```
|
||||
go get github.com/tmthrgd/go-hex
|
||||
```
|
||||
|
||||
## Benchmark
|
||||
|
||||
go-hex:
|
||||
```
|
||||
BenchmarkEncode/15-8 100000000 17.4 ns/op 863.43 MB/s
|
||||
BenchmarkEncode/32-8 100000000 11.9 ns/op 2690.43 MB/s
|
||||
BenchmarkEncode/128-8 100000000 21.4 ns/op 5982.92 MB/s
|
||||
BenchmarkEncode/1k-8 20000000 88.5 ns/op 11572.80 MB/s
|
||||
BenchmarkEncode/16k-8 1000000 1254 ns/op 13058.10 MB/s
|
||||
BenchmarkEncode/128k-8 100000 12965 ns/op 10109.53 MB/s
|
||||
BenchmarkEncode/1M-8 10000 119465 ns/op 8777.23 MB/s
|
||||
BenchmarkEncode/16M-8 500 3530380 ns/op 4752.24 MB/s
|
||||
BenchmarkEncode/128M-8 50 28001913 ns/op 4793.16 MB/s
|
||||
BenchmarkDecode/14-8 100000000 12.6 ns/op 1110.01 MB/s
|
||||
BenchmarkDecode/32-8 100000000 12.5 ns/op 2558.10 MB/s
|
||||
BenchmarkDecode/128-8 50000000 27.2 ns/op 4697.66 MB/s
|
||||
BenchmarkDecode/1k-8 10000000 168 ns/op 6093.43 MB/s
|
||||
BenchmarkDecode/16k-8 500000 2543 ns/op 6442.09 MB/s
|
||||
BenchmarkDecode/128k-8 100000 20339 ns/op 6444.24 MB/s
|
||||
BenchmarkDecode/1M-8 10000 164313 ns/op 6381.57 MB/s
|
||||
BenchmarkDecode/16M-8 500 3099822 ns/op 5412.31 MB/s
|
||||
BenchmarkDecode/128M-8 50 24865822 ns/op 5397.68 MB/s
|
||||
```
|
||||
|
||||
[encoding/hex](https://golang.org/pkg/encoding/hex/):
|
||||
```
|
||||
BenchmarkRefEncode/15-8 50000000 36.1 ns/op 415.07 MB/s
|
||||
BenchmarkRefEncode/32-8 20000000 72.9 ns/op 439.14 MB/s
|
||||
BenchmarkRefEncode/128-8 5000000 289 ns/op 441.54 MB/s
|
||||
BenchmarkRefEncode/1k-8 1000000 2268 ns/op 451.49 MB/s
|
||||
BenchmarkRefEncode/16k-8 30000 39110 ns/op 418.91 MB/s
|
||||
BenchmarkRefEncode/128k-8 5000 291260 ns/op 450.02 MB/s
|
||||
BenchmarkRefEncode/1M-8 1000 2277578 ns/op 460.39 MB/s
|
||||
BenchmarkRefEncode/16M-8 30 37087543 ns/op 452.37 MB/s
|
||||
BenchmarkRefEncode/128M-8 5 293611713 ns/op 457.13 MB/s
|
||||
BenchmarkRefDecode/14-8 30000000 53.7 ns/op 260.49 MB/s
|
||||
BenchmarkRefDecode/32-8 10000000 128 ns/op 248.44 MB/s
|
||||
BenchmarkRefDecode/128-8 3000000 481 ns/op 265.95 MB/s
|
||||
BenchmarkRefDecode/1k-8 300000 4172 ns/op 245.43 MB/s
|
||||
BenchmarkRefDecode/16k-8 10000 111989 ns/op 146.30 MB/s
|
||||
BenchmarkRefDecode/128k-8 2000 909077 ns/op 144.18 MB/s
|
||||
BenchmarkRefDecode/1M-8 200 7275779 ns/op 144.12 MB/s
|
||||
BenchmarkRefDecode/16M-8 10 116574839 ns/op 143.92 MB/s
|
||||
BenchmarkRefDecode/128M-8 2 933871637 ns/op 143.72 MB/s
|
||||
```
|
||||
|
||||
[encoding/hex](https://golang.org/pkg/encoding/hex/) -> go-hex:
|
||||
```
|
||||
benchmark old ns/op new ns/op delta
|
||||
BenchmarkEncode/15-8 36.1 17.4 -51.80%
|
||||
BenchmarkEncode/32-8 72.9 11.9 -83.68%
|
||||
BenchmarkEncode/128-8 289 21.4 -92.60%
|
||||
BenchmarkEncode/1k-8 2268 88.5 -96.10%
|
||||
BenchmarkEncode/16k-8 39110 1254 -96.79%
|
||||
BenchmarkEncode/128k-8 291260 12965 -95.55%
|
||||
BenchmarkEncode/1M-8 2277578 119465 -94.75%
|
||||
BenchmarkEncode/16M-8 37087543 3530380 -90.48%
|
||||
BenchmarkEncode/128M-8 293611713 28001913 -90.46%
|
||||
BenchmarkDecode/14-8 53.7 12.6 -76.54%
|
||||
BenchmarkDecode/32-8 128 12.5 -90.23%
|
||||
BenchmarkDecode/128-8 481 27.2 -94.35%
|
||||
BenchmarkDecode/1k-8 4172 168 -95.97%
|
||||
BenchmarkDecode/16k-8 111989 2543 -97.73%
|
||||
BenchmarkDecode/128k-8 909077 20339 -97.76%
|
||||
BenchmarkDecode/1M-8 7275779 164313 -97.74%
|
||||
BenchmarkDecode/16M-8 116574839 3099822 -97.34%
|
||||
BenchmarkDecode/128M-8 933871637 24865822 -97.34%
|
||||
|
||||
benchmark old MB/s new MB/s speedup
|
||||
BenchmarkEncode/15-8 415.07 863.43 2.08x
|
||||
BenchmarkEncode/32-8 439.14 2690.43 6.13x
|
||||
BenchmarkEncode/128-8 441.54 5982.92 13.55x
|
||||
BenchmarkEncode/1k-8 451.49 11572.80 25.63x
|
||||
BenchmarkEncode/16k-8 418.91 13058.10 31.17x
|
||||
BenchmarkEncode/128k-8 450.02 10109.53 22.46x
|
||||
BenchmarkEncode/1M-8 460.39 8777.23 19.06x
|
||||
BenchmarkEncode/16M-8 452.37 4752.24 10.51x
|
||||
BenchmarkEncode/128M-8 457.13 4793.16 10.49x
|
||||
BenchmarkDecode/14-8 260.49 1110.01 4.26x
|
||||
BenchmarkDecode/32-8 248.44 2558.10 10.30x
|
||||
BenchmarkDecode/128-8 265.95 4697.66 17.66x
|
||||
BenchmarkDecode/1k-8 245.43 6093.43 24.83x
|
||||
BenchmarkDecode/16k-8 146.30 6442.09 44.03x
|
||||
BenchmarkDecode/128k-8 144.18 6444.24 44.70x
|
||||
BenchmarkDecode/1M-8 144.12 6381.57 44.28x
|
||||
BenchmarkDecode/16M-8 143.92 5412.31 37.61x
|
||||
BenchmarkDecode/128M-8 143.72 5397.68 37.56x
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Unless otherwise noted, the go-hex source files are distributed under the Modified BSD License
|
||||
found in the LICENSE file.
|
@ -0,0 +1,137 @@
|
||||
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||
// Use of this source code is governed by a
|
||||
// Modified BSD License license that can be found in
|
||||
// the LICENSE file.
|
||||
//
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package hex is an efficient hexadecimal implementation for Golang.
|
||||
package hex
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var errLength = errors.New("go-hex: odd length hex string")
|
||||
|
||||
var (
|
||||
lower = []byte("0123456789abcdef")
|
||||
upper = []byte("0123456789ABCDEF")
|
||||
)
|
||||
|
||||
// InvalidByteError values describe errors resulting from an invalid byte in a hex string.
|
||||
type InvalidByteError byte
|
||||
|
||||
func (e InvalidByteError) Error() string {
|
||||
return fmt.Sprintf("go-hex: invalid byte: %#U", rune(e))
|
||||
}
|
||||
|
||||
// EncodedLen returns the length of an encoding of n source bytes.
|
||||
func EncodedLen(n int) int {
|
||||
return n * 2
|
||||
}
|
||||
|
||||
// DecodedLen returns the length of a decoding of n source bytes.
|
||||
func DecodedLen(n int) int {
|
||||
return n / 2
|
||||
}
|
||||
|
||||
// Encode encodes src into EncodedLen(len(src))
|
||||
// bytes of dst. As a convenience, it returns the number
|
||||
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||
// Encode implements lowercase hexadecimal encoding.
|
||||
func Encode(dst, src []byte) int {
|
||||
return RawEncode(dst, src, lower)
|
||||
}
|
||||
|
||||
// EncodeUpper encodes src into EncodedLen(len(src))
|
||||
// bytes of dst. As a convenience, it returns the number
|
||||
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||
// EncodeUpper implements uppercase hexadecimal encoding.
|
||||
func EncodeUpper(dst, src []byte) int {
|
||||
return RawEncode(dst, src, upper)
|
||||
}
|
||||
|
||||
// EncodeToString returns the lowercase hexadecimal encoding of src.
|
||||
func EncodeToString(src []byte) string {
|
||||
return RawEncodeToString(src, lower)
|
||||
}
|
||||
|
||||
// EncodeUpperToString returns the uppercase hexadecimal encoding of src.
|
||||
func EncodeUpperToString(src []byte) string {
|
||||
return RawEncodeToString(src, upper)
|
||||
}
|
||||
|
||||
// RawEncodeToString returns the hexadecimal encoding of src for a given
|
||||
// alphabet.
|
||||
func RawEncodeToString(src, alpha []byte) string {
|
||||
dst := make([]byte, EncodedLen(len(src)))
|
||||
RawEncode(dst, src, alpha)
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
// DecodeString returns the bytes represented by the hexadecimal string s.
|
||||
func DecodeString(s string) ([]byte, error) {
|
||||
src := []byte(s)
|
||||
dst := make([]byte, DecodedLen(len(src)))
|
||||
|
||||
if _, err := Decode(dst, src); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
// MustDecodeString is like DecodeString but panics if the string cannot be
|
||||
// parsed. It simplifies safe initialization of global variables holding
|
||||
// binary data.
|
||||
func MustDecodeString(str string) []byte {
|
||||
dst, err := DecodeString(str)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
func encodeGeneric(dst, src, alpha []byte) {
|
||||
for i, v := range src {
|
||||
dst[i*2] = alpha[v>>4]
|
||||
dst[i*2+1] = alpha[v&0x0f]
|
||||
}
|
||||
}
|
||||
|
||||
func decodeGeneric(dst, src []byte) (uint64, bool) {
|
||||
for i := 0; i < len(src)/2; i++ {
|
||||
a, ok := fromHexChar(src[i*2])
|
||||
if !ok {
|
||||
return uint64(i * 2), false
|
||||
}
|
||||
|
||||
b, ok := fromHexChar(src[i*2+1])
|
||||
if !ok {
|
||||
return uint64(i*2 + 1), false
|
||||
}
|
||||
|
||||
dst[i] = (a << 4) | b
|
||||
}
|
||||
|
||||
return 0, true
|
||||
}
|
||||
|
||||
// fromHexChar converts a hex character into its value and a success flag.
|
||||
func fromHexChar(c byte) (byte, bool) {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0', true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10, true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10, true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
}
|
@ -0,0 +1,94 @@
|
||||
// Copyright 2016 Tom Thorogood. All rights reserved.
|
||||
// Use of this source code is governed by a
|
||||
// Modified BSD License license that can be found in
|
||||
// the LICENSE file.
|
||||
|
||||
// +build amd64,!gccgo,!appengine
|
||||
|
||||
package hex
|
||||
|
||||
import "golang.org/x/sys/cpu"
|
||||
|
||||
// RawEncode encodes src into EncodedLen(len(src))
|
||||
// bytes of dst. As a convenience, it returns the number
|
||||
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||
// RawEncode implements hexadecimal encoding for a given alphabet.
|
||||
func RawEncode(dst, src, alpha []byte) int {
|
||||
if len(alpha) != 16 {
|
||||
panic("invalid alphabet")
|
||||
}
|
||||
|
||||
if len(dst) < len(src)*2 {
|
||||
panic("dst buffer is too small")
|
||||
}
|
||||
|
||||
if len(src) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
switch {
|
||||
case cpu.X86.HasAVX:
|
||||
encodeAVX(&dst[0], &src[0], uint64(len(src)), &alpha[0])
|
||||
case cpu.X86.HasSSE41:
|
||||
encodeSSE(&dst[0], &src[0], uint64(len(src)), &alpha[0])
|
||||
default:
|
||||
encodeGeneric(dst, src, alpha)
|
||||
}
|
||||
|
||||
return len(src) * 2
|
||||
}
|
||||
|
||||
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
|
||||
// number of bytes written to dst.
|
||||
//
|
||||
// If Decode encounters invalid input, it returns an error describing the failure.
|
||||
func Decode(dst, src []byte) (int, error) {
|
||||
if len(src)%2 != 0 {
|
||||
return 0, errLength
|
||||
}
|
||||
|
||||
if len(dst) < len(src)/2 {
|
||||
panic("dst buffer is too small")
|
||||
}
|
||||
|
||||
if len(src) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
var (
|
||||
n uint64
|
||||
ok bool
|
||||
)
|
||||
switch {
|
||||
case cpu.X86.HasAVX:
|
||||
n, ok = decodeAVX(&dst[0], &src[0], uint64(len(src)))
|
||||
case cpu.X86.HasSSE41:
|
||||
n, ok = decodeSSE(&dst[0], &src[0], uint64(len(src)))
|
||||
default:
|
||||
n, ok = decodeGeneric(dst, src)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
return 0, InvalidByteError(src[n])
|
||||
}
|
||||
|
||||
return len(src) / 2, nil
|
||||
}
|
||||
|
||||
//go:generate go run asm_gen.go
|
||||
|
||||
// This function is implemented in hex_encode_amd64.s
|
||||
//go:noescape
|
||||
func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte)
|
||||
|
||||
// This function is implemented in hex_encode_amd64.s
|
||||
//go:noescape
|
||||
func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte)
|
||||
|
||||
// This function is implemented in hex_decode_amd64.s
|
||||
//go:noescape
|
||||
func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool)
|
||||
|
||||
// This function is implemented in hex_decode_amd64.s
|
||||
//go:noescape
|
||||
func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool)
|
@ -0,0 +1,303 @@
|
||||
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License that can be found in
// the LICENSE file.
//
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
// Use of this source code is governed by a
// Simplified BSD License that can be found in
// the LICENSE file.
//
// This file is auto-generated - do not modify

// +build amd64,!gccgo,!appengine

#include "textflag.h"

// decodeBase: first 16 bytes are '0' (0x30), subtracted from every input
// character; the next 16 bytes (0x27) are the extra offset additionally
// subtracted from letter digits so 'a'..'f' map to 10..15.
DATA decodeBase<>+0x00(SB)/8, $0x3030303030303030
DATA decodeBase<>+0x08(SB)/8, $0x3030303030303030
DATA decodeBase<>+0x10(SB)/8, $0x2727272727272727
DATA decodeBase<>+0x18(SB)/8, $0x2727272727272727
GLOBL decodeBase<>(SB),RODATA,$32

// decodeToLower: OR-mask (0x20) that folds ASCII uppercase letters to
// lowercase so 'A'..'F' and 'a'..'f' are handled by one range check.
DATA decodeToLower<>+0x00(SB)/8, $0x2020202020202020
DATA decodeToLower<>+0x08(SB)/8, $0x2020202020202020
GLOBL decodeToLower<>(SB),RODATA,$16

// decodeHigh: PSHUFB index mask selecting the even source positions
// (0,2,...,14) — the high-nibble characters; 0xff lanes produce zero.
DATA decodeHigh<>+0x00(SB)/8, $0x0e0c0a0806040200
DATA decodeHigh<>+0x08(SB)/8, $0xffffffffffffffff
GLOBL decodeHigh<>(SB),RODATA,$16

// decodeLow: PSHUFB index mask selecting the odd source positions
// (1,3,...,15) — the low-nibble characters.
DATA decodeLow<>+0x00(SB)/8, $0x0f0d0b0907050301
DATA decodeLow<>+0x08(SB)/8, $0xffffffffffffffff
GLOBL decodeLow<>(SB),RODATA,$16

// decodeValid: range bounds in "signed space" (byte XOR 0x80) for use
// with PCMPGTB: 0xb0='0'^0x80, 0xb9='9'^0x80, 0xe1='a'^0x80, 0xe6='f'^0x80.
DATA decodeValid<>+0x00(SB)/8, $0xb0b0b0b0b0b0b0b0
DATA decodeValid<>+0x08(SB)/8, $0xb0b0b0b0b0b0b0b0
DATA decodeValid<>+0x10(SB)/8, $0xb9b9b9b9b9b9b9b9
DATA decodeValid<>+0x18(SB)/8, $0xb9b9b9b9b9b9b9b9
DATA decodeValid<>+0x20(SB)/8, $0xe1e1e1e1e1e1e1e1
DATA decodeValid<>+0x28(SB)/8, $0xe1e1e1e1e1e1e1e1
DATA decodeValid<>+0x30(SB)/8, $0xe6e6e6e6e6e6e6e6
DATA decodeValid<>+0x38(SB)/8, $0xe6e6e6e6e6e6e6e6
GLOBL decodeValid<>(SB),RODATA,$64

// decodeToSigned: XOR-mask (0x80) mapping unsigned bytes into the signed
// range so PCMPGTB (a signed compare) can implement unsigned range checks.
DATA decodeToSigned<>+0x00(SB)/8, $0x8080808080808080
DATA decodeToSigned<>+0x08(SB)/8, $0x8080808080808080
GLOBL decodeToSigned<>(SB),RODATA,$16
|
||||
|
||||
// func decodeAVX(dst *byte, src *byte, len uint64) (n uint64, ok bool)
//
// Decodes len hex characters from src into len/2 bytes at dst using
// VEX-encoded (AVX) instructions. On an invalid input byte, ok is set
// to 0 and n to the byte's index in src.
TEXT ·decodeAVX(SB),NOSPLIT,$0
	MOVQ dst+0(FP), DI
	MOVQ src+8(FP), SI
	MOVQ len+16(FP), BX
	MOVQ SI, R15                       // remember start of src so invalid: can compute an index
	MOVOU decodeValid<>(SB), X14       // '0' lower bound (signed space)
	MOVOU decodeValid<>+0x20(SB), X15  // 'a' lower bound (signed space)
	MOVW $65535, DX                    // validity mask: one bit per active lane
	CMPQ BX, $16
	JB tail
bigloop:
	// Convert a full 16-character chunk into 8 output bytes.
	MOVOU (SI), X0
	VPXOR decodeToSigned<>(SB), X0, X1 // raw bytes, signed space
	POR decodeToLower<>(SB), X0        // fold 'A'..'F' to 'a'..'f'
	VPXOR decodeToSigned<>(SB), X0, X2 // lowercased bytes, signed space
	VPCMPGTB X1, X14, X3               // X3 = below '0'
	PCMPGTB decodeValid<>+0x10(SB), X1 // X1 = above '9'
	VPCMPGTB X2, X15, X4               // X4 = below 'a'
	PCMPGTB decodeValid<>+0x30(SB), X2 // X2 = above 'f'
	PAND X4, X1                        // above '9' AND below 'a': the gap between digits and letters
	POR X2, X3
	POR X1, X3                         // X3 = lanes holding any invalid character
	PMOVMSKB X3, AX
	TESTW AX, DX
	JNZ invalid
	PSUBB decodeBase<>(SB), X0         // subtract '0' from every lane
	PANDN decodeBase<>+0x10(SB), X4    // extra 0x27 offset for letter lanes only (X4 inverted)
	PSUBB X4, X0                       // lanes now hold nibble values 0..15
	VPSHUFB decodeLow<>(SB), X0, X3    // gather low nibbles (odd positions)
	PSHUFB decodeHigh<>(SB), X0        // gather high nibbles (even positions)
	PSLLW $4, X0
	POR X3, X0                         // X0 low 8 bytes = packed output
	MOVQ X0, (DI)
	SUBQ $16, BX
	JZ ret
	ADDQ $16, SI
	ADDQ $8, DI
	CMPQ BX, $16
	JAE bigloop
tail:
	// 1..15 characters remain: load them piecewise, then run the same
	// conversion with the validity mask narrowed to the live lanes.
	MOVQ $16, CX
	SUBQ BX, CX
	SHRW CX, DX                        // keep only the low BX bits of the mask
	CMPQ BX, $4
	JB tail_in_2
	JE tail_in_4
	CMPQ BX, $8
	JB tail_in_6
	JE tail_in_8
	CMPQ BX, $12
	JB tail_in_10
	JE tail_in_12
tail_in_14:
	PINSRW $6, 12(SI), X0
tail_in_12:
	PINSRW $5, 10(SI), X0
tail_in_10:
	PINSRW $4, 8(SI), X0
tail_in_8:
	PINSRQ $0, (SI), X0
	JMP tail_conv
tail_in_6:
	PINSRW $2, 4(SI), X0
tail_in_4:
	PINSRW $1, 2(SI), X0
tail_in_2:
	PINSRW $0, (SI), X0
tail_conv:
	// Same validate-and-pack sequence as bigloop; stale upper lanes of X0
	// are ignored thanks to the narrowed DX mask.
	VPXOR decodeToSigned<>(SB), X0, X1
	POR decodeToLower<>(SB), X0
	VPXOR decodeToSigned<>(SB), X0, X2
	VPCMPGTB X1, X14, X3
	PCMPGTB decodeValid<>+0x10(SB), X1
	VPCMPGTB X2, X15, X4
	PCMPGTB decodeValid<>+0x30(SB), X2
	PAND X4, X1
	POR X2, X3
	POR X1, X3
	PMOVMSKB X3, AX
	TESTW AX, DX
	JNZ invalid
	PSUBB decodeBase<>(SB), X0
	PANDN decodeBase<>+0x10(SB), X4
	PSUBB X4, X0
	VPSHUFB decodeLow<>(SB), X0, X3
	PSHUFB decodeHigh<>(SB), X0
	PSLLW $4, X0
	POR X3, X0
	// Store exactly BX/2 output bytes.
	CMPQ BX, $4
	JB tail_out_2
	JE tail_out_4
	CMPQ BX, $8
	JB tail_out_6
	JE tail_out_8
	CMPQ BX, $12
	JB tail_out_10
	JE tail_out_12
tail_out_14:
	PEXTRB $6, X0, 6(DI)
tail_out_12:
	PEXTRB $5, X0, 5(DI)
tail_out_10:
	PEXTRB $4, X0, 4(DI)
tail_out_8:
	MOVL X0, (DI)
	JMP ret
tail_out_6:
	PEXTRB $2, X0, 2(DI)
tail_out_4:
	PEXTRB $1, X0, 1(DI)
tail_out_2:
	PEXTRB $0, X0, (DI)
ret:
	MOVB $1, ok+32(FP)
	RET
invalid:
	// Report the absolute index of the first invalid character.
	BSFW AX, AX                        // lane index within the current chunk
	SUBQ R15, SI                       // offset of the chunk within src
	ADDQ SI, AX
	MOVQ AX, n+24(FP)
	MOVB $0, ok+32(FP)
	RET
|
||||
|
||||
// func decodeSSE(dst *byte, src *byte, len uint64) (n uint64, ok bool)
//
// Identical algorithm to decodeAVX, restricted to non-VEX SSE encodings:
// where decodeAVX uses three-operand VEX forms, inputs are first copied
// with MOVOU before the destructive two-operand instruction.
TEXT ·decodeSSE(SB),NOSPLIT,$0
	MOVQ dst+0(FP), DI
	MOVQ src+8(FP), SI
	MOVQ len+16(FP), BX
	MOVQ SI, R15                       // remember start of src for invalid:
	MOVOU decodeValid<>(SB), X14       // '0' lower bound (signed space)
	MOVOU decodeValid<>+0x20(SB), X15  // 'a' lower bound (signed space)
	MOVW $65535, DX                    // validity mask: one bit per active lane
	CMPQ BX, $16
	JB tail
bigloop:
	// Convert a full 16-character chunk into 8 output bytes.
	MOVOU (SI), X0
	MOVOU X0, X1
	PXOR decodeToSigned<>(SB), X1      // raw bytes, signed space
	POR decodeToLower<>(SB), X0        // fold 'A'..'F' to 'a'..'f'
	MOVOU X0, X2
	PXOR decodeToSigned<>(SB), X2      // lowercased bytes, signed space
	MOVOU X14, X3
	PCMPGTB X1, X3                     // X3 = below '0'
	PCMPGTB decodeValid<>+0x10(SB), X1 // X1 = above '9'
	MOVOU X15, X4
	PCMPGTB X2, X4                     // X4 = below 'a'
	PCMPGTB decodeValid<>+0x30(SB), X2 // X2 = above 'f'
	PAND X4, X1                        // the gap between '9' and 'a'
	POR X2, X3
	POR X1, X3                         // X3 = lanes holding any invalid character
	PMOVMSKB X3, AX
	TESTW AX, DX
	JNZ invalid
	PSUBB decodeBase<>(SB), X0         // subtract '0'
	PANDN decodeBase<>+0x10(SB), X4    // extra 0x27 offset for letter lanes only
	PSUBB X4, X0                       // nibble values 0..15
	MOVOU X0, X3
	PSHUFB decodeLow<>(SB), X3         // gather low nibbles (odd positions)
	PSHUFB decodeHigh<>(SB), X0        // gather high nibbles (even positions)
	PSLLW $4, X0
	POR X3, X0                         // low 8 bytes = packed output
	MOVQ X0, (DI)
	SUBQ $16, BX
	JZ ret
	ADDQ $16, SI
	ADDQ $8, DI
	CMPQ BX, $16
	JAE bigloop
tail:
	// 1..15 characters remain: load them piecewise, narrow the mask.
	MOVQ $16, CX
	SUBQ BX, CX
	SHRW CX, DX                        // keep only the low BX bits of the mask
	CMPQ BX, $4
	JB tail_in_2
	JE tail_in_4
	CMPQ BX, $8
	JB tail_in_6
	JE tail_in_8
	CMPQ BX, $12
	JB tail_in_10
	JE tail_in_12
tail_in_14:
	PINSRW $6, 12(SI), X0
tail_in_12:
	PINSRW $5, 10(SI), X0
tail_in_10:
	PINSRW $4, 8(SI), X0
tail_in_8:
	PINSRQ $0, (SI), X0
	JMP tail_conv
tail_in_6:
	PINSRW $2, 4(SI), X0
tail_in_4:
	PINSRW $1, 2(SI), X0
tail_in_2:
	PINSRW $0, (SI), X0
tail_conv:
	// Same validate-and-pack sequence as bigloop; stale upper lanes of X0
	// are ignored thanks to the narrowed DX mask.
	MOVOU X0, X1
	PXOR decodeToSigned<>(SB), X1
	POR decodeToLower<>(SB), X0
	MOVOU X0, X2
	PXOR decodeToSigned<>(SB), X2
	MOVOU X14, X3
	PCMPGTB X1, X3
	PCMPGTB decodeValid<>+0x10(SB), X1
	MOVOU X15, X4
	PCMPGTB X2, X4
	PCMPGTB decodeValid<>+0x30(SB), X2
	PAND X4, X1
	POR X2, X3
	POR X1, X3
	PMOVMSKB X3, AX
	TESTW AX, DX
	JNZ invalid
	PSUBB decodeBase<>(SB), X0
	PANDN decodeBase<>+0x10(SB), X4
	PSUBB X4, X0
	MOVOU X0, X3
	PSHUFB decodeLow<>(SB), X3
	PSHUFB decodeHigh<>(SB), X0
	PSLLW $4, X0
	POR X3, X0
	// Store exactly BX/2 output bytes.
	CMPQ BX, $4
	JB tail_out_2
	JE tail_out_4
	CMPQ BX, $8
	JB tail_out_6
	JE tail_out_8
	CMPQ BX, $12
	JB tail_out_10
	JE tail_out_12
tail_out_14:
	PEXTRB $6, X0, 6(DI)
tail_out_12:
	PEXTRB $5, X0, 5(DI)
tail_out_10:
	PEXTRB $4, X0, 4(DI)
tail_out_8:
	MOVL X0, (DI)
	JMP ret
tail_out_6:
	PEXTRB $2, X0, 2(DI)
tail_out_4:
	PEXTRB $1, X0, 1(DI)
tail_out_2:
	PEXTRB $0, X0, (DI)
ret:
	MOVB $1, ok+32(FP)
	RET
invalid:
	// Report the absolute index of the first invalid character.
	BSFW AX, AX                        // lane index within the current chunk
	SUBQ R15, SI                       // offset of the chunk within src
	ADDQ SI, AX
	MOVQ AX, n+24(FP)
	MOVB $0, ok+32(FP)
	RET
|
@ -0,0 +1,227 @@
|
||||
// Copyright 2016 Tom Thorogood. All rights reserved.
// Use of this source code is governed by a
// Modified BSD License that can be found in
// the LICENSE file.
//
// Copyright 2005-2016, Wojciech Muła. All rights reserved.
// Use of this source code is governed by a
// Simplified BSD License that can be found in
// the LICENSE file.
//
// This file is auto-generated - do not modify

// +build amd64,!gccgo,!appengine

#include "textflag.h"

// encodeMask: 0x0f in every lane — isolates the low nibble of each byte.
DATA encodeMask<>+0x00(SB)/8, $0x0f0f0f0f0f0f0f0f
DATA encodeMask<>+0x08(SB)/8, $0x0f0f0f0f0f0f0f0f
GLOBL encodeMask<>(SB),RODATA,$16
|
||||
|
||||
// func encodeAVX(dst *byte, src *byte, len uint64, alpha *byte)
//
// Encodes len bytes from src as 2*len characters at dst, mapping each
// nibble through the 16-byte alphabet table at alpha via PSHUFB.
// Uses VEX-encoded (AVX) instructions.
TEXT ·encodeAVX(SB),NOSPLIT,$0
	MOVQ dst+0(FP), DI
	MOVQ src+8(FP), SI
	MOVQ len+16(FP), BX
	MOVQ alpha+24(FP), DX
	MOVOU (DX), X15                    // alphabet lookup table for PSHUFB
	CMPQ BX, $16
	JB tail
bigloop:
	// Process 16 input bytes per iteration, walking backwards from the
	// end of src so BX serves as both the counter and the offset.
	MOVOU -16(SI)(BX*1), X0
	VPAND encodeMask<>(SB), X0, X1     // X1 = low nibbles
	PSRLW $4, X0
	PAND encodeMask<>(SB), X0          // X0 = high nibbles
	VPUNPCKHBW X1, X0, X3              // interleave upper 8 bytes into hi,lo pairs
	PUNPCKLBW X1, X0                   // interleave lower 8 bytes
	VPSHUFB X0, X15, X1                // map nibble values through the alphabet
	VPSHUFB X3, X15, X2
	MOVOU X2, -16(DI)(BX*2)
	MOVOU X1, -32(DI)(BX*2)
	SUBQ $16, BX
	JZ ret
	CMPQ BX, $16
	JAE bigloop
tail:
	// 1..15 bytes remain at the front of src: load exactly BX bytes.
	CMPQ BX, $2
	JB tail_in_1
	JE tail_in_2
	CMPQ BX, $4
	JB tail_in_3
	JE tail_in_4
	CMPQ BX, $6
	JB tail_in_5
	JE tail_in_6
	CMPQ BX, $8
	JB tail_in_7
tail_in_8:
	MOVQ (SI), X0
	JMP tail_conv
tail_in_7:
	PINSRB $6, 6(SI), X0
tail_in_6:
	PINSRB $5, 5(SI), X0
tail_in_5:
	PINSRB $4, 4(SI), X0
tail_in_4:
	PINSRD $0, (SI), X0
	JMP tail_conv
tail_in_3:
	PINSRB $2, 2(SI), X0
tail_in_2:
	PINSRB $1, 1(SI), X0
tail_in_1:
	PINSRB $0, (SI), X0
tail_conv:
	// Same nibble-split and alphabet lookup as bigloop, low half only.
	VPAND encodeMask<>(SB), X0, X1
	PSRLW $4, X0
	PAND encodeMask<>(SB), X0
	PUNPCKLBW X1, X0
	VPSHUFB X0, X15, X1
	// Store exactly 2*BX output characters.
	CMPQ BX, $2
	JB tail_out_1
	JE tail_out_2
	CMPQ BX, $4
	JB tail_out_3
	JE tail_out_4
	CMPQ BX, $6
	JB tail_out_5
	JE tail_out_6
	CMPQ BX, $8
	JB tail_out_7
tail_out_8:
	// Exactly 8 bytes consumed; 9..15-byte tails loop back for the rest.
	MOVOU X1, (DI)
	SUBQ $8, BX
	JZ ret
	ADDQ $8, SI
	ADDQ $16, DI
	JMP tail
tail_out_7:
	PEXTRB $13, X1, 13(DI)
	PEXTRB $12, X1, 12(DI)
tail_out_6:
	PEXTRB $11, X1, 11(DI)
	PEXTRB $10, X1, 10(DI)
tail_out_5:
	PEXTRB $9, X1, 9(DI)
	PEXTRB $8, X1, 8(DI)
tail_out_4:
	MOVQ X1, (DI)
	RET
tail_out_3:
	PEXTRB $5, X1, 5(DI)
	PEXTRB $4, X1, 4(DI)
tail_out_2:
	PEXTRB $3, X1, 3(DI)
	PEXTRB $2, X1, 2(DI)
tail_out_1:
	PEXTRB $1, X1, 1(DI)
	PEXTRB $0, X1, (DI)
ret:
	RET
|
||||
|
||||
// func encodeSSE(dst *byte, src *byte, len uint64, alpha *byte)
//
// Identical algorithm to encodeAVX, restricted to non-VEX SSE encodings:
// where encodeAVX uses three-operand VEX forms, inputs are first copied
// with MOVOU before the destructive two-operand instruction.
TEXT ·encodeSSE(SB),NOSPLIT,$0
	MOVQ dst+0(FP), DI
	MOVQ src+8(FP), SI
	MOVQ len+16(FP), BX
	MOVQ alpha+24(FP), DX
	MOVOU (DX), X15                    // alphabet lookup table for PSHUFB
	CMPQ BX, $16
	JB tail
bigloop:
	// Process 16 input bytes per iteration, walking backwards from the
	// end of src so BX serves as both the counter and the offset.
	MOVOU -16(SI)(BX*1), X0
	MOVOU X0, X1
	PAND encodeMask<>(SB), X1          // X1 = low nibbles
	PSRLW $4, X0
	PAND encodeMask<>(SB), X0          // X0 = high nibbles
	MOVOU X0, X3
	PUNPCKHBW X1, X3                   // interleave upper 8 bytes into hi,lo pairs
	PUNPCKLBW X1, X0                   // interleave lower 8 bytes
	MOVOU X15, X1
	PSHUFB X0, X1                      // map nibble values through the alphabet
	MOVOU X15, X2
	PSHUFB X3, X2
	MOVOU X2, -16(DI)(BX*2)
	MOVOU X1, -32(DI)(BX*2)
	SUBQ $16, BX
	JZ ret
	CMPQ BX, $16
	JAE bigloop
tail:
	// 1..15 bytes remain at the front of src: load exactly BX bytes.
	CMPQ BX, $2
	JB tail_in_1
	JE tail_in_2
	CMPQ BX, $4
	JB tail_in_3
	JE tail_in_4
	CMPQ BX, $6
	JB tail_in_5
	JE tail_in_6
	CMPQ BX, $8
	JB tail_in_7
tail_in_8:
	MOVQ (SI), X0
	JMP tail_conv
tail_in_7:
	PINSRB $6, 6(SI), X0
tail_in_6:
	PINSRB $5, 5(SI), X0
tail_in_5:
	PINSRB $4, 4(SI), X0
tail_in_4:
	PINSRD $0, (SI), X0
	JMP tail_conv
tail_in_3:
	PINSRB $2, 2(SI), X0
tail_in_2:
	PINSRB $1, 1(SI), X0
tail_in_1:
	PINSRB $0, (SI), X0
tail_conv:
	// Same nibble-split and alphabet lookup as bigloop, low half only.
	MOVOU X0, X1
	PAND encodeMask<>(SB), X1
	PSRLW $4, X0
	PAND encodeMask<>(SB), X0
	PUNPCKLBW X1, X0
	MOVOU X15, X1
	PSHUFB X0, X1
	// Store exactly 2*BX output characters.
	CMPQ BX, $2
	JB tail_out_1
	JE tail_out_2
	CMPQ BX, $4
	JB tail_out_3
	JE tail_out_4
	CMPQ BX, $6
	JB tail_out_5
	JE tail_out_6
	CMPQ BX, $8
	JB tail_out_7
tail_out_8:
	// Exactly 8 bytes consumed; 9..15-byte tails loop back for the rest.
	MOVOU X1, (DI)
	SUBQ $8, BX
	JZ ret
	ADDQ $8, SI
	ADDQ $16, DI
	JMP tail
tail_out_7:
	PEXTRB $13, X1, 13(DI)
	PEXTRB $12, X1, 12(DI)
tail_out_6:
	PEXTRB $11, X1, 11(DI)
	PEXTRB $10, X1, 10(DI)
tail_out_5:
	PEXTRB $9, X1, 9(DI)
	PEXTRB $8, X1, 8(DI)
tail_out_4:
	MOVQ X1, (DI)
	RET
tail_out_3:
	PEXTRB $5, X1, 5(DI)
	PEXTRB $4, X1, 4(DI)
tail_out_2:
	PEXTRB $3, X1, 3(DI)
	PEXTRB $2, X1, 2(DI)
tail_out_1:
	PEXTRB $1, X1, 1(DI)
	PEXTRB $0, X1, (DI)
ret:
	RET
|
@ -0,0 +1,36 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !amd64 gccgo appengine
|
||||
|
||||
package hex
|
||||
|
||||
// RawEncode encodes src into EncodedLen(len(src))
|
||||
// bytes of dst. As a convenience, it returns the number
|
||||
// of bytes written to dst, but this value is always EncodedLen(len(src)).
|
||||
// RawEncode implements hexadecimal encoding for a given alphabet.
|
||||
func RawEncode(dst, src, alpha []byte) int {
|
||||
if len(alpha) != 16 {
|
||||
panic("invalid alphabet")
|
||||
}
|
||||
|
||||
encodeGeneric(dst, src, alpha)
|
||||
return len(src) * 2
|
||||
}
|
||||
|
||||
// Decode decodes src into DecodedLen(len(src)) bytes, returning the actual
|
||||
// number of bytes written to dst.
|
||||
//
|
||||
// If Decode encounters invalid input, it returns an error describing the failure.
|
||||
func Decode(dst, src []byte) (int, error) {
|
||||
if len(src)%2 == 1 {
|
||||
return 0, errLength
|
||||
}
|
||||
|
||||
if n, ok := decodeGeneric(dst, src); !ok {
|
||||
return 0, InvalidByteError(src[n])
|
||||
}
|
||||
|
||||
return len(src) / 2, nil
|
||||
}
|
@ -0,0 +1,3 @@
|
||||
# Patterns for files created by this project.
|
||||
# For other files, use global gitignore.
|
||||
*.s3db
|
@ -0,0 +1,6 @@
|
||||
trailingComma: all
|
||||
tabWidth: 2
|
||||
semi: false
|
||||
singleQuote: true
|
||||
proseWrap: always
|
||||
printWidth: 100
|
@ -0,0 +1,596 @@
|
||||
## [1.1.6](https://github.com/uptrace/bun/compare/v1.1.5...v1.1.6) (2022-07-10)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* bunotel add set attributes to query metrics ([dae82cc](https://github.com/uptrace/bun/commit/dae82cc0e3af49be1e474027b55c34364676985d))
|
||||
* **db.ScanRows:** ensure rows.Close is called ([9ffbc6a](https://github.com/uptrace/bun/commit/9ffbc6a46e24b908742b6973f33ef8e5b17cc12b))
|
||||
* merge apply ([3081849](https://github.com/uptrace/bun/commit/30818499eacddd3b1a3e749091ba6a1468125641))
|
||||
* **migrate:** close conn/tx on error ([7b168ea](https://github.com/uptrace/bun/commit/7b168eabfe0f844bcbf8dc89629d04c385b9f58c))
|
||||
* **migrate:** type Migration should be used as a value rather than a pointer ([fb43935](https://github.com/uptrace/bun/commit/fb4393582b49fe528800a66aac5fb1c9a6033048))
|
||||
* **migrate:** type MigrationGroup should be used as a value rather than a pointer ([649da1b](https://github.com/uptrace/bun/commit/649da1b3c158060add9b61b32c289260daafa65a))
|
||||
* mssql cursor pagination ([#589](https://github.com/uptrace/bun/issues/589)) ([b34ec97](https://github.com/uptrace/bun/commit/b34ec97ddda95629f73762721d60fd3e00e7e99f))
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* "skipupdate" model field tag ([#565](https://github.com/uptrace/bun/issues/565)) ([9288294](https://github.com/uptrace/bun/commit/928829482c718a0c215aa4f4adfa6f3fb3ed4302))
|
||||
* add pgdriver write error to log ([5ddda3d](https://github.com/uptrace/bun/commit/5ddda3de31cd08ceee4bdea64ceae8d15eace07b))
|
||||
* add query string representation ([520da7e](https://github.com/uptrace/bun/commit/520da7e1d6dbf7b06846f6b39a7f99e8753c1466))
|
||||
* add relation condition with tag ([fe5bbf6](https://github.com/uptrace/bun/commit/fe5bbf64f33d25b310e5510ece7d705b9eb3bfea))
|
||||
* add support for ON UPDATE and ON DELETE rules on belongs-to relationships from struct tags ([#533](https://github.com/uptrace/bun/issues/533)) ([a327b2a](https://github.com/uptrace/bun/commit/a327b2ae216abb55a705626296c0cdbf8d648697))
|
||||
* add tx methods to IDB ([#587](https://github.com/uptrace/bun/issues/587)) ([feab313](https://github.com/uptrace/bun/commit/feab313c0358200b6e270ac70f4551b011ab5276))
|
||||
* added raw query calls ([#596](https://github.com/uptrace/bun/issues/596)) ([127644d](https://github.com/uptrace/bun/commit/127644d2eea443736fbd6bed3417595d439e4639))
|
||||
* **bunotel:** add option to enable formatting of queries ([#547](https://github.com/uptrace/bun/issues/547)) ([b9c768c](https://github.com/uptrace/bun/commit/b9c768cec3b5dea36c3c9c344d1e76e0ffad1369))
|
||||
* **config.go:** add sslrootcert support to DSN parameters ([3bd5d69](https://github.com/uptrace/bun/commit/3bd5d692d7df4f30d07b835d6a46fc7af382489a))
|
||||
* create an extra module for newrelic ([#599](https://github.com/uptrace/bun/issues/599)) ([6c676ce](https://github.com/uptrace/bun/commit/6c676ce13f05fe763471fbec2d5a2db48bc88650))
|
||||
* **migrate:** add WithMarkAppliedOnSuccess ([31b2cc4](https://github.com/uptrace/bun/commit/31b2cc4f5ccd794a436d081073d4974835d3780d))
|
||||
* **pgdialect:** add hstore support ([66b44f7](https://github.com/uptrace/bun/commit/66b44f7c0edc205927fb8be96aaf263b31828fa1))
|
||||
* **pgdialect:** add identity support ([646251e](https://github.com/uptrace/bun/commit/646251ec02a1e2ec717e907e6f128d8b51f17c6d))
|
||||
* **pgdriver:** expose pgdriver.ParseTime ([405a7d7](https://github.com/uptrace/bun/commit/405a7d78d8f60cf27e8f175deaf95db5877d84be))
|
||||
|
||||
|
||||
|
||||
## [1.1.5](https://github.com/uptrace/bun/compare/v1.1.4...v1.1.5) (2022-05-12)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* **driver/sqliteshim:** make it work with recent version of modernc sqlite ([2360584](https://github.com/uptrace/bun/commit/23605846c20684e39bf1eaac50a2147a1b68a729))
|
||||
|
||||
|
||||
|
||||
## [1.1.4](https://github.com/uptrace/bun/compare/v1.1.3...v1.1.4) (2022-04-20)
|
||||
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
* automatically set nullzero when there is default:value option ([72c44ae](https://github.com/uptrace/bun/commit/72c44aebbeec3a83ed97ea25a3262174d744df65))
|
||||
* fix ForceDelete on live/undeleted rows ([1a33250](https://github.com/uptrace/bun/commit/1a33250f27f00e752a735ce10311ac95dcb0c968))
|
||||
* fix OmitZero and value overriding ([087ea07](https://github.com/uptrace/bun/commit/087ea0730551f1e841bacb6ad2fa3afd512a1df8))
|
||||
* rename Query to QueryBuilder ([98d111b](https://github.com/uptrace/bun/commit/98d111b7cc00fa61b6b2cec147f43285f4baadb4))
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* add ApplyQueryBuilder ([582eca0](https://github.com/uptrace/bun/commit/582eca09cf2b59e67c2e4a2ad24f1a74cb53addd))
|
||||
* **config.go:** add connect_timeout to DSN parsable params ([998b04d](https://github.com/uptrace/bun/commit/998b04d51a9a4f182ac3458f90db8dbf9185c4ba)), closes [#505](https://github.com/uptrace/bun/issues/505)
|
||||
|
||||
|
||||
|
||||
# [1.1.3](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-29)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- fix panic message when has-many encounter an error
|
||||
([cfd2747](https://github.com/uptrace/bun/commit/cfd27475fac89a1c8cf798bfa64898bd77bbba79))
|
||||
- **migrate:** change rollback to match migrate behavior
|
||||
([df5af9c](https://github.com/uptrace/bun/commit/df5af9c9cbdf54ce243e037bbb2c7b154f8422b3))
|
||||
|
||||
### Features
|
||||
|
||||
- added QueryBuilder interface for SelectQuery, UpdateQuery, DeleteQuery
|
||||
([#499](https://github.com/uptrace/bun/issues/499))
|
||||
([59fef48](https://github.com/uptrace/bun/commit/59fef48f6b3ec7f32bdda779b6693c333ff1dfdb))
|
||||
|
||||
# [1.1.2](https://github.com/uptrace/bun/compare/v1.1.2...v) (2022-03-22)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- correctly handle bun.In([][]byte{...})
|
||||
([800616e](https://github.com/uptrace/bun/commit/800616ed28ca600ad676319a10adb970b2b4daf6))
|
||||
|
||||
### Features
|
||||
|
||||
- accept extend option to allow extending existing models
|
||||
([48b80e4](https://github.com/uptrace/bun/commit/48b80e4f7e3ed8a28fd305f7853ebe7ab984a497))
|
||||
|
||||
# [1.1.0](https://github.com/uptrace/bun/compare/v1.1.0-beta.1...v1.1.0) (2022-02-28)
|
||||
|
||||
### Features
|
||||
|
||||
- Added [MSSQL](https://bun.uptrace.dev/guide/drivers.html#mssql) support as a 4th fully supported
|
||||
DBMS.
|
||||
- Added `SetColumn("col_name", "upper(?)", "hello")` in addition to
|
||||
`Set("col_name = upper(?)", "hello")` which works for all 4 supported DBMS.
|
||||
|
||||
* improve nil ptr values handling
|
||||
([b398e6b](https://github.com/uptrace/bun/commit/b398e6bea840ea2fd3e001b7879c0b00b6dcd6f7))
|
||||
|
||||
### Breaking changes
|
||||
|
||||
- Bun no longer automatically marks some fields like `ID int64` as `pk` and `autoincrement`. You
|
||||
need to manually add those options:
|
||||
|
||||
```diff
|
||||
type Model struct {
|
||||
- ID int64
|
||||
+ ID int64 `bun:",pk,autoincrement"`
|
||||
}
|
||||
```
|
||||
|
||||
Bun [v1.0.25](#1024-2022-02-22) prints warnings for models with missing options so you are
|
||||
recommended to upgrade to v1.0.24 before upgrading to v1.1.x.
|
||||
|
||||
- Also, Bun no longer adds `nullzero` option to `soft_delete` fields.
|
||||
|
||||
- Removed `nopk` and `allowzero` options.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- append slice values
|
||||
([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
|
||||
- check for nils when appending driver.Value
|
||||
([7bb1640](https://github.com/uptrace/bun/commit/7bb1640a00fceca1e1075fe6544b9a4842ab2b26))
|
||||
- cleanup soft deletes for mssql
|
||||
([e72e2c5](https://github.com/uptrace/bun/commit/e72e2c5d0a85f3d26c3fa22c7284c2de1dcfda8e))
|
||||
- **dbfixture:** apply cascade option. Fixes [#447](https://github.com/uptrace/bun/issues/447)
|
||||
([d32d988](https://github.com/uptrace/bun/commit/d32d98840bc23e74c836f8192cb4bc9529aa9233))
|
||||
- create table WithForeignKey() and has-many relation
|
||||
([3cf5649](https://github.com/uptrace/bun/commit/3cf56491706b5652c383dbe007ff2389ad64922e))
|
||||
- do not emit m2m relations in WithForeignKeys()
|
||||
([56c8c5e](https://github.com/uptrace/bun/commit/56c8c5ed44c0d6d734c3d3161c642ce8437e2248))
|
||||
- accept dest in select queries
|
||||
([33b5b6f](https://github.com/uptrace/bun/commit/33b5b6ff660b77238a737a543ca12675c7f0c284))
|
||||
|
||||
## [1.0.25](https://github.com/uptrace/bun/compare/v1.0.23...v1.0.25) (2022-02-22)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
### Deprecated
|
||||
|
||||
In the coming v1.1.x release, Bun will stop automatically adding `,pk,autoincrement` options on
|
||||
`ID int64/int32` fields. This version (v1.0.23) only prints a warning when it encounters such
|
||||
fields, but the code will continue working as before.
|
||||
|
||||
To fix warnings, add missing options:
|
||||
|
||||
```diff
|
||||
type Model struct {
|
||||
- ID int64
|
||||
+ ID int64 `bun:",pk,autoincrement"`
|
||||
}
|
||||
```
|
||||
|
||||
To silence warnings:
|
||||
|
||||
```go
|
||||
bun.SetWarnLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||
```
|
||||
|
||||
Bun will also print a warning on [soft delete](https://bun.uptrace.dev/guide/soft-deletes.html)
|
||||
fields without a `,nullzero` option. You can fix the warning by adding missing `,nullzero` or
|
||||
`,allowzero` options.
|
||||
|
||||
In v1.1.x, such options as `,nopk` and `,allowzero` will not be necessary and will be removed.
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- fix missing autoincrement warning
|
||||
([3bc9c72](https://github.com/uptrace/bun/commit/3bc9c721e1c1c5104c256a0c01c4525df6ecefc2))
|
||||
|
||||
* append slice values
|
||||
([4a65129](https://github.com/uptrace/bun/commit/4a651294fb0f1e73079553024810c3ead9777311))
|
||||
* don't automatically set pk, nullzero, and autoincrement options
|
||||
([519a0df](https://github.com/uptrace/bun/commit/519a0df9707de01a418aba0d6b7482cfe4c9a532))
|
||||
|
||||
### Features
|
||||
|
||||
- add CreateTableQuery.DetectForeignKeys
|
||||
([a958fcb](https://github.com/uptrace/bun/commit/a958fcbab680b0c5ad7980f369c7b73f7673db87))
|
||||
|
||||
## [1.0.22](https://github.com/uptrace/bun/compare/v1.0.21...v1.0.22) (2022-01-28)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- improve scan error message
|
||||
([54048b2](https://github.com/uptrace/bun/commit/54048b296b9648fd62107ce6fa6fd7e6e2a648c7))
|
||||
- properly discover json.Marshaler on ptr field
|
||||
([3b321b0](https://github.com/uptrace/bun/commit/3b321b08601c4b8dc6bcaa24adea20875883ac14))
|
||||
|
||||
### Breaking (MySQL, MariaDB)
|
||||
|
||||
- **insert:** get last insert id only with pk support auto increment
|
||||
([79e7c79](https://github.com/uptrace/bun/commit/79e7c797beea54bfc9dc1cb0141a7520ff941b4d)). Make
|
||||
sure your MySQL models have `bun:",pk,autoincrement"` options if you are using autoincrements.
|
||||
|
||||
### Features
|
||||
|
||||
- refuse to start when version check does not pass
|
||||
([ff8d767](https://github.com/uptrace/bun/commit/ff8d76794894eeaebede840e5199720f3f5cf531))
|
||||
- support Column in ValuesQuery
|
||||
([0707679](https://github.com/uptrace/bun/commit/0707679b075cac57efa8e6fe9019b57b2da4bcc7))
|
||||
|
||||
## [1.0.21](https://github.com/uptrace/bun/compare/v1.0.20...v1.0.21) (2022-01-06)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- append where to index create
|
||||
([1de6cea](https://github.com/uptrace/bun/commit/1de6ceaa8bba59b69fbe0cc6916d1b27da5586d8))
|
||||
- check if slice is nil when calling BeforeAppendModel
|
||||
([938d9da](https://github.com/uptrace/bun/commit/938d9dadb72ceeeb906064d9575278929d20cbbe))
|
||||
- **dbfixture:** directly set matching types via reflect
|
||||
([780504c](https://github.com/uptrace/bun/commit/780504cf1da687fc51a22d002ea66e2ccc41e1a3))
|
||||
- properly handle driver.Valuer and type:json
|
||||
([a17454a](https://github.com/uptrace/bun/commit/a17454ac6b95b2a2e927d0c4e4aee96494108389))
|
||||
- support scanning string into uint64
|
||||
([73cc117](https://github.com/uptrace/bun/commit/73cc117a9f7a623ced1fdaedb4546e8e7470e4d3))
|
||||
- unique module name for opentelemetry example
|
||||
([f2054fe](https://github.com/uptrace/bun/commit/f2054fe1d11cea3b21d69dab6f6d6d7d97ba06bb))
|
||||
|
||||
### Features
|
||||
|
||||
- add anonymous fields with type name
|
||||
([508375b](https://github.com/uptrace/bun/commit/508375b8f2396cb088fd4399a9259584353eb7e5))
|
||||
- add baseQuery.GetConn()
|
||||
([81a9bee](https://github.com/uptrace/bun/commit/81a9beecb74fed7ec3574a1d42acdf10a74e0b00))
|
||||
- create new queries from baseQuery
|
||||
([ae1dd61](https://github.com/uptrace/bun/commit/ae1dd611a91c2b7c79bc2bc12e9a53e857791e71))
|
||||
- support INSERT ... RETURNING for MariaDB >= 10.5.0
|
||||
([b6531c0](https://github.com/uptrace/bun/commit/b6531c00ecbd4c7ec56b4131fab213f9313edc1b))
|
||||
|
||||
## [1.0.20](https://github.com/uptrace/bun/compare/v1.0.19...v1.0.20) (2021-12-19)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- add Event.QueryTemplate and change Event.Query to be always formatted
|
||||
([52b1ccd](https://github.com/uptrace/bun/commit/52b1ccdf3578418aa427adef9dcf942d90ae4fdd))
|
||||
- change GetTableName to return formatted table name in case ModelTableExpr
|
||||
([95144dd](https://github.com/uptrace/bun/commit/95144dde937b4ac88b36b0bd8b01372421069b44))
|
||||
- change ScanAndCount to work with transactions
|
||||
([5b3f2c0](https://github.com/uptrace/bun/commit/5b3f2c021c424da366caffd33589e8adde821403))
|
||||
- **dbfixture:** directly call funcs bypassing template eval
|
||||
([a61974b](https://github.com/uptrace/bun/commit/a61974ba2d24361c5357fb9bda1f3eceec5a45cd))
|
||||
- don't append CASCADE by default in drop table/column queries
|
||||
([26457ea](https://github.com/uptrace/bun/commit/26457ea5cb20862d232e6e5fa4dbdeac5d444bf1))
|
||||
- **migrate:** mark migrations as applied on error so the migration can be rolled back
|
||||
([8ce33fb](https://github.com/uptrace/bun/commit/8ce33fbbac8e33077c20daf19a14c5ff2291bcae))
|
||||
- respect nullzero when appending struct fields. Fixes
|
||||
[#339](https://github.com/uptrace/bun/issues/339)
|
||||
([ffd02f3](https://github.com/uptrace/bun/commit/ffd02f3170b3cccdd670a48d563cfb41094c05d6))
|
||||
- reuse tx for relation join ([#366](https://github.com/uptrace/bun/issues/366))
|
||||
([60bdb1a](https://github.com/uptrace/bun/commit/60bdb1ac84c0a699429eead3b7fdfbf14fe69ac6))
|
||||
|
||||
### Features
|
||||
|
||||
- add `Dialect()` to Transaction and IDB interface
|
||||
([693f1e1](https://github.com/uptrace/bun/commit/693f1e135999fc31cf83b99a2530a695b20f4e1b))
|
||||
- add model embedding via embed:prefix\_
|
||||
([9a2cedc](https://github.com/uptrace/bun/commit/9a2cedc8b08fa8585d4bfced338bd0a40d736b1d))
|
||||
- change the default log output to stderr
|
||||
([4bf5773](https://github.com/uptrace/bun/commit/4bf577382f19c64457cbf0d64490401450954654)),
|
||||
closes [#349](https://github.com/uptrace/bun/issues/349)
|
||||
|
||||
## [1.0.19](https://github.com/uptrace/bun/compare/v1.0.18...v1.0.19) (2021-11-30)
|
||||
|
||||
### Features
|
||||
|
||||
- add support for column:name to specify column name
|
||||
([e37b460](https://github.com/uptrace/bun/commit/e37b4602823babc8221970e086cfed90c6ad4cf4))
|
||||
|
||||
## [1.0.18](https://github.com/uptrace/bun/compare/v1.0.17...v1.0.18) (2021-11-24)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- use correct operation for UpdateQuery
|
||||
([687a004](https://github.com/uptrace/bun/commit/687a004ef7ec6fe1ef06c394965dd2c2d822fc82))
|
||||
|
||||
### Features
|
||||
|
||||
- add pgdriver.Notify
|
||||
([7ee443d](https://github.com/uptrace/bun/commit/7ee443d1b869d8ddc4746850f7425d0a9ccd012b))
|
||||
- CreateTableQuery.PartitionBy and CreateTableQuery.TableSpace
|
||||
([cd3ab4d](https://github.com/uptrace/bun/commit/cd3ab4d8f3682f5a30b87c2ebc2d7e551d739078))
|
||||
- **pgdriver:** add CopyFrom and CopyTo
|
||||
([0b97703](https://github.com/uptrace/bun/commit/0b977030b5c05f509e11d13550b5f99dfd62358d))
|
||||
- support InsertQuery.Ignore on PostgreSQL
|
||||
([1aa9d14](https://github.com/uptrace/bun/commit/1aa9d149da8e46e63ff79192e394fde4d18d9b60))
|
||||
|
||||
## [1.0.17](https://github.com/uptrace/bun/compare/v1.0.16...v1.0.17) (2021-11-11)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- don't call rollback when tx is already done
|
||||
([8246c2a](https://github.com/uptrace/bun/commit/8246c2a63e2e6eba314201c6ba87f094edf098b9))
|
||||
- **mysql:** escape backslash char in strings
|
||||
([fb32029](https://github.com/uptrace/bun/commit/fb32029ea7604d066800b16df21f239b71bf121d))
|
||||
|
||||
## [1.0.16](https://github.com/uptrace/bun/compare/v1.0.15...v1.0.16) (2021-11-07)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- call query hook when tx is started, committed, or rolled back
|
||||
([30e85b5](https://github.com/uptrace/bun/commit/30e85b5366b2e51951ef17a0cf362b58f708dab1))
|
||||
- **pgdialect:** auto-enable array support if the sql type is an array
|
||||
([62c1012](https://github.com/uptrace/bun/commit/62c1012b2482e83969e5c6f5faf89e655ce78138))
|
||||
|
||||
### Features
|
||||
|
||||
- support multiple tag options join:left_col1=right_col1,join:left_col2=right_col2
|
||||
([78cd5aa](https://github.com/uptrace/bun/commit/78cd5aa60a5c7d1323bb89081db2b2b811113052))
|
||||
- **tag:** log with bad tag name
|
||||
([4e82d75](https://github.com/uptrace/bun/commit/4e82d75be2dabdba1a510df4e1fbb86092f92f4c))
|
||||
|
||||
## [1.0.15](https://github.com/uptrace/bun/compare/v1.0.14...v1.0.15) (2021-10-29)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- fixed bug creating table when model has no columns
|
||||
([042c50b](https://github.com/uptrace/bun/commit/042c50bfe41caaa6e279e02c887c3a84a3acd84f))
|
||||
- init table with dialect once
|
||||
([9a1ce1e](https://github.com/uptrace/bun/commit/9a1ce1e492602742bb2f587e9ed24e50d7d07cad))
|
||||
|
||||
### Features
|
||||
|
||||
- accept columns in WherePK
|
||||
([b3e7035](https://github.com/uptrace/bun/commit/b3e70356db1aa4891115a10902316090fccbc8bf))
|
||||
- support ADD COLUMN IF NOT EXISTS
|
||||
([ca7357c](https://github.com/uptrace/bun/commit/ca7357cdfe283e2f0b94eb638372e18401c486e9))
|
||||
|
||||
## [1.0.14](https://github.com/uptrace/bun/compare/v1.0.13...v1.0.14) (2021-10-24)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- correct binary serialization for mysql ([#259](https://github.com/uptrace/bun/issues/259))
|
||||
([e899f50](https://github.com/uptrace/bun/commit/e899f50b22ef6759ef8c029a6cd3f25f2bde17ef))
|
||||
- correctly escape single quotes in pg arrays
|
||||
([3010847](https://github.com/uptrace/bun/commit/3010847f5c2c50bce1969689a0b77fd8a6fb7e55))
|
||||
- use BLOB sql type to encode []byte in MySQL and SQLite
|
||||
([725ec88](https://github.com/uptrace/bun/commit/725ec8843824a7fc8f4058ead75ab0e62a78192a))
|
||||
|
||||
### Features
|
||||
|
||||
- warn when there are args but no placeholders
|
||||
([06dde21](https://github.com/uptrace/bun/commit/06dde215c8d0bde2b2364597190729a160e536a1))
|
||||
|
||||
## [1.0.13](https://github.com/uptrace/bun/compare/v1.0.12...v1.0.13) (2021-10-17)
|
||||
|
||||
### Breaking Change
|
||||
|
||||
- **pgdriver:** enable TLS by default with InsecureSkipVerify=true
|
||||
([15ec635](https://github.com/uptrace/bun/commit/15ec6356a04d5cf62d2efbeb189610532dc5eb31))
|
||||
|
||||
### Features
|
||||
|
||||
- add BeforeAppendModelHook
|
||||
([0b55de7](https://github.com/uptrace/bun/commit/0b55de77aaffc1ed0894ef16f45df77bca7d93c1))
|
||||
- **pgdriver:** add support for unix socket DSN
|
||||
([f398cec](https://github.com/uptrace/bun/commit/f398cec1c3873efdf61ac0b94ebe06c657f0cf91))
|
||||
|
||||
## [1.0.12](https://github.com/uptrace/bun/compare/v1.0.11...v1.0.12) (2021-10-14)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- add InsertQuery.ColumnExpr to specify columns
|
||||
([60ffe29](https://github.com/uptrace/bun/commit/60ffe293b37912d95f28e69734ff51edf4b27da7))
|
||||
- **bundebug:** change WithVerbose to accept a bool flag
|
||||
([b2f8b91](https://github.com/uptrace/bun/commit/b2f8b912de1dc29f40c79066de1e9d6379db666c))
|
||||
- **pgdialect:** fix bytea[] handling
|
||||
([a5ca013](https://github.com/uptrace/bun/commit/a5ca013742c5a2e947b43d13f9c2fc0cf6a65d9c))
|
||||
- **pgdriver:** rename DriverOption to Option
|
||||
([51c1702](https://github.com/uptrace/bun/commit/51c1702431787d7369904b2624e346bf3e59c330))
|
||||
- support allowzero on the soft delete field
|
||||
([d0abec7](https://github.com/uptrace/bun/commit/d0abec71a9a546472a83bd70ed4e6a7357659a9b))
|
||||
|
||||
### Features
|
||||
|
||||
- **bundebug:** allow to configure the hook using env var, for example, BUNDEBUG={0,1,2}
|
||||
([ce92852](https://github.com/uptrace/bun/commit/ce928524cab9a83395f3772ae9dd5d7732af281d))
|
||||
- **bunotel:** report DBStats metrics
|
||||
([b9b1575](https://github.com/uptrace/bun/commit/b9b15750f405cdbd345b776f5a56c6f742bc7361))
|
||||
- **pgdriver:** add Error.StatementTimeout
|
||||
([8a7934d](https://github.com/uptrace/bun/commit/8a7934dd788057828bb2b0983732b4394b74e960))
|
||||
- **pgdriver:** allow setting Network in config
|
||||
([b24b5d8](https://github.com/uptrace/bun/commit/b24b5d8014195a56ad7a4c634c10681038e6044d))
|
||||
|
||||
## [1.0.11](https://github.com/uptrace/bun/compare/v1.0.10...v1.0.11) (2021-10-05)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- **mysqldialect:** remove duplicate AppendTime
|
||||
([8d42090](https://github.com/uptrace/bun/commit/8d42090af34a1760004482c7fc0923b114d79937))
|
||||
|
||||
## [1.0.10](https://github.com/uptrace/bun/compare/v1.0.9...v1.0.10) (2021-10-05)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- add UpdateQuery.OmitZero
|
||||
([2294db6](https://github.com/uptrace/bun/commit/2294db61d228711435fff1075409a30086b37555))
|
||||
- make ExcludeColumn work with many-to-many queries
|
||||
([300e12b](https://github.com/uptrace/bun/commit/300e12b993554ff839ec4fa6bbea97e16aca1b55))
|
||||
- **mysqldialect:** append time in local timezone
|
||||
([e763cc8](https://github.com/uptrace/bun/commit/e763cc81eac4b11fff4e074ad3ff6cd970a71697))
|
||||
- **tagparser:** improve parsing options with brackets
|
||||
([0daa61e](https://github.com/uptrace/bun/commit/0daa61edc3c4d927ed260332b99ee09f4bb6b42f))
|
||||
|
||||
### Features
|
||||
|
||||
- add timetz parsing
|
||||
([6e415c4](https://github.com/uptrace/bun/commit/6e415c4c5fa2c8caf4bb4aed4e5897fe5676f5a5))
|
||||
|
||||
## [1.0.9](https://github.com/uptrace/bun/compare/v1.0.8...v1.0.9) (2021-09-27)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- change DBStats to use uint32 instead of uint64 to make it work on i386
|
||||
([caca2a7](https://github.com/uptrace/bun/commit/caca2a7130288dec49fa26b49c8550140ee52f4c))
|
||||
|
||||
### Features
|
||||
|
||||
- add IQuery and QueryEvent.IQuery
|
||||
([b762942](https://github.com/uptrace/bun/commit/b762942fa3b1d8686d0a559f93f2a6847b83d9c1))
|
||||
- add QueryEvent.Model
|
||||
([7688201](https://github.com/uptrace/bun/commit/7688201b485d14d3e393956f09a3200ea4d4e31d))
|
||||
- **bunotel:** add experimental bun.query.timing metric
|
||||
([2cdb384](https://github.com/uptrace/bun/commit/2cdb384678631ccadac0fb75f524bd5e91e96ee2))
|
||||
- **pgdriver:** add Config.ConnParams to session config params
|
||||
([408caf0](https://github.com/uptrace/bun/commit/408caf0bb579e23e26fc6149efd6851814c22517))
|
||||
- **pgdriver:** allow specifying timeout in DSN
|
||||
([7dbc71b](https://github.com/uptrace/bun/commit/7dbc71b3494caddc2e97d113f00067071b9e19da))
|
||||
|
||||
## [1.0.8](https://github.com/uptrace/bun/compare/v1.0.7...v1.0.8) (2021-09-18)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- don't append soft delete where for insert queries with on conflict clause
|
||||
([27c477c](https://github.com/uptrace/bun/commit/27c477ce071d4c49c99a2531d638ed9f20e33461))
|
||||
- improve bun.NullTime to accept string
|
||||
([73ad6f5](https://github.com/uptrace/bun/commit/73ad6f5640a0a9b09f8df2bc4ab9cb510021c50c))
|
||||
- make allowzero work with auto-detected primary keys
|
||||
([82ca87c](https://github.com/uptrace/bun/commit/82ca87c7c49797d507b31fdaacf8343716d4feff))
|
||||
- support soft deletes on nil model
|
||||
([0556e3c](https://github.com/uptrace/bun/commit/0556e3c63692a7f4e48659d52b55ffd9cca0202a))
|
||||
|
||||
## [1.0.7](https://github.com/uptrace/bun/compare/v1.0.6...v1.0.7) (2021-09-15)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- don't append zero time as NULL without nullzero tag
|
||||
([3b8d9cb](https://github.com/uptrace/bun/commit/3b8d9cb4e39eb17f79a618396bbbe0adbc66b07b))
|
||||
- **pgdriver:** return PostgreSQL DATE as a string
|
||||
([40be0e8](https://github.com/uptrace/bun/commit/40be0e8ea85f8932b7a410a6fc2dd3acd2d18ebc))
|
||||
- specify table alias for soft delete where
|
||||
([5fff1dc](https://github.com/uptrace/bun/commit/5fff1dc1dd74fa48623a24fa79e358a544dfac0b))
|
||||
|
||||
### Features
|
||||
|
||||
- add SelectQuery.Exists helper
|
||||
([c3e59c1](https://github.com/uptrace/bun/commit/c3e59c1bc58b43c4b8e33e7d170ad33a08fbc3c7))
|
||||
|
||||
## [1.0.6](https://github.com/uptrace/bun/compare/v1.0.5...v1.0.6) (2021-09-11)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- change unique tag to create a separate unique constraint
|
||||
([8401615](https://github.com/uptrace/bun/commit/84016155a77ca77613cc054277fefadae3098757))
|
||||
- improve zero checker for ptr values
|
||||
([2b3623d](https://github.com/uptrace/bun/commit/2b3623dd665d873911fd20ca707016929921e862))
|
||||
|
||||
## v1.0.5 - Sep 09 2021
|
||||
|
||||
- chore: tweak bundebug colors
|
||||
- fix: check if table is present when appending columns
|
||||
- fix: copy []byte when scanning
|
||||
|
||||
## v1.0.4 - Sep 08 2021
|
||||
|
||||
- Added support for MariaDB.
|
||||
- Restored default `SET` for `ON CONFLICT DO UPDATE` queries.
|
||||
|
||||
## v1.0.3 - Sep 06 2021
|
||||
|
||||
- Fixed bulk soft deletes.
|
||||
- pgdialect: fixed scanning into an array pointer.
|
||||
|
||||
## v1.0.2 - Sep 04 2021
|
||||
|
||||
- Changed to completely ignore fields marked with `bun:"-"`. If you want to be able to scan into
|
||||
such columns, use `bun:",scanonly"`.
|
||||
- pgdriver: fixed SASL authentication handling.
|
||||
|
||||
## v1.0.1 - Sep 02 2021
|
||||
|
||||
- pgdriver: added erroneous zero writes retry.
|
||||
- Improved column handling in Relation callback.
|
||||
|
||||
## v1.0.0 - Sep 01 2021
|
||||
|
||||
- First stable release.
|
||||
|
||||
## v0.4.1 - Aug 18 2021
|
||||
|
||||
- Fixed migrate package to properly rollback migrations.
|
||||
- Added `allowzero` tag option that undoes `nullzero` option.
|
||||
|
||||
## v0.4.0 - Aug 11 2021
|
||||
|
||||
- Changed `WhereGroup` function to accept `*SelectQuery`.
|
||||
- Fixed query hooks for count queries.
|
||||
|
||||
## v0.3.4 - Jul 19 2021
|
||||
|
||||
- Renamed `migrate.CreateGo` to `CreateGoMigration`.
|
||||
- Added `migrate.WithPackageName` to customize the Go package name in generated migrations.
|
||||
- Renamed `migrate.CreateSQL` to `CreateSQLMigrations` and changed `CreateSQLMigrations` to create
|
||||
both up and down migration files.
|
||||
|
||||
## v0.3.1 - Jul 12 2021
|
||||
|
||||
- Renamed `alias` field struct tag to `alt` so it is not confused with column alias.
|
||||
- Reworked migrate package API. See
|
||||
[migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details.
|
||||
|
||||
## v0.3.0 - Jul 09 2021
|
||||
|
||||
- Changed migrate package to return structured data instead of logging the progress. See
|
||||
[migrate](https://github.com/uptrace/bun/tree/master/example/migrate) example for details.
|
||||
|
||||
## v0.2.14 - Jul 01 2021
|
||||
|
||||
- Added [sqliteshim](https://pkg.go.dev/github.com/uptrace/bun/driver/sqliteshim) by
|
||||
[Ivan Trubach](https://github.com/tie).
|
||||
- Added support for MySQL 5.7 in addition to MySQL 8.
|
||||
|
||||
## v0.2.12 - Jun 29 2021
|
||||
|
||||
- Fixed scanners for net.IP and net.IPNet.
|
||||
|
||||
## v0.2.10 - Jun 29 2021
|
||||
|
||||
- Fixed pgdriver to format passed query args.
|
||||
|
||||
## v0.2.9 - Jun 27 2021
|
||||
|
||||
- Added support for prepared statements in pgdriver.
|
||||
|
||||
## v0.2.7 - Jun 26 2021
|
||||
|
||||
- Added `UpdateQuery.Bulk` helper to generate bulk-update queries.
|
||||
|
||||
Before:
|
||||
|
||||
```go
|
||||
models := []Model{
|
||||
{42, "hello"},
|
||||
{43, "world"},
|
||||
}
|
||||
return db.NewUpdate().
|
||||
With("_data", db.NewValues(&models)).
|
||||
Model(&models).
|
||||
Table("_data").
|
||||
Set("model.str = _data.str").
|
||||
Where("model.id = _data.id")
|
||||
```
|
||||
|
||||
Now:
|
||||
|
||||
```go
|
||||
db.NewUpdate().
|
||||
Model(&models).
|
||||
Bulk()
|
||||
```
|
||||
|
||||
## v0.2.5 - Jun 25 2021
|
||||
|
||||
- Changed time.Time to always append zero time as `NULL`.
|
||||
- Added `db.RunInTx` helper.
|
||||
|
||||
## v0.2.4 - Jun 21 2021
|
||||
|
||||
- Added SSL support to pgdriver.
|
||||
|
||||
## v0.2.3 - Jun 20 2021
|
||||
|
||||
- Replaced `ForceDelete(ctx)` with `ForceDelete().Exec(ctx)` for soft deletes.
|
||||
|
||||
## v0.2.1 - Jun 17 2021
|
||||
|
||||
- Renamed `DBI` to `IConn`. `IConn` is a common interface for `*sql.DB`, `*sql.Conn`, and `*sql.Tx`.
|
||||
- Added `IDB`. `IDB` is a common interface for `*bun.DB`, `bun.Conn`, and `bun.Tx`.
|
||||
|
||||
## v0.2.0 - Jun 16 2021
|
||||
|
||||
- Changed [model hooks](https://bun.uptrace.dev/guide/hooks.html#model-hooks). See
|
||||
[model-hooks](example/model-hooks) example.
|
||||
- Renamed `has-one` to `belongs-to`. Renamed `belongs-to` to `has-one`. Previously Bun used
|
||||
incorrect names for these relations.
|
@ -0,0 +1,34 @@
|
||||
## Running tests
|
||||
|
||||
To run tests, you need Docker which starts PostgreSQL and MySQL servers:
|
||||
|
||||
```shell
|
||||
cd internal/dbtest
|
||||
./test.sh
|
||||
```
|
||||
|
||||
To ease debugging, you can run tests and print all executed queries:
|
||||
|
||||
```shell
|
||||
BUNDEBUG=2 TZ= go test -run=TestName
|
||||
```
|
||||
|
||||
## Releasing
|
||||
|
||||
1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
|
||||
|
||||
```shell
|
||||
TAG=v1.0.0 ./scripts/release.sh
|
||||
```
|
||||
|
||||
2. Open a pull request and wait for the build to finish.
|
||||
|
||||
3. Merge the pull request and run `tag.sh` to create tags for packages:
|
||||
|
||||
```shell
|
||||
TAG=v1.0.0 ./scripts/tag.sh
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
To contribute to the docs visit https://github.com/go-bun/bun-docs
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,30 @@
|
||||
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||
EXAMPLE_GO_MOD_DIRS := $(shell find ./example/ -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||
|
||||
test:
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "go test in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
go test && \
|
||||
env GOOS=linux GOARCH=386 go test && \
|
||||
go vet); \
|
||||
done
|
||||
|
||||
go_mod_tidy:
|
||||
go get -u && go mod tidy -go=1.17
|
||||
set -e; for dir in $(ALL_GO_MOD_DIRS); do \
|
||||
echo "go mod tidy in $${dir}"; \
|
||||
(cd "$${dir}" && \
|
||||
go get -u ./... && \
|
||||
go mod tidy -go=1.17); \
|
||||
done
|
||||
|
||||
fmt:
|
||||
gofmt -w -s ./
|
||||
goimports -w -local github.com/uptrace/bun ./
|
||||
|
||||
run-examples:
|
||||
set -e; for dir in $(EXAMPLE_GO_MOD_DIRS); do \
|
||||
echo "go run . in $${dir}"; \
|
||||
(cd "$${dir}" && go run .); \
|
||||
done
|
@ -0,0 +1,320 @@
|
||||
# SQL-first Golang ORM for PostgreSQL, MySQL, MSSQL, and SQLite
|
||||
|
||||
[![build workflow](https://github.com/uptrace/bun/actions/workflows/build.yml/badge.svg)](https://github.com/uptrace/bun/actions)
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun)](https://pkg.go.dev/github.com/uptrace/bun)
|
||||
[![Documentation](https://img.shields.io/badge/bun-documentation-informational)](https://bun.uptrace.dev/)
|
||||
[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
|
||||
|
||||
> Bun is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). Uptrace
|
||||
> is an open source and blazingly fast
|
||||
> [distributed tracing tool](https://get.uptrace.dev/compare/distributed-tracing-tools.html) powered
|
||||
> by OpenTelemetry and ClickHouse. Give it a star as well!
|
||||
|
||||
## Features
|
||||
|
||||
- Works with [PostgreSQL](https://bun.uptrace.dev/guide/drivers.html#postgresql),
|
||||
[MySQL](https://bun.uptrace.dev/guide/drivers.html#mysql) (including MariaDB),
|
||||
[MSSQL](https://bun.uptrace.dev/guide/drivers.html#mssql),
|
||||
[SQLite](https://bun.uptrace.dev/guide/drivers.html#sqlite).
|
||||
- [ORM-like](/example/basic/) experience using good old SQL. Bun supports structs, map, scalars, and
|
||||
slices of map/structs/scalars.
|
||||
- [Bulk inserts](https://bun.uptrace.dev/guide/query-insert.html).
|
||||
- [Bulk updates](https://bun.uptrace.dev/guide/query-update.html) using common table expressions.
|
||||
- [Bulk deletes](https://bun.uptrace.dev/guide/query-delete.html).
|
||||
- [Fixtures](https://bun.uptrace.dev/guide/fixtures.html).
|
||||
- [Migrations](https://bun.uptrace.dev/guide/migrations.html).
|
||||
- [Soft deletes](https://bun.uptrace.dev/guide/soft-deletes.html).
|
||||
|
||||
Resources:
|
||||
|
||||
- [**Get started**](https://bun.uptrace.dev/guide/golang-orm.html)
|
||||
- [Examples](https://github.com/uptrace/bun/tree/master/example)
|
||||
- [Discussions](https://github.com/uptrace/bun/discussions)
|
||||
- [Chat](https://discord.gg/rWtp5Aj)
|
||||
- [Reference](https://pkg.go.dev/github.com/uptrace/bun)
|
||||
- [Starter kit](https://github.com/go-bun/bun-starter-kit)
|
||||
|
||||
Projects using Bun:
|
||||
|
||||
- [gotosocial](https://github.com/superseriousbusiness/gotosocial) - Golang fediverse server.
|
||||
- [alexedwards/scs](https://github.com/alexedwards/scs) - HTTP Session Management for Go.
|
||||
- [emerald-web3-gateway](https://github.com/oasisprotocol/emerald-web3-gateway) - Web3 Gateway for
|
||||
the Oasis Emerald paratime.
|
||||
- [lndhub.go](https://github.com/getAlby/lndhub.go) - accounting wrapper for the Lightning Network.
|
||||
- [RealWorld app](https://github.com/go-bun/bun-realworld-app)
|
||||
- And hundreds more.
|
||||
|
||||
## Benchmark
|
||||
|
||||
[https://github.com/davars/dbeval](https://github.com/davars/dbeval)
|
||||
|
||||
<details>
|
||||
<summary>results</summary>
|
||||
|
||||
```
|
||||
BenchmarkInsert
|
||||
BenchmarkInsert/*dbeval.Memory/Authors
|
||||
BenchmarkInsert/*dbeval.Memory/Authors-4 84450 12104 ns/op 2623 B/op 70 allocs/op
|
||||
BenchmarkInsert/*dbeval.Xorm/Authors
|
||||
BenchmarkInsert/*dbeval.Xorm/Authors-4 7291 153505 ns/op 9024 B/op 311 allocs/op
|
||||
BenchmarkInsert/*dbeval.UpperDB/Authors
|
||||
BenchmarkInsert/*dbeval.UpperDB/Authors-4 4608 223672 ns/op 24160 B/op 1100 allocs/op
|
||||
BenchmarkInsert/*dbeval.Bun/Authors
|
||||
BenchmarkInsert/*dbeval.Bun/Authors-4 6034 186439 ns/op 6818 B/op 80 allocs/op
|
||||
BenchmarkInsert/*dbeval.PQ/Authors
|
||||
BenchmarkInsert/*dbeval.PQ/Authors-4 1141 907494 ns/op 6487 B/op 193 allocs/op
|
||||
BenchmarkInsert/*dbeval.SQLX/Authors
|
||||
BenchmarkInsert/*dbeval.SQLX/Authors-4 1165 916987 ns/op 10089 B/op 271 allocs/op
|
||||
BenchmarkInsert/*dbeval.Ozzo/Authors
|
||||
BenchmarkInsert/*dbeval.Ozzo/Authors-4 1105 1058082 ns/op 27826 B/op 588 allocs/op
|
||||
BenchmarkInsert/*dbeval.PGXStdlib/Authors
|
||||
BenchmarkInsert/*dbeval.PGXStdlib/Authors-4 1228 900207 ns/op 6032 B/op 180 allocs/op
|
||||
BenchmarkInsert/*dbeval.Gorm/Authors
|
||||
BenchmarkInsert/*dbeval.Gorm/Authors-4 946 1184285 ns/op 35634 B/op 918 allocs/op
|
||||
BenchmarkInsert/*dbeval.PGX/Authors
|
||||
BenchmarkInsert/*dbeval.PGX/Authors-4 1116 923728 ns/op 3839 B/op 130 allocs/op
|
||||
BenchmarkInsert/*dbeval.DBR/Authors
|
||||
BenchmarkInsert/*dbeval.DBR/Authors-4 5800 183982 ns/op 8646 B/op 230 allocs/op
|
||||
BenchmarkInsert/*dbeval.GoPG/Authors
|
||||
BenchmarkInsert/*dbeval.GoPG/Authors-4 6110 173923 ns/op 2906 B/op 87 allocs/op
|
||||
|
||||
BenchmarkInsert/*dbeval.DBR/Articles
|
||||
BenchmarkInsert/*dbeval.DBR/Articles-4 1706 684466 ns/op 133346 B/op 1614 allocs/op
|
||||
BenchmarkInsert/*dbeval.PQ/Articles
|
||||
BenchmarkInsert/*dbeval.PQ/Articles-4 884 1249791 ns/op 100403 B/op 1491 allocs/op
|
||||
BenchmarkInsert/*dbeval.PGX/Articles
|
||||
BenchmarkInsert/*dbeval.PGX/Articles-4 916 1288143 ns/op 83539 B/op 1392 allocs/op
|
||||
BenchmarkInsert/*dbeval.GoPG/Articles
|
||||
BenchmarkInsert/*dbeval.GoPG/Articles-4 1726 622639 ns/op 78638 B/op 1359 allocs/op
|
||||
BenchmarkInsert/*dbeval.SQLX/Articles
|
||||
BenchmarkInsert/*dbeval.SQLX/Articles-4 860 1262599 ns/op 92030 B/op 1574 allocs/op
|
||||
BenchmarkInsert/*dbeval.Gorm/Articles
|
||||
BenchmarkInsert/*dbeval.Gorm/Articles-4 782 1421550 ns/op 136534 B/op 2411 allocs/op
|
||||
BenchmarkInsert/*dbeval.PGXStdlib/Articles
|
||||
BenchmarkInsert/*dbeval.PGXStdlib/Articles-4 938 1230576 ns/op 86743 B/op 1441 allocs/op
|
||||
BenchmarkInsert/*dbeval.Bun/Articles
|
||||
BenchmarkInsert/*dbeval.Bun/Articles-4 1843 626681 ns/op 101610 B/op 1323 allocs/op
|
||||
BenchmarkInsert/*dbeval.Xorm/Articles
|
||||
BenchmarkInsert/*dbeval.Xorm/Articles-4 1677 650244 ns/op 126677 B/op 1752 allocs/op
|
||||
BenchmarkInsert/*dbeval.Memory/Articles
|
||||
BenchmarkInsert/*dbeval.Memory/Articles-4 1988 1223308 ns/op 77576 B/op 1310 allocs/op
|
||||
BenchmarkInsert/*dbeval.UpperDB/Articles
|
||||
BenchmarkInsert/*dbeval.UpperDB/Articles-4 1696 687130 ns/op 139680 B/op 2862 allocs/op
|
||||
BenchmarkInsert/*dbeval.Ozzo/Articles
|
||||
BenchmarkInsert/*dbeval.Ozzo/Articles-4 697 1496859 ns/op 114780 B/op 1950 allocs/op
|
||||
|
||||
BenchmarkFindAuthorByID
|
||||
BenchmarkFindAuthorByID/*dbeval.UpperDB
|
||||
BenchmarkFindAuthorByID/*dbeval.UpperDB-4 10184 117527 ns/op 9953 B/op 441 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.Bun
|
||||
BenchmarkFindAuthorByID/*dbeval.Bun-4 20716 54261 ns/op 5096 B/op 15 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.Ozzo
|
||||
BenchmarkFindAuthorByID/*dbeval.Ozzo-4 11166 91043 ns/op 3088 B/op 64 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.PQ
|
||||
BenchmarkFindAuthorByID/*dbeval.PQ-4 13875 86171 ns/op 844 B/op 24 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.PGX
|
||||
BenchmarkFindAuthorByID/*dbeval.PGX-4 13846 79983 ns/op 719 B/op 15 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.Memory
|
||||
BenchmarkFindAuthorByID/*dbeval.Memory-4 14113720 82.33 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.Xorm
|
||||
BenchmarkFindAuthorByID/*dbeval.Xorm-4 12027 98519 ns/op 3633 B/op 106 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.Gorm
|
||||
BenchmarkFindAuthorByID/*dbeval.Gorm-4 11521 102241 ns/op 6592 B/op 143 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.PGXStdlib
|
||||
BenchmarkFindAuthorByID/*dbeval.PGXStdlib-4 13933 82626 ns/op 1174 B/op 28 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.DBR
|
||||
BenchmarkFindAuthorByID/*dbeval.DBR-4 21920 51175 ns/op 1756 B/op 39 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.SQLX
|
||||
BenchmarkFindAuthorByID/*dbeval.SQLX-4 13603 80788 ns/op 1327 B/op 32 allocs/op
|
||||
BenchmarkFindAuthorByID/*dbeval.GoPG
|
||||
BenchmarkFindAuthorByID/*dbeval.GoPG-4 23174 50042 ns/op 869 B/op 17 allocs/op
|
||||
|
||||
BenchmarkFindAuthorByName
|
||||
BenchmarkFindAuthorByName/*dbeval.SQLX
|
||||
BenchmarkFindAuthorByName/*dbeval.SQLX-4 1070 1065272 ns/op 126348 B/op 4018 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.Bun
|
||||
BenchmarkFindAuthorByName/*dbeval.Bun-4 877 1231377 ns/op 115803 B/op 5005 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.Xorm
|
||||
BenchmarkFindAuthorByName/*dbeval.Xorm-4 471 2345445 ns/op 455711 B/op 19080 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.DBR
|
||||
BenchmarkFindAuthorByName/*dbeval.DBR-4 954 1089977 ns/op 120070 B/op 6023 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.PQ
|
||||
BenchmarkFindAuthorByName/*dbeval.PQ-4 1333 784400 ns/op 87159 B/op 4006 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.GoPG
|
||||
BenchmarkFindAuthorByName/*dbeval.GoPG-4 1580 770966 ns/op 87525 B/op 3028 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.UpperDB
|
||||
BenchmarkFindAuthorByName/*dbeval.UpperDB-4 789 1314164 ns/op 190689 B/op 6428 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.Ozzo
|
||||
BenchmarkFindAuthorByName/*dbeval.Ozzo-4 948 1255282 ns/op 238764 B/op 6053 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.PGXStdlib
|
||||
BenchmarkFindAuthorByName/*dbeval.PGXStdlib-4 1279 920391 ns/op 126163 B/op 4014 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.PGX
|
||||
BenchmarkFindAuthorByName/*dbeval.PGX-4 1364 780970 ns/op 101967 B/op 2028 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.Gorm
|
||||
BenchmarkFindAuthorByName/*dbeval.Gorm-4 340 3445818 ns/op 1573637 B/op 27102 allocs/op
|
||||
BenchmarkFindAuthorByName/*dbeval.Memory
|
||||
BenchmarkFindAuthorByName/*dbeval.Memory-4 38081223 31.24 ns/op 0 B/op 0 allocs/op
|
||||
|
||||
BenchmarkRecentArticles
|
||||
BenchmarkRecentArticles/*dbeval.PGXStdlib
|
||||
BenchmarkRecentArticles/*dbeval.PGXStdlib-4 358 3344119 ns/op 3425578 B/op 14177 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.GoPG
|
||||
BenchmarkRecentArticles/*dbeval.GoPG-4 364 3156372 ns/op 1794091 B/op 10032 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.Xorm
|
||||
BenchmarkRecentArticles/*dbeval.Xorm-4 157 7567835 ns/op 5018011 B/op 81425 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.Gorm
|
||||
BenchmarkRecentArticles/*dbeval.Gorm-4 139 7980084 ns/op 6776277 B/op 85418 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.SQLX
|
||||
BenchmarkRecentArticles/*dbeval.SQLX-4 338 3289802 ns/op 3425890 B/op 14181 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.Ozzo
|
||||
BenchmarkRecentArticles/*dbeval.Ozzo-4 320 3508322 ns/op 4025966 B/op 18207 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.DBR
|
||||
BenchmarkRecentArticles/*dbeval.DBR-4 237 5248644 ns/op 3331003 B/op 21370 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.Bun
|
||||
BenchmarkRecentArticles/*dbeval.Bun-4 280 4528582 ns/op 1864362 B/op 15965 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.UpperDB
|
||||
BenchmarkRecentArticles/*dbeval.UpperDB-4 297 3704663 ns/op 3607287 B/op 18542 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.PQ
|
||||
BenchmarkRecentArticles/*dbeval.PQ-4 308 3489229 ns/op 3277050 B/op 17359 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.Memory
|
||||
BenchmarkRecentArticles/*dbeval.Memory-4 29590380 42.27 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkRecentArticles/*dbeval.PGX
|
||||
BenchmarkRecentArticles/*dbeval.PGX-4 356 3345500 ns/op 3297316 B/op 6226 allocs/op
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
[https://github.com/frederikhors/orm-benchmark](https://github.com/frederikhors/orm-benchmark)
|
||||
|
||||
<details>
|
||||
<summary>results</summary>
|
||||
|
||||
```
|
||||
4000 times - Insert
|
||||
raw_stmt: 0.38s 94280 ns/op 718 B/op 14 allocs/op
|
||||
raw: 0.39s 96719 ns/op 718 B/op 13 allocs/op
|
||||
beego_orm: 0.48s 118994 ns/op 2411 B/op 56 allocs/op
|
||||
bun: 0.57s 142285 ns/op 918 B/op 12 allocs/op
|
||||
pg: 0.58s 145496 ns/op 1235 B/op 12 allocs/op
|
||||
gorm: 0.70s 175294 ns/op 6665 B/op 88 allocs/op
|
||||
xorm: 0.76s 189533 ns/op 3032 B/op 94 allocs/op
|
||||
|
||||
4000 times - MultiInsert 100 row
|
||||
raw: 4.59s 1147385 ns/op 135155 B/op 916 allocs/op
|
||||
raw_stmt: 4.59s 1148137 ns/op 131076 B/op 916 allocs/op
|
||||
beego_orm: 5.50s 1375637 ns/op 179962 B/op 2747 allocs/op
|
||||
bun: 6.18s 1544648 ns/op 4265 B/op 214 allocs/op
|
||||
pg: 7.01s 1753495 ns/op 5039 B/op 114 allocs/op
|
||||
gorm: 9.52s 2379219 ns/op 293956 B/op 3729 allocs/op
|
||||
xorm: 11.66s 2915478 ns/op 286140 B/op 7422 allocs/op
|
||||
|
||||
4000 times - Update
|
||||
raw_stmt: 0.26s 65781 ns/op 773 B/op 14 allocs/op
|
||||
raw: 0.31s 77209 ns/op 757 B/op 13 allocs/op
|
||||
beego_orm: 0.43s 107064 ns/op 1802 B/op 47 allocs/op
|
||||
bun: 0.56s 139839 ns/op 589 B/op 4 allocs/op
|
||||
pg: 0.60s 149608 ns/op 896 B/op 11 allocs/op
|
||||
gorm: 0.74s 185970 ns/op 6604 B/op 81 allocs/op
|
||||
xorm: 0.81s 203240 ns/op 2994 B/op 119 allocs/op
|
||||
|
||||
4000 times - Read
|
||||
raw: 0.33s 81671 ns/op 2081 B/op 49 allocs/op
|
||||
raw_stmt: 0.34s 85847 ns/op 2112 B/op 50 allocs/op
|
||||
beego_orm: 0.38s 94777 ns/op 2106 B/op 75 allocs/op
|
||||
pg: 0.42s 106148 ns/op 1526 B/op 22 allocs/op
|
||||
bun: 0.43s 106904 ns/op 1319 B/op 18 allocs/op
|
||||
gorm: 0.65s 162221 ns/op 5240 B/op 108 allocs/op
|
||||
xorm: 1.13s 281738 ns/op 8326 B/op 237 allocs/op
|
||||
|
||||
4000 times - MultiRead limit 100
|
||||
raw: 1.52s 380351 ns/op 38356 B/op 1037 allocs/op
|
||||
raw_stmt: 1.54s 385541 ns/op 38388 B/op 1038 allocs/op
|
||||
pg: 1.86s 465468 ns/op 24045 B/op 631 allocs/op
|
||||
bun: 2.58s 645354 ns/op 30009 B/op 1122 allocs/op
|
||||
beego_orm: 2.93s 732028 ns/op 55280 B/op 3077 allocs/op
|
||||
gorm: 4.97s 1241831 ns/op 71628 B/op 3877 allocs/op
|
||||
xorm: doesn't work
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
## Why another database client?
|
||||
|
||||
So you can elegantly write complex queries:
|
||||
|
||||
```go
|
||||
regionalSales := db.NewSelect().
|
||||
ColumnExpr("region").
|
||||
ColumnExpr("SUM(amount) AS total_sales").
|
||||
TableExpr("orders").
|
||||
GroupExpr("region")
|
||||
|
||||
topRegions := db.NewSelect().
|
||||
ColumnExpr("region").
|
||||
TableExpr("regional_sales").
|
||||
Where("total_sales > (SELECT SUM(total_sales) / 10 FROM regional_sales)")
|
||||
|
||||
var items []map[string]interface{}
|
||||
err := db.NewSelect().
|
||||
With("regional_sales", regionalSales).
|
||||
With("top_regions", topRegions).
|
||||
ColumnExpr("region").
|
||||
ColumnExpr("product").
|
||||
ColumnExpr("SUM(quantity) AS product_units").
|
||||
ColumnExpr("SUM(amount) AS product_sales").
|
||||
TableExpr("orders").
|
||||
Where("region IN (SELECT region FROM top_regions)").
|
||||
GroupExpr("region").
|
||||
GroupExpr("product").
|
||||
Scan(ctx, &items)
|
||||
```
|
||||
|
||||
```sql
|
||||
WITH regional_sales AS (
|
||||
SELECT region, SUM(amount) AS total_sales
|
||||
FROM orders
|
||||
GROUP BY region
|
||||
), top_regions AS (
|
||||
SELECT region
|
||||
FROM regional_sales
|
||||
WHERE total_sales > (SELECT SUM(total_sales)/10 FROM regional_sales)
|
||||
)
|
||||
SELECT region,
|
||||
product,
|
||||
SUM(quantity) AS product_units,
|
||||
SUM(amount) AS product_sales
|
||||
FROM orders
|
||||
WHERE region IN (SELECT region FROM top_regions)
|
||||
GROUP BY region, product
|
||||
```
|
||||
|
||||
And scan results into scalars, structs, maps, slices of structs/maps/scalars:
|
||||
|
||||
```go
|
||||
users := make([]User, 0)
|
||||
if err := db.NewSelect().Model(&users).OrderExpr("id ASC").Scan(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
user1 := new(User)
|
||||
if err := db.NewSelect().Model(user1).Where("id = ?", 1).Scan(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
```
|
||||
|
||||
See [**Getting started**](https://bun.uptrace.dev/guide/golang-orm.html) guide and check
|
||||
[examples](example).
|
||||
|
||||
## See also
|
||||
|
||||
- [Golang HTTP router](https://github.com/uptrace/bunrouter)
|
||||
- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse)
|
||||
- [Golang msgpack](https://github.com/vmihailenco/msgpack)
|
||||
|
||||
## Contributors
|
||||
|
||||
Thanks to all the people who already contributed!
|
||||
|
||||
<a href="https://github.com/uptrace/bun/graphs/contributors">
|
||||
<img src="https://contributors-img.web.app/image?repo=uptrace/bun" />
|
||||
</a>
|
@ -0,0 +1,84 @@
|
||||
package bun
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/uptrace/bun/internal"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// Aliases re-exported from the schema package so that callers can depend
// on the bun package alone.
type (
	Safe  = schema.Safe
	Ident = schema.Ident

	NullTime  = schema.NullTime
	BaseModel = schema.BaseModel
	Query     = schema.Query

	// BeforeAppendModelHook runs before a model is appended to a query.
	BeforeAppendModelHook = schema.BeforeAppendModelHook

	// BeforeScanRowHook and AfterScanRowHook run around scanning a single row.
	BeforeScanRowHook = schema.BeforeScanRowHook
	AfterScanRowHook  = schema.AfterScanRowHook

	// DEPRECATED. Use BeforeScanRowHook instead.
	BeforeScanHook = schema.BeforeScanHook
	// DEPRECATED. Use AfterScanRowHook instead.
	AfterScanHook = schema.AfterScanHook
)
|
||||
|
||||
// BeforeSelectHook is implemented by models that want a callback before a
// SelectQuery is executed.
type BeforeSelectHook interface {
	BeforeSelect(ctx context.Context, query *SelectQuery) error
}

// AfterSelectHook is implemented by models that want a callback after a
// SelectQuery has been executed.
type AfterSelectHook interface {
	AfterSelect(ctx context.Context, query *SelectQuery) error
}

// BeforeInsertHook is implemented by models that want a callback before an
// InsertQuery is executed.
type BeforeInsertHook interface {
	BeforeInsert(ctx context.Context, query *InsertQuery) error
}

// AfterInsertHook is implemented by models that want a callback after an
// InsertQuery has been executed.
type AfterInsertHook interface {
	AfterInsert(ctx context.Context, query *InsertQuery) error
}

// BeforeUpdateHook is implemented by models that want a callback before an
// UpdateQuery is executed.
type BeforeUpdateHook interface {
	BeforeUpdate(ctx context.Context, query *UpdateQuery) error
}

// AfterUpdateHook is implemented by models that want a callback after an
// UpdateQuery has been executed.
type AfterUpdateHook interface {
	AfterUpdate(ctx context.Context, query *UpdateQuery) error
}

// BeforeDeleteHook is implemented by models that want a callback before a
// DeleteQuery is executed.
type BeforeDeleteHook interface {
	BeforeDelete(ctx context.Context, query *DeleteQuery) error
}

// AfterDeleteHook is implemented by models that want a callback after a
// DeleteQuery has been executed.
type AfterDeleteHook interface {
	AfterDelete(ctx context.Context, query *DeleteQuery) error
}

// BeforeCreateTableHook is implemented by models that want a callback
// before their table is created.
type BeforeCreateTableHook interface {
	BeforeCreateTable(ctx context.Context, query *CreateTableQuery) error
}

// AfterCreateTableHook is implemented by models that want a callback after
// their table has been created.
type AfterCreateTableHook interface {
	AfterCreateTable(ctx context.Context, query *CreateTableQuery) error
}

// BeforeDropTableHook is implemented by models that want a callback before
// their table is dropped.
type BeforeDropTableHook interface {
	BeforeDropTable(ctx context.Context, query *DropTableQuery) error
}

// AfterDropTableHook is implemented by models that want a callback after
// their table has been dropped.
type AfterDropTableHook interface {
	AfterDropTable(ctx context.Context, query *DropTableQuery) error
}
|
||||
|
||||
// SetLogger overwrites the default Bun logger.
func SetLogger(logger internal.Logging) {
	internal.Logger = logger
}
|
||||
|
||||
// In wraps a slice for use in an IN (...) expression; rendering is
// delegated to schema.In.
func In(slice interface{}) schema.QueryAppender {
	return schema.In(slice)
}
|
@ -0,0 +1 @@
|
||||
module.exports = { extends: ['@commitlint/config-conventional'] }
|
@ -0,0 +1,684 @@
|
||||
package bun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/uptrace/bun/dialect/feature"
|
||||
"github.com/uptrace/bun/internal"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
const (
	// discardUnknownColumns is the flag set by WithDiscardUnknownColumns.
	discardUnknownColumns internal.Flag = 1 << iota
)

// DBStats holds query counters for a DB. Read a snapshot via
// (*DB).DBStats rather than accessing fields directly.
type DBStats struct {
	Queries uint32
	Errors  uint32
}
|
||||
|
||||
// DBOption configures a DB during NewDB.
type DBOption func(db *DB)

// WithDiscardUnknownColumns returns an option that sets the
// discardUnknownColumns flag on the DB.
func WithDiscardUnknownColumns() DBOption {
	return func(db *DB) {
		db.flags = db.flags.Set(discardUnknownColumns)
	}
}
|
||||
|
||||
// DB wraps *sql.DB with dialect-aware query building, query formatting,
// and query hooks.
type DB struct {
	*sql.DB

	dialect  schema.Dialect
	features feature.Feature

	// queryHooks are invoked around every query; see AddQueryHook.
	queryHooks []QueryHook

	fmter schema.Formatter
	flags internal.Flag

	// stats fields are updated atomically; read via DBStats().
	stats DBStats
}
|
||||
|
||||
// NewDB wraps an opened *sql.DB in a bun DB using the given dialect and
// applies the options in order.
func NewDB(sqldb *sql.DB, dialect schema.Dialect, opts ...DBOption) *DB {
	// Give the dialect access to the connection before any queries run.
	dialect.Init(sqldb)

	db := &DB{
		DB:       sqldb,
		dialect:  dialect,
		features: dialect.Features(),
		fmter:    schema.NewFormatter(dialect),
	}

	for _, opt := range opts {
		opt(db)
	}

	return db
}
|
||||
|
||||
func (db *DB) String() string {
|
||||
var b strings.Builder
|
||||
b.WriteString("DB<dialect=")
|
||||
b.WriteString(db.dialect.Name().String())
|
||||
b.WriteString(">")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// DBStats returns a copy of the DB counters, reading each field with an
// atomic load.
func (db *DB) DBStats() DBStats {
	return DBStats{
		Queries: atomic.LoadUint32(&db.stats.Queries),
		Errors:  atomic.LoadUint32(&db.stats.Errors),
	}
}
|
||||
|
||||
// NewValues returns a VALUES query builder for the model.
func (db *DB) NewValues(model interface{}) *ValuesQuery {
	return NewValuesQuery(db, model)
}

// NewSelect returns a SELECT query builder.
func (db *DB) NewSelect() *SelectQuery {
	return NewSelectQuery(db)
}

// NewInsert returns an INSERT query builder.
func (db *DB) NewInsert() *InsertQuery {
	return NewInsertQuery(db)
}

// NewUpdate returns an UPDATE query builder.
func (db *DB) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(db)
}

// NewDelete returns a DELETE query builder.
func (db *DB) NewDelete() *DeleteQuery {
	return NewDeleteQuery(db)
}

// NewCreateTable returns a CREATE TABLE query builder.
func (db *DB) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(db)
}

// NewDropTable returns a DROP TABLE query builder.
func (db *DB) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(db)
}

// NewCreateIndex returns a CREATE INDEX query builder.
func (db *DB) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(db)
}

// NewDropIndex returns a DROP INDEX query builder.
func (db *DB) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(db)
}

// NewTruncateTable returns a TRUNCATE TABLE query builder.
func (db *DB) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(db)
}

// NewAddColumn returns an ALTER TABLE ... ADD COLUMN query builder.
func (db *DB) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(db)
}

// NewDropColumn returns an ALTER TABLE ... DROP COLUMN query builder.
func (db *DB) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(db)
}
|
||||
|
||||
// ResetModel drops (if present, with CASCADE) and re-creates the table
// for each of the given models. It is destructive: any existing data in
// those tables is lost. It stops at the first error.
func (db *DB) ResetModel(ctx context.Context, models ...interface{}) error {
	for _, model := range models {
		if _, err := db.NewDropTable().Model(model).IfExists().Cascade().Exec(ctx); err != nil {
			return err
		}
		if _, err := db.NewCreateTable().Model(model).Exec(ctx); err != nil {
			return err
		}
	}
	return nil
}

// Dialect returns the SQL dialect the DB was created with.
func (db *DB) Dialect() schema.Dialect {
	return db.dialect
}
|
||||
|
||||
// ScanRows scans all remaining rows into dest and closes rows.
func (db *DB) ScanRows(ctx context.Context, rows *sql.Rows, dest ...interface{}) error {
	defer rows.Close()

	model, err := newModel(db, dest)
	if err != nil {
		return err
	}

	_, err = model.ScanRows(ctx, rows)
	if err != nil {
		return err
	}

	// Surface any error encountered during row iteration.
	return rows.Err()
}

// ScanRow scans the current row into dest. The model built from dest must
// support row-by-row scanning; otherwise an error is returned.
func (db *DB) ScanRow(ctx context.Context, rows *sql.Rows, dest ...interface{}) error {
	model, err := newModel(db, dest)
	if err != nil {
		return err
	}

	rs, ok := model.(rowScanner)
	if !ok {
		return fmt.Errorf("bun: %T does not support ScanRow", model)
	}

	return rs.ScanRow(ctx, rows)
}
|
||||
|
||||
// queryHookIniter is optionally implemented by hooks that need a
// reference to the DB they are installed on.
type queryHookIniter interface {
	Init(db *DB)
}

// AddQueryHook registers a hook that is invoked around every query.
// If the hook implements queryHookIniter, Init is called first.
func (db *DB) AddQueryHook(hook QueryHook) {
	if initer, ok := hook.(queryHookIniter); ok {
		initer.Init(db)
	}
	db.queryHooks = append(db.queryHooks, hook)
}
|
||||
|
||||
// Table returns the table definition for the given struct type from the
// dialect's table registry.
func (db *DB) Table(typ reflect.Type) *schema.Table {
	return db.dialect.Tables().Get(typ)
}

// RegisterModel registers models by name so they can be referenced in table relations
// and fixtures.
func (db *DB) RegisterModel(models ...interface{}) {
	db.dialect.Tables().Register(models...)
}
|
||||
|
||||
// clone returns a shallow copy of db. The queryHooks slice has its
// capacity clamped to its length, so appending a hook to the clone cannot
// overwrite the original's backing array.
func (db *DB) clone() *DB {
	clone := *db

	l := len(clone.queryHooks)
	clone.queryHooks = clone.queryHooks[:l:l]

	return &clone
}

// WithNamedArg returns a copy of the DB whose formatter resolves the
// named placeholder to value; the receiver is left unchanged.
func (db *DB) WithNamedArg(name string, value interface{}) *DB {
	clone := db.clone()
	clone.fmter = clone.fmter.WithNamedArg(name, value)
	return clone
}

// Formatter returns the query formatter used by this DB.
func (db *DB) Formatter() schema.Formatter {
	return db.fmter
}
|
||||
|
||||
// UpdateFQN returns a fully qualified column name. For MySQL, it returns the column name with
// the table alias. For other RDBMS, it returns just the column name.
func (db *DB) UpdateFQN(alias, column string) Ident {
	if db.HasFeature(feature.UpdateMultiTable) {
		return Ident(alias + "." + column)
	}
	return Ident(column)
}

// HasFeature uses feature package to report whether the underlying DBMS supports this feature.
func (db *DB) HasFeature(feat feature.Feature) bool {
	return db.fmter.HasFeature(feat)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Exec is like ExecContext with context.Background.
func (db *DB) Exec(query string, args ...interface{}) (sql.Result, error) {
	return db.ExecContext(context.Background(), query, args...)
}

// ExecContext formats the query with args, runs the query hooks around
// it, and executes it on the underlying *sql.DB.
func (db *DB) ExecContext(
	ctx context.Context, query string, args ...interface{},
) (sql.Result, error) {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	res, err := db.DB.ExecContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, res, err)
	return res, err
}

// Query is like QueryContext with context.Background.
func (db *DB) Query(query string, args ...interface{}) (*sql.Rows, error) {
	return db.QueryContext(context.Background(), query, args...)
}

// QueryContext formats the query with args, runs the query hooks around
// it, and returns the resulting rows.
func (db *DB) QueryContext(
	ctx context.Context, query string, args ...interface{},
) (*sql.Rows, error) {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	rows, err := db.DB.QueryContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, nil, err)
	return rows, err
}

// QueryRow is like QueryRowContext with context.Background.
func (db *DB) QueryRow(query string, args ...interface{}) *sql.Row {
	return db.QueryRowContext(context.Background(), query, args...)
}

// QueryRowContext formats the query with args, runs the query hooks
// around it, and returns the single resulting row.
func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	formattedQuery := db.format(query, args)
	ctx, event := db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	row := db.DB.QueryRowContext(ctx, formattedQuery)
	db.afterQuery(ctx, event, nil, row.Err())
	return row
}

// format interpolates args into query using the dialect formatter.
func (db *DB) format(query string, args []interface{}) string {
	return db.fmter.FormatQuery(query, args...)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Conn wraps a single *sql.Conn with bun query formatting and hooks.
type Conn struct {
	db *DB
	*sql.Conn
}

// Conn reserves a single connection from the pool and wraps it.
func (db *DB) Conn(ctx context.Context) (Conn, error) {
	conn, err := db.DB.Conn(ctx)
	if err != nil {
		return Conn{}, err
	}
	return Conn{
		db:   db,
		Conn: conn,
	}, nil
}

// ExecContext formats the query with args, runs the query hooks around
// it, and executes it on this connection.
func (c Conn) ExecContext(
	ctx context.Context, query string, args ...interface{},
) (sql.Result, error) {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	res, err := c.Conn.ExecContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, res, err)
	return res, err
}

// QueryContext formats the query with args, runs the query hooks around
// it, and returns the resulting rows.
func (c Conn) QueryContext(
	ctx context.Context, query string, args ...interface{},
) (*sql.Rows, error) {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	rows, err := c.Conn.QueryContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, nil, err)
	return rows, err
}

// QueryRowContext formats the query with args, runs the query hooks
// around it, and returns the single resulting row.
func (c Conn) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
	formattedQuery := c.db.format(query, args)
	ctx, event := c.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
	row := c.Conn.QueryRowContext(ctx, formattedQuery)
	c.db.afterQuery(ctx, event, nil, row.Err())
	return row
}

// Dialect returns the SQL dialect of the parent DB.
func (c Conn) Dialect() schema.Dialect {
	return c.db.Dialect()
}
|
||||
|
||||
// The NewXxx methods below mirror the DB builders; Conn(c) binds each
// query to this reserved connection.

// NewValues returns a VALUES query builder bound to this connection.
func (c Conn) NewValues(model interface{}) *ValuesQuery {
	return NewValuesQuery(c.db, model).Conn(c)
}

// NewSelect returns a SELECT query builder bound to this connection.
func (c Conn) NewSelect() *SelectQuery {
	return NewSelectQuery(c.db).Conn(c)
}

// NewInsert returns an INSERT query builder bound to this connection.
func (c Conn) NewInsert() *InsertQuery {
	return NewInsertQuery(c.db).Conn(c)
}

// NewUpdate returns an UPDATE query builder bound to this connection.
func (c Conn) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(c.db).Conn(c)
}

// NewDelete returns a DELETE query builder bound to this connection.
func (c Conn) NewDelete() *DeleteQuery {
	return NewDeleteQuery(c.db).Conn(c)
}

// NewCreateTable returns a CREATE TABLE query builder bound to this connection.
func (c Conn) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(c.db).Conn(c)
}

// NewDropTable returns a DROP TABLE query builder bound to this connection.
func (c Conn) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(c.db).Conn(c)
}

// NewCreateIndex returns a CREATE INDEX query builder bound to this connection.
func (c Conn) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(c.db).Conn(c)
}

// NewDropIndex returns a DROP INDEX query builder bound to this connection.
func (c Conn) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(c.db).Conn(c)
}

// NewTruncateTable returns a TRUNCATE TABLE query builder bound to this connection.
func (c Conn) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(c.db).Conn(c)
}

// NewAddColumn returns an ADD COLUMN query builder bound to this connection.
func (c Conn) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(c.db).Conn(c)
}

// NewDropColumn returns a DROP COLUMN query builder bound to this connection.
func (c Conn) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(c.db).Conn(c)
}
|
||||
|
||||
// RunInTx runs the function in a transaction. If the function returns an error,
// the transaction is rolled back. Otherwise, the transaction is committed.
func (c Conn) RunInTx(
	ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	tx, err := c.BeginTx(ctx, opts)
	if err != nil {
		return err
	}

	// done is set just before the commit so the deferred rollback fires
	// only on error or panic.
	var done bool

	defer func() {
		if !done {
			_ = tx.Rollback()
		}
	}()

	if err := fn(ctx, tx); err != nil {
		return err
	}

	done = true
	return tx.Commit()
}

// BeginTx starts a transaction on this connection, reporting the BEGIN to
// the query hooks.
func (c Conn) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
	ctx, event := c.db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil)
	tx, err := c.Conn.BeginTx(ctx, opts)
	c.db.afterQuery(ctx, event, nil, err)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx: ctx,
		db:  c.db,
		Tx:  tx,
	}, nil
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Stmt wraps a prepared *sql.Stmt.
type Stmt struct {
	*sql.Stmt
}

// Prepare is like PrepareContext with context.Background.
func (db *DB) Prepare(query string) (Stmt, error) {
	return db.PrepareContext(context.Background(), query)
}

// PrepareContext prepares the query on the underlying *sql.DB. Note that
// the query text is passed through as-is, without bun formatting.
func (db *DB) PrepareContext(ctx context.Context, query string) (Stmt, error) {
	stmt, err := db.DB.PrepareContext(ctx, query)
	if err != nil {
		return Stmt{}, err
	}
	return Stmt{Stmt: stmt}, nil
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Tx wraps *sql.Tx with bun query formatting and hooks. A Tx with a
// non-empty name represents a savepoint inside the transaction rather
// than the transaction itself (see Tx.BeginTx).
type Tx struct {
	ctx context.Context
	db  *DB
	// name is the name of a savepoint
	name string
	*sql.Tx
}
|
||||
|
||||
// RunInTx runs the function in a transaction. If the function returns an error,
// the transaction is rolled back. Otherwise, the transaction is committed.
func (db *DB) RunInTx(
	ctx context.Context, opts *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	tx, err := db.BeginTx(ctx, opts)
	if err != nil {
		return err
	}

	// done is set just before the commit so the deferred rollback fires
	// only on error or panic.
	var done bool

	defer func() {
		if !done {
			_ = tx.Rollback()
		}
	}()

	if err := fn(ctx, tx); err != nil {
		return err
	}

	done = true
	return tx.Commit()
}
|
||||
|
||||
// Begin is like BeginTx with context.Background and default options.
func (db *DB) Begin() (Tx, error) {
	return db.BeginTx(context.Background(), nil)
}

// BeginTx starts a transaction, reporting the BEGIN to the query hooks.
func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (Tx, error) {
	ctx, event := db.beforeQuery(ctx, nil, "BEGIN", nil, "BEGIN", nil)
	tx, err := db.DB.BeginTx(ctx, opts)
	db.afterQuery(ctx, event, nil, err)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx: ctx,
		db:  db,
		Tx:  tx,
	}, nil
}
|
||||
|
||||
// Commit commits the transaction, or releases the savepoint if this Tx
// represents one.
func (tx Tx) Commit() error {
	if tx.name == "" {
		return tx.commitTX()
	}
	return tx.commitSP()
}

// commitTX commits the real transaction, reporting COMMIT to the query hooks.
func (tx Tx) commitTX() error {
	ctx, event := tx.db.beforeQuery(tx.ctx, nil, "COMMIT", nil, "COMMIT", nil)
	err := tx.Tx.Commit()
	tx.db.afterQuery(ctx, event, nil, err)
	return err
}

// commitSP releases the savepoint. Dialects with the MSSavepoint feature
// take no action here, as they have no RELEASE SAVEPOINT statement.
func (tx Tx) commitSP() error {
	if tx.Dialect().Features().Has(feature.MSSavepoint) {
		return nil
	}
	query := "RELEASE SAVEPOINT " + tx.name
	_, err := tx.ExecContext(tx.ctx, query)
	return err
}
|
||||
|
||||
// Rollback rolls back the transaction, or rolls back to the savepoint if
// this Tx represents one.
func (tx Tx) Rollback() error {
	if tx.name == "" {
		return tx.rollbackTX()
	}
	return tx.rollbackSP()
}

// rollbackTX rolls back the real transaction, reporting ROLLBACK to the
// query hooks.
func (tx Tx) rollbackTX() error {
	ctx, event := tx.db.beforeQuery(tx.ctx, nil, "ROLLBACK", nil, "ROLLBACK", nil)
	err := tx.Tx.Rollback()
	tx.db.afterQuery(ctx, event, nil, err)
	return err
}

// rollbackSP rolls back to the savepoint, using the MSSQL statement form
// for dialects with the MSSavepoint feature.
func (tx Tx) rollbackSP() error {
	query := "ROLLBACK TO SAVEPOINT " + tx.name
	if tx.Dialect().Features().Has(feature.MSSavepoint) {
		query = "ROLLBACK TRANSACTION " + tx.name
	}
	_, err := tx.ExecContext(tx.ctx, query)
	return err
}
|
||||
|
||||
func (tx Tx) Exec(query string, args ...interface{}) (sql.Result, error) {
|
||||
return tx.ExecContext(context.TODO(), query, args...)
|
||||
}
|
||||
|
||||
func (tx Tx) ExecContext(
|
||||
ctx context.Context, query string, args ...interface{},
|
||||
) (sql.Result, error) {
|
||||
formattedQuery := tx.db.format(query, args)
|
||||
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
|
||||
res, err := tx.Tx.ExecContext(ctx, formattedQuery)
|
||||
tx.db.afterQuery(ctx, event, res, err)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (tx Tx) Query(query string, args ...interface{}) (*sql.Rows, error) {
|
||||
return tx.QueryContext(context.TODO(), query, args...)
|
||||
}
|
||||
|
||||
func (tx Tx) QueryContext(
|
||||
ctx context.Context, query string, args ...interface{},
|
||||
) (*sql.Rows, error) {
|
||||
formattedQuery := tx.db.format(query, args)
|
||||
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
|
||||
rows, err := tx.Tx.QueryContext(ctx, formattedQuery)
|
||||
tx.db.afterQuery(ctx, event, nil, err)
|
||||
return rows, err
|
||||
}
|
||||
|
||||
func (tx Tx) QueryRow(query string, args ...interface{}) *sql.Row {
|
||||
return tx.QueryRowContext(context.TODO(), query, args...)
|
||||
}
|
||||
|
||||
func (tx Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row {
|
||||
formattedQuery := tx.db.format(query, args)
|
||||
ctx, event := tx.db.beforeQuery(ctx, nil, query, args, formattedQuery, nil)
|
||||
row := tx.Tx.QueryRowContext(ctx, formattedQuery)
|
||||
tx.db.afterQuery(ctx, event, nil, row.Err())
|
||||
return row
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Begin starts a nested transaction (a savepoint) using the Tx context.
func (tx Tx) Begin() (Tx, error) {
	return tx.BeginTx(tx.ctx, nil)
}

// BeginTx will save a point in the running transaction. The returned Tx
// shares the underlying *sql.Tx; only the savepoint name differs.
// TxOptions are ignored for savepoints.
func (tx Tx) BeginTx(ctx context.Context, _ *sql.TxOptions) (Tx, error) {
	// mssql savepoint names are limited to 32 characters
	sp := make([]byte, 14)
	_, err := rand.Read(sp)
	if err != nil {
		return Tx{}, err
	}

	// Random hex suffix keeps nested savepoint names unique.
	qName := "SP_" + hex.EncodeToString(sp)
	query := "SAVEPOINT " + qName
	if tx.Dialect().Features().Has(feature.MSSavepoint) {
		query = "SAVE TRANSACTION " + qName
	}
	_, err = tx.ExecContext(ctx, query)
	if err != nil {
		return Tx{}, err
	}
	return Tx{
		ctx:  ctx,
		db:   tx.db,
		Tx:   tx.Tx,
		name: qName,
	}, nil
}
|
||||
|
||||
// RunInTx runs fn inside a savepoint. If fn returns an error the
// savepoint is rolled back; otherwise it is released.
func (tx Tx) RunInTx(
	ctx context.Context, _ *sql.TxOptions, fn func(ctx context.Context, tx Tx) error,
) error {
	sp, err := tx.BeginTx(ctx, nil)
	if err != nil {
		return err
	}

	// done is set just before the commit so the deferred rollback fires
	// only on error or panic.
	var done bool

	defer func() {
		if !done {
			_ = sp.Rollback()
		}
	}()

	if err := fn(ctx, sp); err != nil {
		return err
	}

	done = true
	return sp.Commit()
}
|
||||
|
||||
// Dialect returns the SQL dialect of the parent DB.
func (tx Tx) Dialect() schema.Dialect {
	return tx.db.Dialect()
}

// The NewXxx methods below mirror the DB builders; Conn(tx) binds each
// query to this transaction.

// NewValues returns a VALUES query builder bound to this transaction.
func (tx Tx) NewValues(model interface{}) *ValuesQuery {
	return NewValuesQuery(tx.db, model).Conn(tx)
}

// NewSelect returns a SELECT query builder bound to this transaction.
func (tx Tx) NewSelect() *SelectQuery {
	return NewSelectQuery(tx.db).Conn(tx)
}

// NewInsert returns an INSERT query builder bound to this transaction.
func (tx Tx) NewInsert() *InsertQuery {
	return NewInsertQuery(tx.db).Conn(tx)
}

// NewUpdate returns an UPDATE query builder bound to this transaction.
func (tx Tx) NewUpdate() *UpdateQuery {
	return NewUpdateQuery(tx.db).Conn(tx)
}

// NewDelete returns a DELETE query builder bound to this transaction.
func (tx Tx) NewDelete() *DeleteQuery {
	return NewDeleteQuery(tx.db).Conn(tx)
}

// NewCreateTable returns a CREATE TABLE query builder bound to this transaction.
func (tx Tx) NewCreateTable() *CreateTableQuery {
	return NewCreateTableQuery(tx.db).Conn(tx)
}

// NewDropTable returns a DROP TABLE query builder bound to this transaction.
func (tx Tx) NewDropTable() *DropTableQuery {
	return NewDropTableQuery(tx.db).Conn(tx)
}

// NewCreateIndex returns a CREATE INDEX query builder bound to this transaction.
func (tx Tx) NewCreateIndex() *CreateIndexQuery {
	return NewCreateIndexQuery(tx.db).Conn(tx)
}

// NewDropIndex returns a DROP INDEX query builder bound to this transaction.
func (tx Tx) NewDropIndex() *DropIndexQuery {
	return NewDropIndexQuery(tx.db).Conn(tx)
}

// NewTruncateTable returns a TRUNCATE TABLE query builder bound to this transaction.
func (tx Tx) NewTruncateTable() *TruncateTableQuery {
	return NewTruncateTableQuery(tx.db).Conn(tx)
}

// NewAddColumn returns an ADD COLUMN query builder bound to this transaction.
func (tx Tx) NewAddColumn() *AddColumnQuery {
	return NewAddColumnQuery(tx.db).Conn(tx)
}

// NewDropColumn returns a DROP COLUMN query builder bound to this transaction.
func (tx Tx) NewDropColumn() *DropColumnQuery {
	return NewDropColumnQuery(tx.db).Conn(tx)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// makeQueryBytes returns an empty scratch buffer for building query text,
// pre-sized to avoid growth for typical queries.
func (db *DB) makeQueryBytes() []byte {
	// TODO: make this configurable?
	return make([]byte, 0, 4096)
}
|
@ -0,0 +1,88 @@
|
||||
package dialect
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/uptrace/bun/internal"
|
||||
)
|
||||
|
||||
// AppendError appends err to b rendered as the placeholder "?!(<message>)".
func AppendError(b []byte, err error) []byte {
	b = append(b, '?', '!', '(')
	b = append(b, err.Error()...)
	return append(b, ')')
}
|
||||
|
||||
// AppendNull appends the SQL NULL literal to b.
func AppendNull(b []byte) []byte {
	return append(b, 'N', 'U', 'L', 'L')
}

// AppendBool appends the SQL literal TRUE or FALSE to b.
func AppendBool(b []byte, v bool) []byte {
	if !v {
		return append(b, "FALSE"...)
	}
	return append(b, "TRUE"...)
}
|
||||
|
||||
// AppendFloat32 appends v as a SQL literal to b.
func AppendFloat32(b []byte, v float32) []byte {
	return appendFloat(b, float64(v), 32)
}

// AppendFloat64 appends v as a SQL literal to b.
func AppendFloat64(b []byte, v float64) []byte {
	return appendFloat(b, v, 64)
}

// appendFloat appends v in 'f' format with the minimal number of digits,
// mapping the IEEE special values NaN and ±Inf to their quoted
// PostgreSQL spellings.
func appendFloat(b []byte, v float64, bitSize int) []byte {
	if math.IsNaN(v) {
		return append(b, "'NaN'"...)
	}
	if math.IsInf(v, 1) {
		return append(b, "'Infinity'"...)
	}
	if math.IsInf(v, -1) {
		return append(b, "'-Infinity'"...)
	}
	return strconv.AppendFloat(b, v, 'f', -1, bitSize)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// AppendIdent appends field to b as a quoted SQL identifier, using quote
// as the dialect's identifier-quote byte. internal.Bytes converts the
// string to []byte (implementation not visible here — presumably a
// zero-copy conversion; appendIdent only reads it).
func AppendIdent(b []byte, field string, quote byte) []byte {
	return appendIdent(b, internal.Bytes(field), quote)
}
|
||||
|
||||
// appendIdent quotes src as a SQL identifier path. Dots separate path
// segments and are emitted unquoted, a '*' outside quotes passes through
// verbatim (e.g. `"t".*`), and occurrences of the quote byte inside a
// segment are doubled.
func appendIdent(b, src []byte, quote byte) []byte {
	open := false
	for _, ch := range src {
		if ch == '*' && !open {
			b = append(b, '*')
			continue
		}
		if ch == '.' {
			if open {
				b = append(b, quote)
				open = false
			}
			b = append(b, '.')
			continue
		}
		if !open {
			b = append(b, quote)
			open = true
		}
		if ch == quote {
			b = append(b, quote, quote)
		} else {
			b = append(b, ch)
		}
	}
	if open {
		b = append(b, quote)
	}
	return b
}
|
@ -0,0 +1,26 @@
|
||||
package dialect
|
||||
|
||||
// Name identifies a SQL dialect supported by bun.
type Name int

const (
	Invalid Name = iota
	PG
	SQLite
	MySQL
	MSSQL
)

// String returns the lowercase short name of the dialect, or "invalid"
// for Invalid and any out-of-range value.
func (n Name) String() string {
	if n < PG || n > MSSQL {
		return "invalid"
	}
	return [...]string{"pg", "sqlite", "mysql", "mssql"}[n-PG]
}
|
@ -0,0 +1,34 @@
|
||||
package feature
|
||||
|
||||
import "github.com/uptrace/bun/internal"
|
||||
|
||||
// Feature is a bit set describing optional SQL capabilities of a dialect.
type Feature = internal.Flag

// Each constant is a single capability bit; a dialect ORs together the
// bits it supports and query builders test them before emitting
// dialect-specific SQL.
const (
	CTE Feature = 1 << iota
	WithValues
	Returning
	InsertReturning
	Output // mssql
	DefaultPlaceholder
	DoubleColonCast
	ValuesRow
	UpdateMultiTable
	InsertTableAlias
	UpdateTableAlias
	DeleteTableAlias
	AutoIncrement
	Identity
	TableCascade
	TableIdentity
	TableTruncate
	InsertOnConflict     // INSERT ... ON CONFLICT
	InsertOnDuplicateKey // INSERT ... ON DUPLICATE KEY
	InsertIgnore         // INSERT IGNORE ...
	TableNotExists
	OffsetFetch
	SelectExists
	UpdateFromTable
	MSSavepoint
	GeneratedIdentity
)
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,133 @@
|
||||
package mssqldialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/dialect/feature"
|
||||
"github.com/uptrace/bun/dialect/sqltype"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// SQL Server column types substituted for the generic types discovered
// from Go struct fields (see sqlType and OnTable).
const (
	datetimeType  = "DATETIME"
	bitType       = "BIT"
	nvarcharType  = "NVARCHAR(MAX)"
	varbinaryType = "VARBINARY(MAX)"
)

// init guards against mixing incompatible module versions: the dialect
// and bun are released in lockstep and must match exactly.
func init() {
	if Version() != bun.Version() {
		panic(fmt.Errorf("mssqldialect and Bun must have the same version: v%s != v%s",
			Version(), bun.Version()))
	}
}
|
||||
|
||||
// Dialect implements bun's dialect interface for Microsoft SQL Server.
type Dialect struct {
	schema.BaseDialect

	tables   *schema.Tables  // per-dialect table metadata registry
	features feature.Feature // capability bits reported by Features
}

// New returns a SQL Server dialect with its fixed feature set.
func New() *Dialect {
	d := new(Dialect)
	d.tables = schema.NewTables(d)
	d.features = feature.CTE |
		feature.DefaultPlaceholder |
		feature.Identity |
		feature.Output |
		feature.OffsetFetch |
		feature.UpdateFromTable |
		feature.MSSavepoint
	return d
}
|
||||
|
||||
// Init queries the server version once a connection pool is available.
// Errors are logged and swallowed so an unreachable server does not
// prevent constructing the dialect.
func (d *Dialect) Init(db *sql.DB) {
	var version string
	if err := db.QueryRow("SELECT @@VERSION").Scan(&version); err != nil {
		log.Printf("can't discover MSSQL version: %s", err)
		return
	}

	// NOTE(review): the parsed version is computed but never read —
	// unlike the MySQL dialect's Init, no feature bits are toggled from
	// it (ineffectual assignment, staticcheck SA4006). Either dead code
	// or a missing version-gated feature check; confirm upstream intent.
	version = semver.MajorMinor("v" + cleanupVersion(version))
}
|
||||
|
||||
// cleanupVersion extracts the numeric server version from an @@VERSION
// banner such as "Microsoft SQL Server 2019 (RTM) - 15.0.2000.5 (X64) ...".
// It returns the token between " - " and the following space, or "" when
// the banner does not have that shape.
func cleanupVersion(v string) string {
	_, tail, ok := strings.Cut(v, " - ")
	if !ok {
		return ""
	}
	ver, _, ok := strings.Cut(tail, " ")
	if !ok {
		return ""
	}
	return ver
}
|
||||
|
||||
// Name reports this dialect as MSSQL.
func (d *Dialect) Name() dialect.Name {
	return dialect.MSSQL
}

// Features returns the capability bits set in New.
func (d *Dialect) Features() feature.Feature {
	return d.features
}

// Tables returns the dialect's table metadata registry.
func (d *Dialect) Tables() *schema.Tables {
	return d.tables
}

// OnTable rewrites each field's SQL types for SQL Server: the discovered
// type goes through sqlType, and a user-specified JSON type is mapped to
// NVARCHAR(MAX).
func (d *Dialect) OnTable(table *schema.Table) {
	for _, field := range table.FieldMap {
		field.DiscoveredSQLType = sqlType(field)
		if strings.ToUpper(field.UserSQLType) == sqltype.JSON {
			field.UserSQLType = nvarcharType
		}
	}
}

// IdentQuote returns the identifier quote character (double quote).
func (d *Dialect) IdentQuote() byte {
	return '"'
}
|
||||
|
||||
func (*Dialect) AppendTime(b []byte, tm time.Time) []byte {
|
||||
b = append(b, '\'')
|
||||
b = tm.AppendFormat(b, "2006-01-02 15:04:05.999")
|
||||
b = append(b, '\'')
|
||||
return b
|
||||
}
|
||||
|
||||
func (*Dialect) AppendBytes(b, bs []byte) []byte {
|
||||
if bs == nil {
|
||||
return dialect.AppendNull(b)
|
||||
}
|
||||
|
||||
b = append(b, "0x"...)
|
||||
|
||||
s := len(b)
|
||||
b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
|
||||
hex.Encode(b[s:], bs)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// sqlType maps a generic discovered SQL type to its SQL Server spelling.
// Types without a special mapping are returned unchanged.
func sqlType(field *schema.Field) string {
	switch field.DiscoveredSQLType {
	case sqltype.VarChar:
		// give a bare VARCHAR an explicit default length
		return field.DiscoveredSQLType + "(255)"
	case sqltype.Timestamp:
		return datetimeType
	case sqltype.Boolean:
		return bitType
	case sqltype.JSON:
		return nvarcharType
	case sqltype.Blob:
		return varbinaryType
	}
	return field.DiscoveredSQLType
}
|
@ -0,0 +1,11 @@
|
||||
package mssqldialect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// scanner returns the default scanner for typ; this dialect adds no
// type-specific scanning on top of the schema package.
func scanner(typ reflect.Type) schema.ScannerFunc {
	return schema.Scanner(typ)
}
|
@ -0,0 +1,6 @@
|
||||
package mssqldialect
|
||||
|
||||
// Version is the current release version.
|
||||
func Version() string {
|
||||
return "1.1.6"
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,183 @@
|
||||
package mysqldialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/dialect/feature"
|
||||
"github.com/uptrace/bun/dialect/sqltype"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// datetimeType is the MySQL column type substituted for timestamp fields.
const datetimeType = "DATETIME"

// init guards against mixing incompatible module versions: the dialect
// and bun are released in lockstep and must match exactly.
func init() {
	if Version() != bun.Version() {
		panic(fmt.Errorf("mysqldialect and Bun must have the same version: v%s != v%s",
			Version(), bun.Version()))
	}
}
|
||||
|
||||
// Dialect implements bun's dialect interface for MySQL and MariaDB.
type Dialect struct {
	schema.BaseDialect

	tables   *schema.Tables  // per-dialect table metadata registry
	features feature.Feature // base capability bits; Init may add more
}

// New returns a MySQL dialect with the feature set common to all
// supported server versions; Init extends it after version discovery.
func New() *Dialect {
	d := new(Dialect)
	d.tables = schema.NewTables(d)
	d.features = feature.AutoIncrement |
		feature.DefaultPlaceholder |
		feature.UpdateMultiTable |
		feature.ValuesRow |
		feature.TableTruncate |
		feature.TableNotExists |
		feature.InsertIgnore |
		feature.InsertOnDuplicateKey |
		feature.SelectExists
	return d
}
|
||||
|
||||
// Init discovers the server version and enables version-dependent
// features: InsertReturning on MariaDB >= 10.5, and CTE/WithValues/
// DeleteTableAlias on MySQL >= 8.0. Discovery errors are logged and
// swallowed so an unreachable server does not prevent construction.
func (d *Dialect) Init(db *sql.DB) {
	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		log.Printf("can't discover MySQL version: %s", err)
		return
	}

	// MariaDB reports e.g. "10.6.4-MariaDB"; branch on that marker.
	if strings.Contains(version, "MariaDB") {
		version = semver.MajorMinor("v" + cleanupVersion(version))
		if semver.Compare(version, "v10.5.0") >= 0 {
			d.features |= feature.InsertReturning
		}
		return
	}

	version = semver.MajorMinor("v" + cleanupVersion(version))
	if semver.Compare(version, "v8.0") >= 0 {
		d.features |= feature.CTE | feature.WithValues | feature.DeleteTableAlias
	}
}
|
||||
|
||||
// cleanupVersion strips a "-suffix" (e.g. "-log", "-MariaDB") from the
// string returned by SELECT version(), keeping only the leading numeric
// part.
func cleanupVersion(s string) string {
	head, _, _ := strings.Cut(s, "-")
	return head
}
|
||||
|
||||
// Name reports this dialect as MySQL.
func (d *Dialect) Name() dialect.Name {
	return dialect.MySQL
}

// Features returns the capability bits (the base set from New plus any
// version-dependent bits added by Init).
func (d *Dialect) Features() feature.Feature {
	return d.features
}

// Tables returns the dialect's table metadata registry.
func (d *Dialect) Tables() *schema.Tables {
	return d.tables
}

// OnTable rewrites each field's discovered SQL type via sqlType.
func (d *Dialect) OnTable(table *schema.Table) {
	for _, field := range table.FieldMap {
		field.DiscoveredSQLType = sqlType(field)
	}
}

// IdentQuote returns the identifier quote character (backtick).
func (d *Dialect) IdentQuote() byte {
	return '`'
}
|
||||
|
||||
func (*Dialect) AppendTime(b []byte, tm time.Time) []byte {
|
||||
b = append(b, '\'')
|
||||
b = tm.AppendFormat(b, "2006-01-02 15:04:05.999999")
|
||||
b = append(b, '\'')
|
||||
return b
|
||||
}
|
||||
|
||||
func (*Dialect) AppendString(b []byte, s string) []byte {
|
||||
b = append(b, '\'')
|
||||
loop:
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case '\000':
|
||||
continue loop
|
||||
case '\'':
|
||||
b = append(b, "''"...)
|
||||
continue loop
|
||||
case '\\':
|
||||
b = append(b, '\\', '\\')
|
||||
continue loop
|
||||
}
|
||||
|
||||
if r < utf8.RuneSelf {
|
||||
b = append(b, byte(r))
|
||||
continue
|
||||
}
|
||||
|
||||
l := len(b)
|
||||
if cap(b)-l < utf8.UTFMax {
|
||||
b = append(b, make([]byte, utf8.UTFMax)...)
|
||||
}
|
||||
n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
|
||||
b = b[:l+n]
|
||||
}
|
||||
b = append(b, '\'')
|
||||
return b
|
||||
}
|
||||
|
||||
func (*Dialect) AppendBytes(b []byte, bs []byte) []byte {
|
||||
if bs == nil {
|
||||
return dialect.AppendNull(b)
|
||||
}
|
||||
|
||||
b = append(b, `X'`...)
|
||||
|
||||
s := len(b)
|
||||
b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
|
||||
hex.Encode(b[s:], bs)
|
||||
|
||||
b = append(b, '\'')
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (*Dialect) AppendJSON(b, jsonb []byte) []byte {
|
||||
b = append(b, '\'')
|
||||
|
||||
for _, c := range jsonb {
|
||||
switch c {
|
||||
case '\'':
|
||||
b = append(b, "''"...)
|
||||
case '\\':
|
||||
b = append(b, `\\`...)
|
||||
default:
|
||||
b = append(b, c)
|
||||
}
|
||||
}
|
||||
|
||||
b = append(b, '\'')
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// sqlType maps a generic discovered SQL type to its MySQL spelling.
// Types without a special mapping are returned unchanged.
func sqlType(field *schema.Field) string {
	switch field.DiscoveredSQLType {
	case sqltype.VarChar:
		// give a bare VARCHAR an explicit default length
		return field.DiscoveredSQLType + "(255)"
	case sqltype.Timestamp:
		return datetimeType
	}
	return field.DiscoveredSQLType
}
|
@ -0,0 +1,11 @@
|
||||
package mysqldialect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// scanner returns the default scanner for typ; this dialect adds no
// type-specific scanning on top of the schema package.
func scanner(typ reflect.Type) schema.ScannerFunc {
	return schema.Scanner(typ)
}
|
@ -0,0 +1,6 @@
|
||||
package mysqldialect
|
||||
|
||||
// Version is the current release version.
|
||||
func Version() string {
|
||||
return "1.1.6"
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,364 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// Reflection singletons used to fast-path the common element types in
// array appending and scanning.
var (
	driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()

	stringType      = reflect.TypeOf((*string)(nil)).Elem()
	sliceStringType = reflect.TypeOf([]string(nil))

	intType      = reflect.TypeOf((*int)(nil)).Elem()
	sliceIntType = reflect.TypeOf([]int(nil))

	int64Type      = reflect.TypeOf((*int64)(nil)).Elem()
	sliceInt64Type = reflect.TypeOf([]int64(nil))

	float64Type      = reflect.TypeOf((*float64)(nil)).Elem()
	sliceFloat64Type = reflect.TypeOf([]float64(nil))
)
|
||||
|
||||
// arrayAppend appends one array element of a supported dynamic type to b
// in PostgreSQL array-literal form. Unsupported types produce an inline
// ?!(...) error marker instead of failing the whole query build.
func arrayAppend(fmter schema.Formatter, b []byte, v interface{}) []byte {
	switch v := v.(type) {
	case int64:
		return strconv.AppendInt(b, v, 10)
	case float64:
		return dialect.AppendFloat64(b, v)
	case bool:
		return dialect.AppendBool(b, v)
	case []byte:
		return arrayAppendBytes(b, v)
	case string:
		return arrayAppendString(b, v)
	case time.Time:
		// times use the dialect's own quoting/format
		return fmter.Dialect().AppendTime(b, v)
	default:
		err := fmt.Errorf("pgdialect: can't append %T", v)
		return dialect.AppendError(b, err)
	}
}
|
||||
|
||||
// arrayAppendStringValue appends a reflected string element.
func arrayAppendStringValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return arrayAppendString(b, v.String())
}

// arrayAppendBytesValue appends a reflected []byte element.
func arrayAppendBytesValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	return arrayAppendBytes(b, v.Bytes())
}

// arrayAppendDriverValue unwraps a driver.Valuer element and appends the
// produced value via arrayAppend; a Value() error becomes an inline
// ?!(...) marker.
func arrayAppendDriverValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	iface, err := v.Interface().(driver.Valuer).Value()
	if err != nil {
		return dialect.AppendError(b, err)
	}
	return arrayAppend(fmter, b, iface)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// arrayAppender returns an AppenderFunc that renders a slice/array (or
// pointer to one) as a quoted PostgreSQL array literal '{...}'. It
// returns nil for kinds that cannot be arrays, and panics for element
// types with no appender.
func (d *Dialect) arrayAppender(typ reflect.Type) schema.AppenderFunc {
	kind := typ.Kind()

	switch kind {
	case reflect.Ptr:
		// unwrap pointers recursively, NULL-checking at append time
		if fn := d.arrayAppender(typ.Elem()); fn != nil {
			return schema.PtrAppender(fn)
		}
	case reflect.Slice, reflect.Array:
		// ok:
	default:
		return nil
	}

	elemType := typ.Elem()

	// Fast paths for the common concrete slice types.
	if kind == reflect.Slice {
		switch elemType {
		case stringType:
			return appendStringSliceValue
		case intType:
			return appendIntSliceValue
		case int64Type:
			return appendInt64SliceValue
		case float64Type:
			return appendFloat64SliceValue
		}
	}

	appendElem := d.arrayElemAppender(elemType)
	if appendElem == nil {
		panic(fmt.Errorf("pgdialect: %s is not supported", typ))
	}

	// Generic path: reflect over the elements at append time.
	return func(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
		kind := v.Kind()
		switch kind {
		case reflect.Ptr, reflect.Slice:
			if v.IsNil() {
				return dialect.AppendNull(b)
			}
		}

		if kind == reflect.Ptr {
			v = v.Elem()
		}

		b = append(b, '\'')

		b = append(b, '{')
		for i := 0; i < v.Len(); i++ {
			elem := v.Index(i)
			b = appendElem(fmter, b, elem)
			b = append(b, ',')
		}
		if v.Len() > 0 {
			b[len(b)-1] = '}' // Replace trailing comma.
		} else {
			b = append(b, '}')
		}

		b = append(b, '\'')

		return b
	}
}
|
||||
|
||||
// arrayElemAppender picks the appender for a single array element:
// driver.Valuer values are unwrapped, strings and []byte get the
// array-specific quoting, and everything else falls back to the generic
// schema appender.
func (d *Dialect) arrayElemAppender(typ reflect.Type) schema.AppenderFunc {
	if typ.Implements(driverValuerType) {
		return arrayAppendDriverValue
	}
	switch typ.Kind() {
	case reflect.String:
		return arrayAppendStringValue
	case reflect.Slice:
		if typ.Elem().Kind() == reflect.Uint8 {
			return arrayAppendBytesValue
		}
	}
	return schema.Appender(d, typ)
}
|
||||
|
||||
// appendStringSliceValue is the reflect adapter for appendStringSlice.
func appendStringSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	ss := v.Convert(sliceStringType).Interface().([]string)
	return appendStringSlice(b, ss)
}

// appendStringSlice renders ss as '{"a","b",...}', or NULL when nil.
func appendStringSlice(b []byte, ss []string) []byte {
	if ss == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, '\'')

	b = append(b, '{')
	for _, s := range ss {
		b = arrayAppendString(b, s)
		b = append(b, ',')
	}
	if len(ss) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}

	b = append(b, '\'')

	return b
}

// appendIntSliceValue is the reflect adapter for appendIntSlice.
func appendIntSliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	ints := v.Convert(sliceIntType).Interface().([]int)
	return appendIntSlice(b, ints)
}

// appendIntSlice renders ints as '{1,2,...}', or NULL when nil.
func appendIntSlice(b []byte, ints []int) []byte {
	if ints == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, '\'')

	b = append(b, '{')
	for _, n := range ints {
		b = strconv.AppendInt(b, int64(n), 10)
		b = append(b, ',')
	}
	if len(ints) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}

	b = append(b, '\'')

	return b
}

// appendInt64SliceValue is the reflect adapter for appendInt64Slice.
func appendInt64SliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	ints := v.Convert(sliceInt64Type).Interface().([]int64)
	return appendInt64Slice(b, ints)
}

// appendInt64Slice renders ints as '{1,2,...}', or NULL when nil.
func appendInt64Slice(b []byte, ints []int64) []byte {
	if ints == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, '\'')

	b = append(b, '{')
	for _, n := range ints {
		b = strconv.AppendInt(b, n, 10)
		b = append(b, ',')
	}
	if len(ints) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}

	b = append(b, '\'')

	return b
}

// appendFloat64SliceValue is the reflect adapter for appendFloat64Slice.
func appendFloat64SliceValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	floats := v.Convert(sliceFloat64Type).Interface().([]float64)
	return appendFloat64Slice(b, floats)
}

// appendFloat64Slice renders floats as '{1.5,2,...}', or NULL when nil.
func appendFloat64Slice(b []byte, floats []float64) []byte {
	if floats == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, '\'')

	b = append(b, '{')
	for _, n := range floats {
		b = dialect.AppendFloat64(b, n)
		b = append(b, ',')
	}
	if len(floats) > 0 {
		b[len(b)-1] = '}' // Replace trailing comma.
	} else {
		b = append(b, '}')
	}

	b = append(b, '\'')

	return b
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// arrayAppendBytes appends bs as a quoted bytea element ("\\x<hex>")
// inside an array literal, or NULL when bs is nil.
func arrayAppendBytes(b []byte, bs []byte) []byte {
	if bs == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, `"\\x`...)

	s := len(b)
	b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
	hex.Encode(b[s:], bs)

	b = append(b, '"')

	return b
}
|
||||
|
||||
// arrayAppendString appends s as a double-quoted array element: NUL runes
// are dropped, single quotes are doubled (outer literal quoting), and
// double quotes and backslashes are backslash-escaped.
func arrayAppendString(b []byte, s string) []byte {
	b = append(b, '"')
	for _, r := range s {
		switch r {
		case 0:
			// drop NUL characters
		case '\'':
			b = append(b, "''"...)
		case '"':
			b = append(b, `\"`...)
		case '\\':
			b = append(b, `\\`...)
		default:
			b = utf8.AppendRune(b, r)
		}
	}
	return append(b, '"')
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// mapStringStringType is the reflect singleton for the supported hstore Go type.
var mapStringStringType = reflect.TypeOf(map[string]string(nil))

// hstoreAppender returns an appender that renders a map[string]string (or
// pointer to one) as an hstore literal. Any other map type gets an
// appender that emits an inline ?!(...) error marker.
func (d *Dialect) hstoreAppender(typ reflect.Type) schema.AppenderFunc {
	kind := typ.Kind()

	switch kind {
	case reflect.Ptr:
		if fn := d.hstoreAppender(typ.Elem()); fn != nil {
			return schema.PtrAppender(fn)
		}
	case reflect.Map:
		// ok:
	default:
		return nil
	}

	if typ.Key() == stringType && typ.Elem() == stringType {
		return appendMapStringStringValue
	}

	return func(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
		err := fmt.Errorf("bun: Hstore(unsupported %s)", v.Type())
		return dialect.AppendError(b, err)
	}
}
|
||||
|
||||
// appendMapStringString renders m as a quoted hstore literal
// ('"k"=>"v",...'), or NULL when m is nil.
//
// NOTE(review): Go map iteration order is random, so the textual order of
// pairs varies between calls — presumably harmless since hstore is an
// unordered type, but it makes generated SQL non-deterministic; confirm
// nothing (e.g. query logging/caching) relies on stable output.
func appendMapStringString(b []byte, m map[string]string) []byte {
	if m == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, '\'')

	for key, value := range m {
		b = arrayAppendString(b, key)
		b = append(b, '=', '>')
		b = arrayAppendString(b, value)
		b = append(b, ',')
	}
	if len(m) > 0 {
		b = b[:len(b)-1] // Strip trailing comma.
	}

	b = append(b, '\'')

	return b
}

// appendMapStringStringValue is the reflect adapter for appendMapStringString.
func appendMapStringStringValue(fmter schema.Formatter, b []byte, v reflect.Value) []byte {
	m := v.Convert(mapStringStringType).Interface().(map[string]string)
	return appendMapStringString(b, m)
}
|
@ -0,0 +1,65 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// ArrayValue wraps a Go slice (or pointer to slice) so it can be appended
// to queries and scanned from results as a PostgreSQL array.
type ArrayValue struct {
	v reflect.Value

	append schema.AppenderFunc // set in Array; nil for unsupported types
	scan   schema.ScannerFunc  // set in Array; nil for unsupported types
}

// Array accepts a slice and returns a wrapper for working with PostgreSQL
// array data type.
//
// For struct fields you can use array tag:
//
//    Emails  []string `bun:",array"`
func Array(vi interface{}) *ArrayValue {
	v := reflect.ValueOf(vi)
	if !v.IsValid() {
		panic(fmt.Errorf("bun: Array(nil)"))
	}

	return &ArrayValue{
		v: v,

		append: pgDialect.arrayAppender(v.Type()),
		scan:   arrayScanner(v.Type()),
	}
}

// Compile-time interface conformance checks.
var (
	_ schema.QueryAppender = (*ArrayValue)(nil)
	_ sql.Scanner          = (*ArrayValue)(nil)
)

// AppendQuery renders the wrapped slice as an array literal; it panics
// for types arrayAppender does not support.
func (a *ArrayValue) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
	if a.append == nil {
		panic(fmt.Errorf("bun: Array(unsupported %s)", a.v.Type()))
	}
	return a.append(fmter, b, a.v), nil
}

// Scan parses src into the wrapped value, which must be a pointer.
func (a *ArrayValue) Scan(src interface{}) error {
	if a.scan == nil {
		return fmt.Errorf("bun: Array(unsupported %s)", a.v.Type())
	}
	if a.v.Kind() != reflect.Ptr {
		return fmt.Errorf("bun: Array(non-pointer %s)", a.v.Type())
	}
	return a.scan(a.v, src)
}

// Value returns the wrapped Go value, or nil when it is invalid.
func (a *ArrayValue) Value() interface{} {
	if a.v.IsValid() {
		return a.v.Interface()
	}
	return nil
}
|
@ -0,0 +1,133 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// arrayParser tokenizes a PostgreSQL array literal ({a,"b",...}) on top
// of the shared streamParser. A malformed prefix/suffix is recorded in
// err and surfaced by NextElem.
type arrayParser struct {
	*streamParser
	err error
}

// newArrayParser wraps b, which must start with '{' and end with '}'.
// The stream position starts at 1 to skip the opening brace.
func newArrayParser(b []byte) *arrayParser {
	p := &arrayParser{
		streamParser: newStreamParser(b, 1),
	}
	if len(b) < 2 || b[0] != '{' || b[len(b)-1] != '}' {
		p.err = fmt.Errorf("bun: can't parse array: %q", b)
	}
	return p
}

// NextElem returns the raw bytes of the next element, nil for an
// unquoted NULL, and io.EOF once the closing brace is reached.
func (p *arrayParser) NextElem() ([]byte, error) {
	if p.err != nil {
		return nil, p.err
	}

	c, err := p.readByte()
	if err != nil {
		return nil, err
	}

	switch c {
	case '}':
		return nil, io.EOF
	case '"':
		// quoted element: unescape (and possibly hex-decode) it
		b, err := p.readSubstring()
		if err != nil {
			return nil, err
		}

		if p.peek() == ',' {
			p.skipNext()
		}

		return b, nil
	default:
		// unquoted element: read verbatim up to ',' or '}'
		b := p.readSimple()
		if bytes.Equal(b, []byte("NULL")) {
			b = nil
		}

		if p.peek() == ',' {
			p.skipNext()
		}

		return b, nil
	}
}

// readSimple consumes an unquoted element up to the next comma or the
// closing brace. The returned slice aliases the parser's input buffer.
func (p *arrayParser) readSimple() []byte {
	p.unreadByte()

	if i := bytes.IndexByte(p.b[p.i:], ','); i >= 0 {
		b := p.b[p.i : p.i+i]
		p.i += i
		return b
	}

	// last element: everything up to the closing brace
	b := p.b[p.i : len(p.b)-1]
	p.i = len(p.b) - 1
	return b
}

// readSubstring consumes a double-quoted element into p.buf, resolving
// backslash escapes (\\ and \") and doubled single quotes. A payload of
// the form "\x..." with even length is additionally hex-decoded (bytea
// output). The returned slice aliases p.buf and is only valid until the
// next call.
func (p *arrayParser) readSubstring() ([]byte, error) {
	c, err := p.readByte()
	if err != nil {
		return nil, err
	}

	p.buf = p.buf[:0]
	// Lookahead loop: c is the current byte, next the one after it, so
	// escape pairs can be collapsed in a single pass.
	for {
		if c == '"' {
			break
		}

		next, err := p.readByte()
		if err != nil {
			return nil, err
		}

		if c == '\\' {
			switch next {
			case '\\', '"':
				p.buf = append(p.buf, next)

				c, err = p.readByte()
				if err != nil {
					return nil, err
				}
			default:
				// lone backslash: keep it and re-examine next
				p.buf = append(p.buf, '\\')
				c = next
			}
			continue
		}
		if c == '\'' && next == '\'' {
			p.buf = append(p.buf, next)
			c, err = p.readByte()
			if err != nil {
				return nil, err
			}
			continue
		}

		p.buf = append(p.buf, c)
		c = next
	}

	if bytes.HasPrefix(p.buf, []byte("\\x")) && len(p.buf)%2 == 0 {
		data := p.buf[2:]
		buf := make([]byte, hex.DecodedLen(len(data)))
		n, err := hex.Decode(buf, data)
		if err != nil {
			return nil, err
		}
		return buf[:n], nil
	}

	return p.buf, nil
}
|
@ -0,0 +1,302 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
|
||||
"github.com/uptrace/bun/internal"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// arrayScanner returns a ScannerFunc that parses a PostgreSQL array
// literal into a slice/array (or pointer to one). It returns nil for
// kinds that cannot hold an array.
func arrayScanner(typ reflect.Type) schema.ScannerFunc {
	kind := typ.Kind()

	switch kind {
	case reflect.Ptr:
		if fn := arrayScanner(typ.Elem()); fn != nil {
			return schema.PtrScanner(fn)
		}
	case reflect.Slice, reflect.Array:
		// ok:
	default:
		return nil
	}

	elemType := typ.Elem()

	// Fast paths for the common concrete slice types.
	if kind == reflect.Slice {
		switch elemType {
		case stringType:
			return scanStringSliceValue
		case intType:
			return scanIntSliceValue
		case int64Type:
			return scanInt64SliceValue
		case float64Type:
			return scanFloat64SliceValue
		}
	}

	// Generic path: parse elements one by one through the element scanner.
	scanElem := schema.Scanner(elemType)
	return func(dest reflect.Value, src interface{}) error {
		dest = reflect.Indirect(dest)
		if !dest.CanSet() {
			return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
		}

		kind := dest.Kind()

		if src == nil {
			// SQL NULL: zero the destination unless it is already a nil slice.
			if kind != reflect.Slice || !dest.IsNil() {
				dest.Set(reflect.Zero(dest.Type()))
			}
			return nil
		}

		if kind == reflect.Slice {
			// reuse capacity but start from an empty slice
			if dest.IsNil() {
				dest.Set(reflect.MakeSlice(dest.Type(), 0, 0))
			} else if dest.Len() > 0 {
				dest.Set(dest.Slice(0, 0))
			}
		}

		b, err := toBytes(src)
		if err != nil {
			return err
		}

		p := newArrayParser(b)
		nextValue := internal.MakeSliceNextElemFunc(dest)
		for {
			elem, err := p.NextElem()
			if err != nil {
				if err == io.EOF {
					break
				}
				return err
			}

			elemValue := nextValue()
			if err := scanElem(elemValue, elem); err != nil {
				return err
			}
		}

		return nil
	}
}
|
||||
|
||||
func scanStringSliceValue(dest reflect.Value, src interface{}) error {
|
||||
dest = reflect.Indirect(dest)
|
||||
if !dest.CanSet() {
|
||||
return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
|
||||
}
|
||||
|
||||
slice, err := decodeStringSlice(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest.Set(reflect.ValueOf(slice))
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStringSlice(src interface{}) ([]string, error) {
|
||||
if src == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := toBytes(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice := make([]string, 0)
|
||||
|
||||
p := newArrayParser(b)
|
||||
for {
|
||||
elem, err := p.NextElem()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
slice = append(slice, string(elem))
|
||||
}
|
||||
|
||||
return slice, nil
|
||||
}
|
||||
|
||||
func scanIntSliceValue(dest reflect.Value, src interface{}) error {
|
||||
dest = reflect.Indirect(dest)
|
||||
if !dest.CanSet() {
|
||||
return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
|
||||
}
|
||||
|
||||
slice, err := decodeIntSlice(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest.Set(reflect.ValueOf(slice))
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeIntSlice(src interface{}) ([]int, error) {
|
||||
if src == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := toBytes(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice := make([]int, 0)
|
||||
|
||||
p := newArrayParser(b)
|
||||
for {
|
||||
elem, err := p.NextElem()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if elem == nil {
|
||||
slice = append(slice, 0)
|
||||
continue
|
||||
}
|
||||
|
||||
n, err := strconv.Atoi(bytesToString(elem))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice = append(slice, n)
|
||||
}
|
||||
|
||||
return slice, nil
|
||||
}
|
||||
|
||||
func scanInt64SliceValue(dest reflect.Value, src interface{}) error {
|
||||
dest = reflect.Indirect(dest)
|
||||
if !dest.CanSet() {
|
||||
return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
|
||||
}
|
||||
|
||||
slice, err := decodeInt64Slice(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest.Set(reflect.ValueOf(slice))
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt64Slice(src interface{}) ([]int64, error) {
|
||||
if src == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := toBytes(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice := make([]int64, 0)
|
||||
|
||||
p := newArrayParser(b)
|
||||
for {
|
||||
elem, err := p.NextElem()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if elem == nil {
|
||||
slice = append(slice, 0)
|
||||
continue
|
||||
}
|
||||
|
||||
n, err := strconv.ParseInt(bytesToString(elem), 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice = append(slice, n)
|
||||
}
|
||||
|
||||
return slice, nil
|
||||
}
|
||||
|
||||
func scanFloat64SliceValue(dest reflect.Value, src interface{}) error {
|
||||
dest = reflect.Indirect(dest)
|
||||
if !dest.CanSet() {
|
||||
return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
|
||||
}
|
||||
|
||||
slice, err := scanFloat64Slice(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest.Set(reflect.ValueOf(slice))
|
||||
return nil
|
||||
}
|
||||
|
||||
func scanFloat64Slice(src interface{}) ([]float64, error) {
|
||||
if src == -1 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := toBytes(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice := make([]float64, 0)
|
||||
|
||||
p := newArrayParser(b)
|
||||
for {
|
||||
elem, err := p.NextElem()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if elem == nil {
|
||||
slice = append(slice, 0)
|
||||
continue
|
||||
}
|
||||
|
||||
n, err := strconv.ParseFloat(bytesToString(elem), 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
slice = append(slice, n)
|
||||
}
|
||||
|
||||
return slice, nil
|
||||
}
|
||||
|
||||
func toBytes(src interface{}) ([]byte, error) {
|
||||
switch src := src.(type) {
|
||||
case string:
|
||||
return stringToBytes(src), nil
|
||||
case []byte:
|
||||
return src, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("bun: got %T, wanted []byte or string", src)
|
||||
}
|
||||
}
|
@ -0,0 +1,109 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/dialect/feature"
|
||||
"github.com/uptrace/bun/dialect/sqltype"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// pgDialect is the package-level dialect instance used by helpers such
// as HStore that need a formatter outside of a *bun.DB.
var pgDialect = New()

// init guards against mixing mismatched pgdialect and bun module versions.
func init() {
	if Version() != bun.Version() {
		panic(fmt.Errorf("pgdialect and Bun must have the same version: v%s != v%s",
			Version(), bun.Version()))
	}
}

// Dialect implements bun's schema dialect interface for PostgreSQL.
type Dialect struct {
	schema.BaseDialect

	tables   *schema.Tables  // per-dialect table/model registry
	features feature.Feature // capability flags advertised to bun
}

// New creates the PostgreSQL dialect and declares the SQL features
// PostgreSQL supports so bun can generate appropriate queries.
func New() *Dialect {
	d := new(Dialect)
	d.tables = schema.NewTables(d)
	d.features = feature.CTE |
		feature.WithValues |
		feature.Returning |
		feature.InsertReturning |
		feature.DefaultPlaceholder |
		feature.DoubleColonCast |
		feature.InsertTableAlias |
		feature.UpdateTableAlias |
		feature.DeleteTableAlias |
		feature.TableCascade |
		feature.TableIdentity |
		feature.TableTruncate |
		feature.TableNotExists |
		feature.InsertOnConflict |
		feature.SelectExists |
		feature.GeneratedIdentity
	return d
}

// Init implements the dialect interface; PostgreSQL needs no per-DB setup.
func (d *Dialect) Init(*sql.DB) {}

// Name reports the dialect identifier.
func (d *Dialect) Name() dialect.Name {
	return dialect.PG
}

// Features reports the feature flags declared in New.
func (d *Dialect) Features() feature.Feature {
	return d.features
}

// Tables returns the dialect's table registry.
func (d *Dialect) Tables() *schema.Tables {
	return d.tables
}

// OnTable post-processes every field of a newly registered table.
func (d *Dialect) OnTable(table *schema.Table) {
	for _, field := range table.FieldMap {
		d.onField(field)
	}
}

// onField fills in PostgreSQL-specific SQL types and installs custom
// append/scan functions for array and hstore columns.
func (d *Dialect) onField(field *schema.Field) {
	field.DiscoveredSQLType = fieldSQLType(field)

	// Auto-increment without IDENTITY maps to the SERIAL pseudo-types.
	if field.AutoIncrement && !field.Identity {
		switch field.DiscoveredSQLType {
		case sqltype.SmallInt:
			field.CreateTableSQLType = pgTypeSmallSerial
		case sqltype.Integer:
			field.CreateTableSQLType = pgTypeSerial
		case sqltype.BigInt:
			field.CreateTableSQLType = pgTypeBigSerial
		}
	}

	// Array columns (tag option or explicit "[]" suffix in the user type).
	if field.Tag.HasOption("array") || strings.HasSuffix(field.UserSQLType, "[]") {
		field.Append = d.arrayAppender(field.StructField.Type)
		field.Scan = arrayScanner(field.StructField.Type)
	}

	if field.DiscoveredSQLType == sqltype.HSTORE {
		field.Append = d.hstoreAppender(field.StructField.Type)
		field.Scan = hstoreScanner(field.StructField.Type)
	}
}

// IdentQuote reports the identifier quote character (double quote).
func (d *Dialect) IdentQuote() byte {
	return '"'
}

// AppendUint32 formats n via a signed int32 cast because PostgreSQL has
// no unsigned integer types; values above MaxInt32 wrap to negatives.
func (d *Dialect) AppendUint32(b []byte, n uint32) []byte {
	return strconv.AppendInt(b, int64(int32(n)), 10)
}

// AppendUint64 formats n via a signed int64 cast for the same reason;
// values above MaxInt64 wrap to negatives.
func (d *Dialect) AppendUint64(b []byte, n uint64) []byte {
	return strconv.AppendInt(b, int64(n), 10)
}
|
@ -0,0 +1,73 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// HStoreValue wraps a map value so it can be appended to queries and
// scanned from results in PostgreSQL's hstore format.
type HStoreValue struct {
	v reflect.Value // the wrapped map (or pointer to map)

	append schema.AppenderFunc // formats v as an hstore literal
	scan   schema.ScannerFunc  // parses an hstore literal into v
}

// HStore accepts a map[string]string and returns a wrapper for working with PostgreSQL
// hstore data type.
//
// For struct fields you can use hstore tag:
//
//	Attrs map[string]string `bun:",hstore"`
func HStore(vi interface{}) *HStoreValue {
	v := reflect.ValueOf(vi)
	if !v.IsValid() {
		panic(fmt.Errorf("bun: HStore(nil)"))
	}

	// Validate that the (possibly pointed-to) value is a map.
	typ := v.Type()
	if typ.Kind() == reflect.Ptr {
		typ = typ.Elem()
	}
	if typ.Kind() != reflect.Map {
		panic(fmt.Errorf("bun: Hstore(unsupported %s)", typ))
	}

	return &HStoreValue{
		v: v,

		append: pgDialect.hstoreAppender(v.Type()),
		scan:   hstoreScanner(v.Type()),
	}
}

// Interface checks.
var (
	_ schema.QueryAppender = (*HStoreValue)(nil)
	_ sql.Scanner          = (*HStoreValue)(nil)
)

// AppendQuery appends the wrapped map as an hstore literal.
// It panics when the wrapped type has no appender (unsupported map type).
func (h *HStoreValue) AppendQuery(fmter schema.Formatter, b []byte) ([]byte, error) {
	if h.append == nil {
		panic(fmt.Errorf("bun: HStore(unsupported %s)", h.v.Type()))
	}
	return h.append(fmter, b, h.v), nil
}

// Scan parses an hstore literal from src into the wrapped map.
// The wrapped value must be a pointer so the result is visible to the caller.
func (h *HStoreValue) Scan(src interface{}) error {
	if h.scan == nil {
		return fmt.Errorf("bun: HStore(unsupported %s)", h.v.Type())
	}
	if h.v.Kind() != reflect.Ptr {
		return fmt.Errorf("bun: HStore(non-pointer %s)", h.v.Type())
	}
	return h.scan(h.v.Elem(), src)
}

// Value returns the wrapped map, or nil when the value is invalid.
func (h *HStoreValue) Value() interface{} {
	if h.v.IsValid() {
		return h.v.Interface()
	}
	return nil
}
|
@ -0,0 +1,142 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// hstoreParser reads key/value pairs from a PostgreSQL hstore literal
// such as `"k1"=>"v1", "k2"=>NULL`.
type hstoreParser struct {
	*streamParser
	err error // set at construction when the input is malformed
}

// newHStoreParser wraps b in a parser. Malformed input (too short, or
// not starting with a quoted key) is rejected lazily: the error is
// stored and returned by the first NextKey/NextValue call.
func newHStoreParser(b []byte) *hstoreParser {
	p := &hstoreParser{
		streamParser: newStreamParser(b, 0),
	}
	// The smallest well-formed pair `"k"=>x` needs at least 6 bytes
	// and must open with a double quote.
	if len(b) < 6 || b[0] != '"' {
		p.err = fmt.Errorf("bun: can't parse hstore: %q", b)
	}
	return p
}

// NextKey consumes and returns the next quoted key together with the
// "=>" separator that follows it. It returns io.EOF at end of input.
func (p *hstoreParser) NextKey() (string, error) {
	if p.err != nil {
		return "", p.err
	}

	err := p.skipByte('"')
	if err != nil {
		return "", err
	}

	key, err := p.readSubstring()
	if err != nil {
		return "", err
	}

	const separator = "=>"

	// Consume the "=>" between key and value byte by byte.
	for i := range separator {
		err = p.skipByte(separator[i])
		if err != nil {
			return "", err
		}
	}

	return string(key), nil
}

// NextValue consumes and returns the next value: either a quoted
// string (escapes resolved) or an unquoted token, with NULL mapped to
// the empty string. Trailing pair separators (", ") are consumed too.
func (p *hstoreParser) NextValue() (string, error) {
	if p.err != nil {
		return "", p.err
	}

	c, err := p.readByte()
	if err != nil {
		return "", err
	}

	switch c {
	case '"':
		value, err := p.readSubstring()
		if err != nil {
			return "", err
		}

		// Skip an optional "," and the space that follows it.
		if p.peek() == ',' {
			p.skipNext()
		}

		if p.peek() == ' ' {
			p.skipNext()
		}

		return string(value), nil
	default:
		value := p.readSimple()
		// Unquoted NULL maps to an empty string value.
		if bytes.Equal(value, []byte("NULL")) {
			value = nil
		}

		if p.peek() == ',' {
			p.skipNext()
		}

		return string(value), nil
	}
}

// readSimple reads an unquoted token up to the next comma or the end
// of input. The byte just consumed by the caller is pushed back first.
func (p *hstoreParser) readSimple() []byte {
	p.unreadByte()

	if i := bytes.IndexByte(p.b[p.i:], ','); i >= 0 {
		b := p.b[p.i : p.i+i]
		p.i += i
		return b
	}

	b := p.b[p.i:len(p.b)]
	p.i = len(p.b)
	return b
}

// readSubstring reads the body of a double-quoted string up to the
// closing quote, resolving \\ and \" escapes. The result aliases
// p.buf, which is reused between calls, so it is only valid until the
// next call.
func (p *hstoreParser) readSubstring() ([]byte, error) {
	c, err := p.readByte()
	if err != nil {
		return nil, err
	}

	p.buf = p.buf[:0]
	for {
		if c == '"' {
			// Closing quote: done.
			break
		}

		next, err := p.readByte()
		if err != nil {
			return nil, err
		}

		if c == '\\' {
			switch next {
			case '\\', '"':
				// Escaped backslash or quote: emit the literal byte
				// and advance past the escape sequence.
				p.buf = append(p.buf, next)

				c, err = p.readByte()
				if err != nil {
					return nil, err
				}
			default:
				// Lone backslash: keep it and continue with next.
				p.buf = append(p.buf, '\\')
				c = next
			}
			continue
		}

		p.buf = append(p.buf, c)
		c = next
	}

	return p.buf, nil
}
|
@ -0,0 +1,82 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// hstoreScanner returns a ScannerFunc that decodes a PostgreSQL hstore
// literal into typ, which must be a map (or a pointer to one). Only
// map[string]string is actually supported; other map types get a
// scanner that always errors, and non-map types return nil.
func hstoreScanner(typ reflect.Type) schema.ScannerFunc {
	kind := typ.Kind()

	switch kind {
	case reflect.Ptr:
		// Unwrap pointers; PtrScanner handles nil allocation.
		if fn := hstoreScanner(typ.Elem()); fn != nil {
			return schema.PtrScanner(fn)
		}
	case reflect.Map:
		// ok:
	default:
		return nil
	}

	if typ.Key() == stringType && typ.Elem() == stringType {
		return scanMapStringStringValue
	}
	// Unsupported map type: report the error at scan time.
	return func(dest reflect.Value, src interface{}) error {
		return fmt.Errorf("bun: Hstore(unsupported %s)", dest.Type())
	}
}
|
||||
|
||||
func scanMapStringStringValue(dest reflect.Value, src interface{}) error {
|
||||
dest = reflect.Indirect(dest)
|
||||
if !dest.CanSet() {
|
||||
return fmt.Errorf("bun: Scan(non-settable %s)", dest.Type())
|
||||
}
|
||||
|
||||
m, err := decodeMapStringString(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dest.Set(reflect.ValueOf(m))
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeMapStringString(src interface{}) (map[string]string, error) {
|
||||
if src == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
b, err := toBytes(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := make(map[string]string)
|
||||
|
||||
p := newHStoreParser(b)
|
||||
for {
|
||||
key, err := p.NextKey()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
value, err := p.NextValue()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m[key] = value
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
// +build appengine
|
||||
|
||||
package pgdialect
|
||||
|
||||
// bytesToString converts b to a string by copying (safe appengine build;
// see the unsafe counterpart for the zero-copy version).
func bytesToString(b []byte) string {
	return string(b)
}

// stringToBytes converts s to a byte slice by copying (safe appengine build).
func stringToBytes(s string) []byte {
	return []byte(s)
}
|
@ -0,0 +1,11 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// scanner returns the default bun scanner for typ; the pgdialect has
// no extra scanning logic beyond schema.Scanner here.
func scanner(typ reflect.Type) schema.ScannerFunc {
	return schema.Scanner(typ)
}
|
@ -0,0 +1,105 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/dialect/sqltype"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// PostgreSQL column type names used when generating CREATE TABLE.
const (
	// Date / Time
	pgTypeTimestampTz = "TIMESTAMPTZ"         // Timestamp with a time zone
	pgTypeDate        = "DATE"                // Date
	pgTypeTime        = "TIME"                // Time without a time zone
	pgTypeTimeTz      = "TIME WITH TIME ZONE" // Time with a time zone
	pgTypeInterval    = "INTERVAL"            // Time Interval

	// Network Addresses
	pgTypeInet    = "INET"    // IPv4 or IPv6 hosts and networks
	pgTypeCidr    = "CIDR"    // IPv4 or IPv6 networks
	pgTypeMacaddr = "MACADDR" // MAC addresses

	// Serial Types
	pgTypeSmallSerial = "SMALLSERIAL" // 2 byte autoincrementing integer
	pgTypeSerial      = "SERIAL"      // 4 byte autoincrementing integer
	pgTypeBigSerial   = "BIGSERIAL"   // 8 byte autoincrementing integer

	// Character Types
	pgTypeChar = "CHAR" // fixed length string (blank padded)
	pgTypeText = "TEXT" // variable length string without limit

	// JSON Types
	pgTypeJSON  = "JSON"  // text representation of json data
	pgTypeJSONB = "JSONB" // binary representation of json data

	// Binary Data Types
	pgTypeBytea = "BYTEA" // binary string
)

// Reflect types that map to dedicated PostgreSQL column types.
var (
	ipType             = reflect.TypeOf((*net.IP)(nil)).Elem()
	ipNetType          = reflect.TypeOf((*net.IPNet)(nil)).Elem()
	jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem()
)

// fieldSQLType resolves the PostgreSQL column type for a model field,
// honoring an explicit user type and the composite/hstore/array tag
// options before falling back to type-based discovery.
func fieldSQLType(field *schema.Field) string {
	if field.UserSQLType != "" {
		return field.UserSQLType
	}

	if v, ok := field.Tag.Option("composite"); ok {
		return v
	}
	if field.Tag.HasOption("hstore") {
		return sqltype.HSTORE
	}

	// Array fields: element type plus "[]" suffix.
	if field.Tag.HasOption("array") {
		switch field.IndirectType.Kind() {
		case reflect.Slice, reflect.Array:
			sqlType := sqlType(field.IndirectType.Elem())
			return sqlType + "[]"
		}
	}

	if field.DiscoveredSQLType == sqltype.Blob {
		return pgTypeBytea
	}

	return sqlType(field.IndirectType)
}

// sqlType maps a Go type to its PostgreSQL column type, special-casing
// net.IP, net.IPNet and json.RawMessage, and preferring TIMESTAMPTZ,
// JSONB and BYTEA over the generic discovered defaults.
func sqlType(typ reflect.Type) string {
	switch typ {
	case ipType:
		return pgTypeInet
	case ipNetType:
		return pgTypeCidr
	case jsonRawMessageType:
		return pgTypeJSONB
	}

	sqlType := schema.DiscoverSQLType(typ)
	switch sqlType {
	case sqltype.Timestamp:
		// Prefer the time-zone-aware timestamp type.
		sqlType = pgTypeTimestampTz
	}

	switch typ.Kind() {
	case reflect.Map, reflect.Struct:
		// Maps/structs with no better discovered type are stored as JSONB.
		if sqlType == sqltype.VarChar {
			return pgTypeJSONB
		}
		return sqlType
	case reflect.Array, reflect.Slice:
		if typ.Elem().Kind() == reflect.Uint8 {
			// []byte maps to BYTEA.
			return pgTypeBytea
		}
		return pgTypeJSONB
	}

	return sqlType
}
|
@ -0,0 +1,60 @@
|
||||
package pgdialect
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// streamParser is a minimal byte-slice cursor shared by the array and
// hstore parsers.
type streamParser struct {
	b []byte // input
	i int    // current read position in b

	buf []byte // reusable scratch buffer for unescaped substrings
}

// newStreamParser positions a parser over b at offset start.
func newStreamParser(b []byte, start int) *streamParser {
	return &streamParser{
		b: b,
		i: start,
	}
}

// valid reports whether unread input remains.
func (p *streamParser) valid() bool {
	return p.i < len(p.b)
}

// skipByte consumes the next byte when it equals skip; otherwise it
// leaves the position unchanged and returns an error.
func (p *streamParser) skipByte(skip byte) error {
	c, err := p.readByte()
	if err != nil {
		return err
	}
	if c == skip {
		return nil
	}
	p.unreadByte()
	return fmt.Errorf("got %q, wanted %q", c, skip)
}

// readByte returns the next byte, or io.EOF at end of input.
func (p *streamParser) readByte() (byte, error) {
	if p.valid() {
		c := p.b[p.i]
		p.i++
		return c, nil
	}
	return 0, io.EOF
}

// unreadByte steps the cursor back one byte.
func (p *streamParser) unreadByte() {
	p.i--
}

// peek returns the next byte without consuming it, or 0 at end of input.
func (p *streamParser) peek() byte {
	if p.valid() {
		return p.b[p.i]
	}
	return 0
}

// skipNext unconditionally consumes one byte.
func (p *streamParser) skipNext() {
	p.i++
}
|
@ -0,0 +1,18 @@
|
||||
// +build !appengine
|
||||
|
||||
package pgdialect
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// bytesToString converts b to a string without copying. The caller
// must not mutate b afterwards, since the string aliases its memory.
func bytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// stringToBytes converts s to a byte slice without copying. The result
// must be treated as read-only: it aliases the string's memory.
func stringToBytes(s string) []byte {
	// Build a slice header over the string header, adding a Cap field.
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
|
@ -0,0 +1,6 @@
|
||||
package pgdialect
|
||||
|
||||
// Version is the current release version.
// It must match bun.Version; the package init check panics on mismatch.
func Version() string {
	return "1.1.6"
}
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,98 @@
|
||||
package sqlitedialect
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
"github.com/uptrace/bun/dialect"
|
||||
"github.com/uptrace/bun/dialect/feature"
|
||||
"github.com/uptrace/bun/dialect/sqltype"
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// init guards against mixing mismatched sqlitedialect and bun versions.
func init() {
	if Version() != bun.Version() {
		panic(fmt.Errorf("sqlitedialect and Bun must have the same version: v%s != v%s",
			Version(), bun.Version()))
	}
}

// Dialect implements bun's schema dialect interface for SQLite.
type Dialect struct {
	schema.BaseDialect

	tables   *schema.Tables  // per-dialect table/model registry
	features feature.Feature // capability flags advertised to bun
}

// New creates the SQLite dialect and declares the SQL features SQLite
// supports so bun can generate appropriate queries.
func New() *Dialect {
	d := new(Dialect)
	d.tables = schema.NewTables(d)
	d.features = feature.CTE |
		feature.WithValues |
		feature.Returning |
		feature.InsertReturning |
		feature.InsertTableAlias |
		feature.UpdateTableAlias |
		feature.DeleteTableAlias |
		feature.InsertOnConflict |
		feature.TableNotExists |
		feature.SelectExists
	return d
}

// Init implements the dialect interface; SQLite needs no per-DB setup.
func (d *Dialect) Init(*sql.DB) {}

// Name reports the dialect identifier.
func (d *Dialect) Name() dialect.Name {
	return dialect.SQLite
}

// Features reports the feature flags declared in New.
func (d *Dialect) Features() feature.Feature {
	return d.features
}

// Tables returns the dialect's table registry.
func (d *Dialect) Tables() *schema.Tables {
	return d.tables
}

// OnTable post-processes every field of a newly registered table.
func (d *Dialect) OnTable(table *schema.Table) {
	for _, field := range table.FieldMap {
		d.onField(field)
	}
}

// onField normalizes the field's SQL type for SQLite.
func (d *Dialect) onField(field *schema.Field) {
	field.DiscoveredSQLType = fieldSQLType(field)
}

// IdentQuote reports the identifier quote character (double quote).
func (d *Dialect) IdentQuote() byte {
	return '"'
}

// AppendBytes appends bs as a SQLite hex blob literal (X'AB12...'),
// or NULL when bs is nil.
func (d *Dialect) AppendBytes(b []byte, bs []byte) []byte {
	if bs == nil {
		return dialect.AppendNull(b)
	}

	b = append(b, `X'`...)

	// Reserve the encoded length up front, then hex-encode in place.
	s := len(b)
	b = append(b, make([]byte, hex.EncodedLen(len(bs)))...)
	hex.Encode(b[s:], bs)

	b = append(b, '\'')

	return b
}

// fieldSQLType collapses all integer types to INTEGER; every other
// discovered type passes through unchanged.
func fieldSQLType(field *schema.Field) string {
	switch field.DiscoveredSQLType {
	case sqltype.SmallInt, sqltype.BigInt:
		// INTEGER PRIMARY KEY is an alias for the ROWID.
		// It is safe to convert all ints to INTEGER, because SQLite types don't have size.
		return sqltype.Integer
	default:
		return field.DiscoveredSQLType
	}
}
|
@ -0,0 +1,11 @@
|
||||
package sqlitedialect
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// scanner returns the default bun scanner for typ; the sqlitedialect
// has no extra scanning logic beyond schema.Scanner.
func scanner(typ reflect.Type) schema.ScannerFunc {
	return schema.Scanner(typ)
}
|
@ -0,0 +1,6 @@
|
||||
package sqlitedialect
|
||||
|
||||
// Version is the current release version.
// It must match bun.Version; the package init check panics on mismatch.
func Version() string {
	return "1.1.6"
}
|
@ -0,0 +1,16 @@
|
||||
package sqltype
|
||||
|
||||
// Portable SQL type names shared by the bun dialects; individual
// dialects map these to engine-specific types.
const (
	Boolean         = "BOOLEAN"
	SmallInt        = "SMALLINT"
	Integer         = "INTEGER"
	BigInt          = "BIGINT"
	Real            = "REAL"
	DoublePrecision = "DOUBLE PRECISION"
	VarChar         = "VARCHAR"
	Blob            = "BLOB"
	Timestamp       = "TIMESTAMP"
	JSON            = "JSON"
	JSONB           = "JSONB"
	HSTORE          = "HSTORE"
)
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,38 @@
|
||||
# pgdriver
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun/driver/pgdriver)](https://pkg.go.dev/github.com/uptrace/bun/driver/pgdriver)
|
||||
|
||||
pgdriver is a database/sql driver for PostgreSQL based on [go-pg](https://github.com/go-pg/pg) code.
|
||||
|
||||
You can install it with:
|
||||
|
||||
```shell
|
||||
go get github.com/uptrace/bun/driver/pgdriver
|
||||
```
|
||||
|
||||
And then create a `sql.DB` using it:
|
||||
|
||||
```go
|
||||
import _ "github.com/uptrace/bun/driver/pgdriver"
|
||||
|
||||
dsn := "postgres://postgres:@localhost:5432/test"
|
||||
db, err := sql.Open("pg", dsn)
|
||||
```
|
||||
|
||||
Alternatively:
|
||||
|
||||
```go
|
||||
dsn := "postgres://postgres:@localhost:5432/test"
|
||||
db := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
|
||||
```
|
||||
|
||||
[Benchmark](https://github.com/go-bun/bun-benchmark):
|
||||
|
||||
```
|
||||
BenchmarkInsert/pg-12 7254 148380 ns/op 900 B/op 13 allocs/op
|
||||
BenchmarkInsert/pgx-12 6494 166391 ns/op 2076 B/op 26 allocs/op
|
||||
BenchmarkSelect/pg-12 9100 132952 ns/op 1417 B/op 18 allocs/op
|
||||
BenchmarkSelect/pgx-12 8199 154920 ns/op 3679 B/op 60 allocs/op
|
||||
```
|
||||
|
||||
See [documentation](https://bun.uptrace.dev/postgres/) for more details.
|
@ -0,0 +1,193 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// PostgreSQL type OIDs recognized by readColumnValue.
const (
	pgBool = 16

	pgInt2 = 21
	pgInt4 = 23
	pgInt8 = 20

	pgFloat4 = 700
	pgFloat8 = 701

	pgText    = 25
	pgVarchar = 1043
	pgBytea   = 17

	pgDate        = 1082
	pgTimestamp   = 1114
	pgTimestamptz = 1184
)

// readColumnValue reads dataLen bytes of a text-format column value
// from rd and converts it to a Go value based on the column's type
// OID. A dataLen of -1 denotes SQL NULL. Unknown OIDs are returned as
// raw bytes.
func readColumnValue(rd *reader, dataType int32, dataLen int) (interface{}, error) {
	if dataLen == -1 {
		// SQL NULL.
		return nil, nil
	}

	switch dataType {
	case pgBool:
		return readBoolCol(rd, dataLen)
	case pgInt2:
		return readIntCol(rd, dataLen, 16)
	case pgInt4:
		return readIntCol(rd, dataLen, 32)
	case pgInt8:
		return readIntCol(rd, dataLen, 64)
	case pgFloat4:
		return readFloatCol(rd, dataLen, 32)
	case pgFloat8:
		return readFloatCol(rd, dataLen, 64)
	case pgTimestamp:
		return readTimeCol(rd, dataLen)
	case pgTimestamptz:
		return readTimeCol(rd, dataLen)
	case pgDate:
		// Return a string and let the scanner to convert string to time.Time if necessary.
		return readStringCol(rd, dataLen)
	case pgText, pgVarchar:
		return readStringCol(rd, dataLen)
	case pgBytea:
		return readBytesCol(rd, dataLen)
	}

	// Unknown OID: hand the raw bytes to the caller.
	b := make([]byte, dataLen)
	if _, err := io.ReadFull(rd, b); err != nil {
		return nil, err
	}
	return b, nil
}

// readBoolCol reads n bytes and interprets "t" or "1" as true.
func readBoolCol(rd *reader, n int) (interface{}, error) {
	tmp, err := rd.ReadTemp(n)
	if err != nil {
		return nil, err
	}
	return len(tmp) == 1 && (tmp[0] == 't' || tmp[0] == '1'), nil
}

// readIntCol reads n bytes and parses them as a base-10 integer of the
// given bit size. An empty value decodes as 0.
func readIntCol(rd *reader, n int, bitSize int) (interface{}, error) {
	if n <= 0 {
		return 0, nil
	}

	tmp, err := rd.ReadTemp(n)
	if err != nil {
		return 0, err
	}

	return strconv.ParseInt(bytesToString(tmp), 10, bitSize)
}

// readFloatCol reads n bytes and parses them as a float of the given
// bit size. An empty value decodes as 0.
func readFloatCol(rd *reader, n int, bitSize int) (interface{}, error) {
	if n <= 0 {
		return 0, nil
	}

	tmp, err := rd.ReadTemp(n)
	if err != nil {
		return 0, err
	}

	return strconv.ParseFloat(bytesToString(tmp), bitSize)
}

// readStringCol reads n bytes into a freshly allocated string (the
// value outlives the reader's temp buffer).
func readStringCol(rd *reader, n int) (interface{}, error) {
	if n <= 0 {
		return "", nil
	}

	b := make([]byte, n)

	if _, err := io.ReadFull(rd, b); err != nil {
		return nil, err
	}

	return bytesToString(b), nil
}

// readBytesCol reads n bytes of a hex-format bytea value ("\x...")
// and decodes it into a fresh byte slice.
func readBytesCol(rd *reader, n int) (interface{}, error) {
	if n <= 0 {
		return []byte{}, nil
	}

	tmp, err := rd.ReadTemp(n)
	if err != nil {
		return nil, err
	}

	if len(tmp) < 2 || tmp[0] != '\\' || tmp[1] != 'x' {
		return nil, fmt.Errorf("pgdriver: can't parse bytea: %q", tmp)
	}
	tmp = tmp[2:] // Cut off "\x".

	b := make([]byte, hex.DecodedLen(len(tmp)))
	if _, err := hex.Decode(b, tmp); err != nil {
		return nil, err
	}
	return b, nil
}

// readTimeCol reads n bytes and parses them as a timestamp via
// ParseTime. An empty value decodes as the zero time.
func readTimeCol(rd *reader, n int) (interface{}, error) {
	if n <= 0 {
		return time.Time{}, nil
	}

	tmp, err := rd.ReadTemp(n)
	if err != nil {
		return time.Time{}, err
	}

	tm, err := ParseTime(bytesToString(tmp))
	if err != nil {
		return time.Time{}, err
	}
	return tm, nil
}
|
||||
|
||||
// Layouts accepted by ParseTime, matching PostgreSQL's text output for the
// date, time, timestamp, and timestamptz types.
const (
	dateFormat         = "2006-01-02"
	timeFormat         = "15:04:05.999999999"
	timestampFormat    = "2006-01-02 15:04:05.999999999"
	timestamptzFormat  = "2006-01-02 15:04:05.999999999-07:00:00"
	timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00"
	timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07"
)

// ParseTime parses a PostgreSQL text-format date, time, timestamp, or
// timestamptz value. Values carrying no explicit zone are interpreted as
// UTC.
func ParseTime(s string) (time.Time, error) {
	n := len(s)

	if n < len("15:04:05") {
		return time.Time{}, fmt.Errorf("pgdriver: can't parse time=%q", s)
	}

	if n <= len(timeFormat) {
		// Short values are either a bare time ("15:04:05…") or a bare
		// date ("2006-01-02"); a colon at index 2 marks a time-of-day.
		if s[2] == ':' {
			return time.ParseInLocation(timeFormat, s, time.UTC)
		}
		return time.ParseInLocation(dateFormat, s, time.UTC)
	}

	// RFC 3339 uses 'T' as the date/time separator.
	if s[10] == 'T' {
		return time.Parse(time.RFC3339Nano, s)
	}

	// Probe zone-offset suffixes from longest ("-07:00:00") to shortest ("-07").
	if c := s[n-9]; c == '+' || c == '-' {
		return time.Parse(timestamptzFormat, s)
	}
	if c := s[n-6]; c == '+' || c == '-' {
		return time.Parse(timestamptzFormat2, s)
	}
	if c := s[n-3]; c == '+' || c == '-' {
		if strings.HasSuffix(s, "+00") {
			// A "+00" zone is plain UTC; strip it and parse as a timestamp.
			return time.ParseInLocation(timestampFormat, s[:n-3], time.UTC)
		}
		return time.Parse(timestamptzFormat3, s)
	}
	return time.ParseInLocation(timestampFormat, s, time.UTC)
}
|
@ -0,0 +1,407 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Config holds the settings used to establish and operate a connection.
// A zero Config is not usable; newDefaultConfig supplies the defaults and
// the With* options customize it.
type Config struct {
	// Network type, either tcp or unix.
	// Default is tcp.
	Network string
	// TCP host:port or Unix socket depending on Network.
	Addr string
	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Dialer creates new network connection and has priority over
	// Network and Addr options.
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// TLS config for secure connections.
	TLSConfig *tls.Config

	User     string
	Password string
	Database string
	AppName  string
	// PostgreSQL session parameters updated with `SET` command when a connection is created.
	ConnParams map[string]interface{}

	// Timeout for socket reads. If reached, commands fail with a timeout instead of blocking.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands fail with a timeout instead of blocking.
	WriteTimeout time.Duration
}
|
||||
|
||||
// newDefaultConfig returns a Config pre-populated with defaults, honoring
// the PGHOST/PGPORT/PGUSER/PGDATABASE environment variables.
// Note: TLS is on by default but with certificate verification disabled
// (InsecureSkipVerify); use WithTLSConfig or a DSN sslmode to change that.
func newDefaultConfig() *Config {
	host := env("PGHOST", "localhost")
	port := env("PGPORT", "5432")

	cfg := &Config{
		Network:     "tcp",
		Addr:        net.JoinHostPort(host, port),
		DialTimeout: 5 * time.Second,
		TLSConfig:   &tls.Config{InsecureSkipVerify: true},

		User:     env("PGUSER", "postgres"),
		Database: env("PGDATABASE", "postgres"),

		ReadTimeout:  10 * time.Second,
		WriteTimeout: 5 * time.Second,
	}

	// The default dialer honors DialTimeout at the time of the call, so a
	// later WithDialTimeout still takes effect.
	cfg.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
		netDialer := &net.Dialer{
			Timeout:   cfg.DialTimeout,
			KeepAlive: 5 * time.Minute,
		}
		return netDialer.DialContext(ctx, network, addr)
	}

	return cfg
}
|
||||
|
||||
// Option mutates a Config; options are applied in order by NewConnector.
type Option func(cfg *Config)

// Deprecated. Use Option instead.
type DriverOption = Option
|
||||
|
||||
// WithNetwork sets the network type ("tcp" or "unix").
// It panics on an empty value, since that is a programming error.
func WithNetwork(network string) Option {
	if network == "" {
		panic("network is empty")
	}
	return func(cfg *Config) {
		cfg.Network = network
	}
}

// WithAddr sets the server address (host:port for tcp, a path for unix).
// It panics on an empty value.
func WithAddr(addr string) Option {
	if addr == "" {
		panic("addr is empty")
	}
	return func(cfg *Config) {
		cfg.Addr = addr
	}
}

// WithTLSConfig sets the TLS configuration; nil disables TLS entirely.
func WithTLSConfig(tlsConfig *tls.Config) Option {
	return func(cfg *Config) {
		cfg.TLSConfig = tlsConfig
	}
}

// WithInsecure disables TLS when on is true; otherwise it restores the
// default of TLS without certificate verification.
func WithInsecure(on bool) Option {
	return func(cfg *Config) {
		if on {
			cfg.TLSConfig = nil
		} else {
			cfg.TLSConfig = &tls.Config{InsecureSkipVerify: true}
		}
	}
}
|
||||
|
||||
// WithUser sets the PostgreSQL user. It panics on an empty value.
func WithUser(user string) Option {
	if user == "" {
		panic("user is empty")
	}
	return func(cfg *Config) {
		cfg.User = user
	}
}

// WithPassword sets the password; an empty password is allowed.
func WithPassword(password string) Option {
	return func(cfg *Config) {
		cfg.Password = password
	}
}

// WithDatabase sets the database name. It panics on an empty value.
func WithDatabase(database string) Option {
	if database == "" {
		panic("database is empty")
	}
	return func(cfg *Config) {
		cfg.Database = database
	}
}

// WithApplicationName sets the application_name reported to the server.
func WithApplicationName(appName string) Option {
	return func(cfg *Config) {
		cfg.AppName = appName
	}
}

// WithConnParams sets session parameters applied with SET when each
// connection is created; the map replaces any previous value.
func WithConnParams(params map[string]interface{}) Option {
	return func(cfg *Config) {
		cfg.ConnParams = params
	}
}
|
||||
|
||||
// WithTimeout sets the dial, read, and write timeouts all at once.
func WithTimeout(timeout time.Duration) Option {
	return func(cfg *Config) {
		cfg.DialTimeout = timeout
		cfg.ReadTimeout = timeout
		cfg.WriteTimeout = timeout
	}
}

// WithDialTimeout sets only the dial timeout.
func WithDialTimeout(dialTimeout time.Duration) Option {
	return func(cfg *Config) {
		cfg.DialTimeout = dialTimeout
	}
}

// WithReadTimeout sets only the socket read timeout.
func WithReadTimeout(readTimeout time.Duration) Option {
	return func(cfg *Config) {
		cfg.ReadTimeout = readTimeout
	}
}

// WithWriteTimeout sets only the socket write timeout.
func WithWriteTimeout(writeTimeout time.Duration) Option {
	return func(cfg *Config) {
		cfg.WriteTimeout = writeTimeout
	}
}
|
||||
|
||||
// WithDSN applies every option encoded in the DSN string.
// It panics on a malformed DSN because Option has no error return;
// prefer parseDSN (via Driver.OpenConnector) when errors must be handled.
func WithDSN(dsn string) Option {
	return func(cfg *Config) {
		opts, err := parseDSN(dsn)
		if err != nil {
			panic(err)
		}
		for _, opt := range opts {
			opt(cfg)
		}
	}
}
|
||||
|
||||
// env returns the value of the environment variable key, or defValue when
// the variable is unset or empty.
func env(key, defValue string) string {
	v := os.Getenv(key)
	if v == "" {
		return defValue
	}
	return v
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func parseDSN(dsn string) ([]Option, error) {
|
||||
u, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
q := queryOptions{q: u.Query()}
|
||||
var opts []Option
|
||||
|
||||
switch u.Scheme {
|
||||
case "postgres", "postgresql":
|
||||
if u.Host != "" {
|
||||
addr := u.Host
|
||||
if !strings.Contains(addr, ":") {
|
||||
addr += ":5432"
|
||||
}
|
||||
opts = append(opts, WithAddr(addr))
|
||||
}
|
||||
|
||||
if len(u.Path) > 1 {
|
||||
opts = append(opts, WithDatabase(u.Path[1:]))
|
||||
}
|
||||
|
||||
if host := q.string("host"); host != "" {
|
||||
opts = append(opts, WithAddr(host))
|
||||
if host[0] == '/' {
|
||||
opts = append(opts, WithNetwork("unix"))
|
||||
}
|
||||
}
|
||||
case "unix":
|
||||
if len(u.Path) == 0 {
|
||||
return nil, fmt.Errorf("unix socket DSN requires a path: %s", dsn)
|
||||
}
|
||||
|
||||
opts = append(opts, WithNetwork("unix"))
|
||||
if u.Host != "" {
|
||||
opts = append(opts, WithDatabase(u.Host))
|
||||
}
|
||||
opts = append(opts, WithAddr(u.Path))
|
||||
default:
|
||||
return nil, errors.New("pgdriver: invalid scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
opts = append(opts, WithUser(u.User.Username()))
|
||||
if password, ok := u.User.Password(); ok {
|
||||
opts = append(opts, WithPassword(password))
|
||||
}
|
||||
}
|
||||
|
||||
if appName := q.string("application_name"); appName != "" {
|
||||
opts = append(opts, WithApplicationName(appName))
|
||||
}
|
||||
|
||||
if sslMode, sslRootCert := q.string("sslmode"), q.string("sslrootcert"); sslMode != "" || sslRootCert != "" {
|
||||
tlsConfig := &tls.Config{}
|
||||
switch sslMode {
|
||||
case "disable":
|
||||
tlsConfig = nil
|
||||
case "allow", "prefer", "":
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
case "require":
|
||||
if sslRootCert == "" {
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
break
|
||||
}
|
||||
// For backwards compatibility reasons, in the presence of `sslrootcert`,
|
||||
// `sslmode` = `require` must act as if `sslmode` = `verify-ca`. See the note at
|
||||
// https://www.postgresql.org/docs/current/libpq-ssl.html#LIBQ-SSL-CERTIFICATES .
|
||||
fallthrough
|
||||
case "verify-ca":
|
||||
// The default certificate verification will also verify the host name
|
||||
// which is not the behavior of `verify-ca`. As such, we need to manually
|
||||
// check the certificate chain.
|
||||
// At the time of writing, tls.Config has no option for this behavior
|
||||
// (verify chain, but skip server name).
|
||||
// See https://github.com/golang/go/issues/21971 .
|
||||
tlsConfig.InsecureSkipVerify = true
|
||||
tlsConfig.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
|
||||
certs := make([]*x509.Certificate, 0, len(rawCerts))
|
||||
for _, rawCert := range rawCerts {
|
||||
cert, err := x509.ParseCertificate(rawCert)
|
||||
if err != nil {
|
||||
return fmt.Errorf("pgdriver: failed to parse certificate: %w", err)
|
||||
}
|
||||
certs = append(certs, cert)
|
||||
}
|
||||
intermediates := x509.NewCertPool()
|
||||
for _, cert := range certs[1:] {
|
||||
intermediates.AddCert(cert)
|
||||
}
|
||||
_, err := certs[0].Verify(x509.VerifyOptions{
|
||||
Roots: tlsConfig.RootCAs,
|
||||
Intermediates: intermediates,
|
||||
})
|
||||
return err
|
||||
}
|
||||
case "verify-full":
|
||||
tlsConfig.ServerName = u.Host
|
||||
if host, _, err := net.SplitHostPort(u.Host); err == nil {
|
||||
tlsConfig.ServerName = host
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("pgdriver: sslmode '%s' is not supported", sslMode)
|
||||
}
|
||||
if tlsConfig != nil && sslRootCert != "" {
|
||||
rawCA, err := ioutil.ReadFile(sslRootCert)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("pgdriver: failed to read root CA: %w", err)
|
||||
}
|
||||
certPool := x509.NewCertPool()
|
||||
if !certPool.AppendCertsFromPEM(rawCA) {
|
||||
return nil, fmt.Errorf("pgdriver: failed to append root CA")
|
||||
}
|
||||
tlsConfig.RootCAs = certPool
|
||||
}
|
||||
opts = append(opts, WithTLSConfig(tlsConfig))
|
||||
}
|
||||
|
||||
if d := q.duration("timeout"); d != 0 {
|
||||
opts = append(opts, WithTimeout(d))
|
||||
}
|
||||
if d := q.duration("dial_timeout"); d != 0 {
|
||||
opts = append(opts, WithDialTimeout(d))
|
||||
}
|
||||
if d := q.duration("connect_timeout"); d != 0 {
|
||||
opts = append(opts, WithDialTimeout(d))
|
||||
}
|
||||
if d := q.duration("read_timeout"); d != 0 {
|
||||
opts = append(opts, WithReadTimeout(d))
|
||||
}
|
||||
if d := q.duration("write_timeout"); d != 0 {
|
||||
opts = append(opts, WithWriteTimeout(d))
|
||||
}
|
||||
|
||||
rem, err := q.remaining()
|
||||
if err != nil {
|
||||
return nil, q.err
|
||||
}
|
||||
|
||||
if len(rem) > 0 {
|
||||
params := make(map[string]interface{}, len(rem))
|
||||
for k, v := range rem {
|
||||
params[k] = v
|
||||
}
|
||||
opts = append(opts, WithConnParams(params))
|
||||
}
|
||||
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
// verify is a method to make sure if the config is legitimate
|
||||
// in the case it detects any errors, it returns with a non-nil error
|
||||
// it can be extended to check other parameters
|
||||
func (c *Config) verify() error {
|
||||
if c.User == "" {
|
||||
return errors.New("pgdriver: User option is empty (to configure, use WithUser).")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// queryOptions consumes DSN query parameters one at a time so that any
// parameter left over at the end can be detected and forwarded.
type queryOptions struct {
	q   url.Values // parameters not yet consumed
	err error      // first malformed-value error recorded
}

// string consumes the named parameter and returns its last value, or ""
// when the parameter is absent.
func (o *queryOptions) string(name string) string {
	values := o.q[name]
	if len(values) == 0 {
		return ""
	}
	// Remove the key so remaining() reports only unknown parameters.
	delete(o.q, name)
	return values[len(values)-1]
}

// duration consumes the named parameter as a duration. A plain integer is
// taken as seconds, with zero or a negative value mapping to -1 (which
// disables the timeout); anything else goes through time.ParseDuration.
// A malformed value records the first error and yields 0.
func (o *queryOptions) duration(name string) time.Duration {
	raw := o.string(name)
	if raw == "" {
		return 0
	}
	// try plain number first
	if secs, err := strconv.Atoi(raw); err == nil {
		if secs <= 0 {
			// disable timeouts
			return -1
		}
		return time.Duration(secs) * time.Second
	}
	d, err := time.ParseDuration(raw)
	if err == nil {
		return d
	}
	if o.err == nil {
		o.err = fmt.Errorf("pgdriver: invalid %s duration: %w", name, err)
	}
	return 0
}

// remaining returns the parameters that were never consumed, or the first
// recorded parse error.
func (o *queryOptions) remaining() (map[string]string, error) {
	if o.err != nil {
		return nil, o.err
	}
	if len(o.q) == 0 {
		return nil, nil
	}
	rest := make(map[string]string, len(o.q))
	for name, values := range o.q {
		rest[name] = values[len(values)-1]
	}
	return rest, nil
}
|
@ -0,0 +1,249 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
// CopyFrom copies data from the reader to the query destination.
// The query is expected to be a COPY ... FROM STDIN statement; args are
// interpolated into it client-side before it is sent.
func CopyFrom(
	ctx context.Context, conn bun.Conn, r io.Reader, query string, args ...interface{},
) (res sql.Result, err error) {
	query, err = formatQueryArgs(query, args)
	if err != nil {
		return nil, err
	}

	// Drop to the raw driver connection to speak the COPY sub-protocol.
	if err := conn.Raw(func(driverConn interface{}) error {
		cn := driverConn.(*Conn)

		if err := writeQuery(ctx, cn, query); err != nil {
			return err
		}
		if err := readCopyIn(ctx, cn); err != nil {
			return err
		}
		if err := writeCopyData(ctx, cn, r); err != nil {
			return err
		}
		if err := writeCopyDone(ctx, cn); err != nil {
			return err
		}

		// readQuery yields the final CommandComplete result.
		res, err = readQuery(ctx, cn)
		return err
	}); err != nil {
		return nil, err
	}

	return res, nil
}
|
||||
|
||||
// readCopyIn consumes server messages until the CopyInResponse that opens
// the COPY-in sub-protocol, or a ReadyForQuery on failure. It returns the
// first ErrorResponse received, if any.
func readCopyIn(ctx context.Context, cn *Conn) error {
	rd := cn.reader(ctx, -1)
	var firstErr error
	for {
		c, msgLen, err := readMessageType(rd)
		if err != nil {
			return err
		}

		switch c {
		case errorResponseMsg:
			e, err := readError(rd)
			if err != nil {
				return err
			}
			// Keep only the first server error; later ones are dropped.
			if firstErr == nil {
				firstErr = e
			}
		case readyForQueryMsg:
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
			return firstErr
		case copyInResponseMsg:
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
			return firstErr
		case noticeResponseMsg, parameterStatusMsg:
			// Informational messages; skip.
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
		default:
			return fmt.Errorf("pgdriver: readCopyIn: unexpected message %q", c)
		}
	}
}
|
||||
|
||||
// writeCopyData streams r to the server as a sequence of CopyData
// messages, one message per chunk read from r, stopping at io.EOF.
func writeCopyData(ctx context.Context, cn *Conn, r io.Reader) error {
	wb := getWriteBuffer()
	defer putWriteBuffer(wb)

	for {
		wb.StartMessage(copyDataMsg)
		if _, err := wb.ReadFrom(r); err != nil {
			if err == io.EOF {
				// NOTE(review): breaking here abandons a started but
				// unfinished message in wb; presumably putWriteBuffer (or a
				// later Reset) discards it — confirm against writeBuffer.
				break
			}
			return err
		}
		wb.FinishMessage()

		// cn.write sends the buffered message and resets wb for the next chunk.
		if err := cn.write(ctx, wb); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// writeCopyDone sends the CopyDone message that terminates a COPY-in stream.
func writeCopyDone(ctx context.Context, cn *Conn) error {
	wb := getWriteBuffer()
	defer putWriteBuffer(wb)

	wb.StartMessage(copyDoneMsg)
	wb.FinishMessage()

	return cn.write(ctx, wb)
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// CopyTo copies data from the query source to the writer.
// The query is expected to be a COPY ... TO STDOUT statement; args are
// interpolated into it client-side before it is sent.
func CopyTo(
	ctx context.Context, conn bun.Conn, w io.Writer, query string, args ...interface{},
) (res sql.Result, err error) {
	query, err = formatQueryArgs(query, args)
	if err != nil {
		return nil, err
	}

	// Drop to the raw driver connection to speak the COPY sub-protocol.
	if err := conn.Raw(func(driverConn interface{}) error {
		cn := driverConn.(*Conn)

		if err := writeQuery(ctx, cn, query); err != nil {
			return err
		}
		if err := readCopyOut(ctx, cn); err != nil {
			return err
		}

		res, err = readCopyData(ctx, cn, w)
		return err
	}); err != nil {
		return nil, err
	}

	return res, nil
}
|
||||
|
||||
// readCopyOut consumes server messages until the CopyOutResponse that
// opens the COPY-out sub-protocol, or a ReadyForQuery on failure.
func readCopyOut(ctx context.Context, cn *Conn) error {
	rd := cn.reader(ctx, -1)
	var firstErr error
	for {
		c, msgLen, err := readMessageType(rd)
		if err != nil {
			return err
		}

		switch c {
		case errorResponseMsg:
			e, err := readError(rd)
			if err != nil {
				return err
			}
			if firstErr == nil {
				firstErr = e
			}
		case readyForQueryMsg:
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
			return firstErr
		case copyOutResponseMsg:
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
			// Success: firstErr is intentionally not returned here; a prior
			// error would have been followed by ReadyForQuery instead.
			return nil
		case noticeResponseMsg, parameterStatusMsg:
			if err := rd.Discard(msgLen); err != nil {
				return err
			}
		default:
			return fmt.Errorf("pgdriver: readCopyOut: unexpected message %q", c)
		}
	}
}
|
||||
|
||||
// readCopyData streams CopyData payloads from the server into w until
// ReadyForQuery, returning the CommandComplete result and the first error
// encountered (server or writer).
func readCopyData(ctx context.Context, cn *Conn, w io.Writer) (res sql.Result, err error) {
	rd := cn.reader(ctx, -1)
	var firstErr error
	for {
		c, msgLen, err := readMessageType(rd)
		if err != nil {
			return nil, err
		}

		switch c {
		case errorResponseMsg:
			e, err := readError(rd)
			if err != nil {
				return nil, err
			}
			if firstErr == nil {
				firstErr = e
			}
		case copyDataMsg:
			// A payload may exceed the read buffer: ReadTemp can return a
			// partial chunk with bufio.ErrBufferFull, so loop until the
			// whole message is consumed.
			for msgLen > 0 {
				b, err := rd.ReadTemp(msgLen)
				if err != nil && err != bufio.ErrBufferFull {
					return nil, err
				}

				if _, err := w.Write(b); err != nil {
					if firstErr == nil {
						firstErr = err
					}
					// NOTE(review): breaking here leaves msgLen bytes of
					// this message unread on the wire — confirm the stream
					// stays aligned for the following messages.
					break
				}

				msgLen -= len(b)
			}
		case copyDoneMsg:
			if err := rd.Discard(msgLen); err != nil {
				return nil, err
			}
		case commandCompleteMsg:
			tmp, err := rd.ReadTemp(msgLen)
			if err != nil {
				// NOTE(review): unlike the other branches this overwrites an
				// earlier firstErr — confirm intended.
				firstErr = err
				break
			}

			r, err := parseResult(tmp)
			if err != nil {
				firstErr = err
			} else {
				res = r
			}
		case readyForQueryMsg:
			if err := rd.Discard(msgLen); err != nil {
				return nil, err
			}
			return res, firstErr
		case noticeResponseMsg, parameterStatusMsg:
			if err := rd.Discard(msgLen); err != nil {
				return nil, err
			}
		default:
			return nil, fmt.Errorf("pgdriver: readCopyData: unexpected message %q", c)
		}
	}
}
|
@ -0,0 +1,586 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// init registers the driver under the name "pg" for database/sql.Open.
func init() {
	sql.Register("pg", NewDriver())
}
|
||||
|
||||
// logging is the minimal logger interface this package writes to.
type logging interface {
	Printf(ctx context.Context, format string, v ...interface{})
}

// logger adapts the standard library *log.Logger to the logging interface.
type logger struct {
	log *log.Logger
}

// Printf formats and emits one log line; ctx is currently unused.
func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
	// Output depth 2 attributes the line to Printf's caller.
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}

// Logger is the package-level logger; replace it to redirect driver logs.
var Logger logging = &logger{
	log: log.New(os.Stderr, "pgdriver: ", log.LstdFlags|log.Lshortfile),
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Driver implements database/sql/driver.Driver and DriverContext.
type Driver struct {
	connector *Connector
}

var _ driver.DriverContext = (*Driver)(nil)

// NewDriver returns a Driver value suitable for sql.Register.
func NewDriver() Driver {
	return Driver{}
}

// OpenConnector parses the DSN once and returns a reusable Connector.
func (d Driver) OpenConnector(name string) (driver.Connector, error) {
	opts, err := parseDSN(name)
	if err != nil {
		return nil, err
	}
	return NewConnector(opts...), nil
}

// Open opens a single connection from the DSN; database/sql normally goes
// through OpenConnector instead.
func (d Driver) Open(name string) (driver.Conn, error) {
	connector, err := d.OpenConnector(name)
	if err != nil {
		return nil, err
	}
	return connector.Connect(context.TODO())
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Connector creates connections from a fixed Config; it implements
// driver.Connector and so works with sql.OpenDB.
type Connector struct {
	cfg *Config
}

// NewConnector builds a Connector from the defaults with the given options
// applied in order.
func NewConnector(opts ...Option) *Connector {
	c := &Connector{cfg: newDefaultConfig()}
	for _, opt := range opts {
		opt(c.cfg)
	}
	return c
}

var _ driver.Connector = (*Connector)(nil)

// Connect validates the config and dials a new connection.
func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) {
	if err := c.cfg.verify(); err != nil {
		return nil, err
	}
	return newConn(ctx, c.cfg)
}

// Driver returns a Driver bound to this connector.
func (c *Connector) Driver() driver.Driver {
	return Driver{connector: c}
}

// Config returns the connector's underlying config.
func (c *Connector) Config() *Config {
	return c.cfg
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Conn is a single PostgreSQL connection implementing driver.Conn and the
// optional context-aware driver interfaces; database/sql serializes access
// to it.
type Conn struct {
	cfg *Config

	netConn net.Conn
	rd      *reader // buffered reader over netConn

	// Backend identity — presumably populated from the startup handshake
	// (BackendKeyData); confirm in startup().
	processID int32
	secretKey int32

	stmtCount int // counter used to generate unique prepared-statement names

	closed int32 // accessed atomically; set to 1 by Close
}
|
||||
|
||||
// newConn dials the server, performs the (optional) TLS and startup
// handshakes, and applies any configured session parameters with SET.
func newConn(ctx context.Context, cfg *Config) (*Conn, error) {
	netConn, err := cfg.Dialer(ctx, cfg.Network, cfg.Addr)
	if err != nil {
		return nil, err
	}

	cn := &Conn{
		cfg:     cfg,
		netConn: netConn,
		rd:      newReader(netConn),
	}

	if cfg.TLSConfig != nil {
		// NOTE(review): on failure here (and below) netConn is not closed
		// in this function — confirm enableSSL/startup close it, else this
		// leaks a socket per failed handshake.
		if err := enableSSL(ctx, cn, cfg.TLSConfig); err != nil {
			return nil, err
		}
	}

	if err := startup(ctx, cn); err != nil {
		return nil, err
	}

	for k, v := range cfg.ConnParams {
		// NOTE(review): the parameter name k is interpolated unquoted into
		// the SET statement; safe only because ConnParams comes from local
		// configuration, not untrusted input.
		if v != nil {
			_, err = cn.ExecContext(ctx, fmt.Sprintf("SET %s TO $1", k), []driver.NamedValue{
				{Value: v},
			})
		} else {
			_, err = cn.ExecContext(ctx, fmt.Sprintf("SET %s TO DEFAULT", k), nil)
		}
		if err != nil {
			return nil, err
		}
	}

	return cn, nil
}
|
||||
|
||||
// reader applies the read deadline (timeout -1 selects cfg.ReadTimeout)
// and returns the connection's buffered reader.
func (cn *Conn) reader(ctx context.Context, timeout time.Duration) *reader {
	cn.setReadDeadline(ctx, timeout)
	return cn.rd
}
|
||||
|
||||
// write sends the buffered messages and resets wb for reuse. A failure
// before any byte was written is reported as driver.ErrBadConn so
// database/sql can safely retry on another connection; a partial write is
// returned as-is because the wire state is now ambiguous.
func (cn *Conn) write(ctx context.Context, wb *writeBuffer) error {
	cn.setWriteDeadline(ctx, -1)

	n, err := cn.netConn.Write(wb.Bytes)
	wb.Reset()

	if err != nil {
		if n == 0 {
			Logger.Printf(ctx, "pgdriver: Conn.Write failed (zero-length): %s", err)
			return driver.ErrBadConn
		}
		return err
	}
	return nil
}
|
||||
|
||||
var _ driver.Conn = (*Conn)(nil)

// Prepare creates a named server-side prepared statement; the name is
// generated from a per-connection counter.
func (cn *Conn) Prepare(query string) (driver.Stmt, error) {
	if cn.isClosed() {
		return nil, driver.ErrBadConn
	}

	// driver.Conn.Prepare has no context parameter.
	ctx := context.TODO()

	name := fmt.Sprintf("pgdriver-%d", cn.stmtCount)
	cn.stmtCount++

	if err := writeParseDescribeSync(ctx, cn, name, query); err != nil {
		return nil, err
	}

	rowDesc, err := readParseDescribeSync(ctx, cn)
	if err != nil {
		return nil, err
	}

	return newStmt(cn, name, rowDesc), nil
}
|
||||
|
||||
// Close closes the network connection exactly once; subsequent calls are
// no-ops.
func (cn *Conn) Close() error {
	if !atomic.CompareAndSwapInt32(&cn.closed, 0, 1) {
		return nil
	}
	return cn.netConn.Close()
}

// isClosed reports whether Close has been called.
func (cn *Conn) isClosed() bool {
	return atomic.LoadInt32(&cn.closed) == 1
}
|
||||
|
||||
// Begin starts a transaction with default options.
func (cn *Conn) Begin() (driver.Tx, error) {
	return cn.BeginTx(context.Background(), driver.TxOptions{})
}

var _ driver.ConnBeginTx = (*Conn)(nil)

// BeginTx starts a transaction via a plain BEGIN statement. Non-default
// isolation levels and read-only transactions are rejected.
func (cn *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
	// No need to check if the conn is closed. ExecContext below handles that.

	if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
		return nil, errors.New("pgdriver: custom IsolationLevel is not supported")
	}
	if opts.ReadOnly {
		return nil, errors.New("pgdriver: ReadOnly transactions are not supported")
	}

	if _, err := cn.ExecContext(ctx, "BEGIN", nil); err != nil {
		return nil, err
	}
	return tx{cn: cn}, nil
}
|
||||
|
||||
var _ driver.ExecerContext = (*Conn)(nil)

// ExecContext executes a statement, interpolating args client-side.
// Errors pass through checkBadConn so broken connections get retired.
func (cn *Conn) ExecContext(
	ctx context.Context, query string, args []driver.NamedValue,
) (driver.Result, error) {
	if cn.isClosed() {
		return nil, driver.ErrBadConn
	}
	res, err := cn.exec(ctx, query, args)
	if err != nil {
		return nil, cn.checkBadConn(err)
	}
	return res, nil
}

// exec interpolates args into query and runs it over the simple-query
// protocol, returning the command result.
func (cn *Conn) exec(
	ctx context.Context, query string, args []driver.NamedValue,
) (driver.Result, error) {
	query, err := formatQuery(query, args)
	if err != nil {
		return nil, err
	}
	if err := writeQuery(ctx, cn, query); err != nil {
		return nil, err
	}
	return readQuery(ctx, cn)
}
|
||||
|
||||
var _ driver.QueryerContext = (*Conn)(nil)

// QueryContext runs a query, interpolating args client-side, and returns a
// row iterator. Errors pass through checkBadConn.
func (cn *Conn) QueryContext(
	ctx context.Context, query string, args []driver.NamedValue,
) (driver.Rows, error) {
	if cn.isClosed() {
		return nil, driver.ErrBadConn
	}
	rows, err := cn.query(ctx, query, args)
	if err != nil {
		return nil, cn.checkBadConn(err)
	}
	return rows, nil
}

// query interpolates args into query and runs it over the simple-query
// protocol, returning streaming rows.
func (cn *Conn) query(
	ctx context.Context, query string, args []driver.NamedValue,
) (driver.Rows, error) {
	query, err := formatQuery(query, args)
	if err != nil {
		return nil, err
	}
	if err := writeQuery(ctx, cn, query); err != nil {
		return nil, err
	}
	return readQueryData(ctx, cn)
}
|
||||
|
||||
var _ driver.Pinger = (*Conn)(nil)

// Ping verifies the connection by running a trivial query.
func (cn *Conn) Ping(ctx context.Context) error {
	_, err := cn.ExecContext(ctx, "SELECT 1", nil)
	return err
}
|
||||
|
||||
// setReadDeadline applies the read deadline derived from ctx and timeout;
// timeout -1 selects the configured ReadTimeout.
func (cn *Conn) setReadDeadline(ctx context.Context, timeout time.Duration) {
	if timeout == -1 {
		timeout = cn.cfg.ReadTimeout
	}
	_ = cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout))
}

// setWriteDeadline is the write-side counterpart of setReadDeadline.
func (cn *Conn) setWriteDeadline(ctx context.Context, timeout time.Duration) {
	if timeout == -1 {
		timeout = cn.cfg.WriteTimeout
	}
	_ = cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout))
}

// deadline combines the context deadline with timeout, returning whichever
// fires first. A zero timeout defers entirely to the context; the zero
// time (no deadline) is returned when neither applies.
func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
	deadline, ok := ctx.Deadline()
	if !ok {
		if timeout == 0 {
			return time.Time{}
		}
		return time.Now().Add(timeout)
	}

	if timeout == 0 {
		return deadline
	}
	if tm := time.Now().Add(timeout); tm.Before(deadline) {
		return tm
	}
	return deadline
}
|
||||
|
||||
var _ driver.Validator = (*Conn)(nil)

// IsValid lets database/sql drop closed connections from the pool.
func (cn *Conn) IsValid() bool {
	return !cn.isClosed()
}

// checkBadConn closes the connection on unrecoverable errors so the next
// use yields driver.ErrBadConn, while still returning the original error.
func (cn *Conn) checkBadConn(err error) error {
	if isBadConn(err, false) {
		// Close and return driver.ErrBadConn next time the conn is used.
		_ = cn.Close()
	}
	// Always return the original error.
	return err
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// rows streams the result set of a query. reusable marks rowDesc as
// pool-managed so close() can return it to rowDescPool.
type rows struct {
	cn       *Conn
	rowDesc  *rowDescription
	reusable bool
	closed   bool
}

var _ driver.Rows = (*rows)(nil)

// newRows wraps the connection's pending result set.
func newRows(cn *Conn, rowDesc *rowDescription, reusable bool) *rows {
	return &rows{
		cn:       cn,
		rowDesc:  rowDesc,
		reusable: reusable,
	}
}
|
||||
|
||||
func (r *rows) Columns() []string {
|
||||
if r.closed || r.rowDesc == nil {
|
||||
return nil
|
||||
}
|
||||
return r.rowDesc.names
|
||||
}
|
||||
|
||||
func (r *rows) Close() error {
|
||||
if r.closed {
|
||||
return nil
|
||||
}
|
||||
defer r.close()
|
||||
|
||||
for {
|
||||
switch err := r.Next(nil); err {
|
||||
case nil, io.EOF:
|
||||
return nil
|
||||
default: // unexpected error
|
||||
_ = r.cn.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// close marks the rows as done and returns a pooled row description.
func (r *rows) close() {
	r.closed = true

	if r.rowDesc != nil {
		if r.reusable {
			rowDescPool.Put(r.rowDesc)
		}
		r.rowDesc = nil
	}
}

// Next reads the next data row into dest, returning io.EOF when the result
// set is exhausted. An io.EOF coming from the wire (a truncated stream) is
// converted to io.ErrUnexpectedEOF so it is not mistaken for a clean end.
func (r *rows) Next(dest []driver.Value) error {
	if r.closed {
		return io.EOF
	}

	eof, err := r.next(dest)
	if err == io.EOF {
		return io.ErrUnexpectedEOF
	} else if err != nil {
		return err
	}
	if eof {
		return io.EOF
	}
	return nil
}
|
||||
|
||||
// next consumes protocol messages until a DataRow is decoded into dest or
// ReadyForQuery marks the end of the result set (eof=true). Server errors
// are deferred until ReadyForQuery so the stream is fully consumed first.
func (r *rows) next(dest []driver.Value) (eof bool, _ error) {
	rd := r.cn.reader(context.TODO(), -1)
	var firstErr error
	for {
		c, msgLen, err := readMessageType(rd)
		if err != nil {
			return false, err
		}

		switch c {
		case dataRowMsg:
			return false, r.readDataRow(rd, dest)
		case commandCompleteMsg:
			if err := rd.Discard(msgLen); err != nil {
				return false, err
			}
		case readyForQueryMsg:
			// End of the result set; release resources before returning.
			r.close()

			if err := rd.Discard(msgLen); err != nil {
				return false, err
			}

			if firstErr != nil {
				return false, firstErr
			}
			return true, nil
		case parameterStatusMsg, noticeResponseMsg:
			if err := rd.Discard(msgLen); err != nil {
				return false, err
			}
		case errorResponseMsg:
			e, err := readError(rd)
			if err != nil {
				return false, err
			}
			if firstErr == nil {
				firstErr = e
			}
		default:
			return false, fmt.Errorf("pgdriver: Next: unexpected message %q", c)
		}
	}
}
|
||||
|
||||
func (r *rows) readDataRow(rd *reader, dest []driver.Value) error {
|
||||
numCol, err := readInt16(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dest) != int(numCol) {
|
||||
return fmt.Errorf("pgdriver: query returned %d columns, but Scan dest has %d items",
|
||||
numCol, len(dest))
|
||||
}
|
||||
|
||||
for colIdx := int16(0); colIdx < numCol; colIdx++ {
|
||||
dataLen, err := readInt32(rd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
value, err := readColumnValue(rd, r.rowDesc.types[colIdx], int(dataLen))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if dest != nil {
|
||||
dest[colIdx] = value
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func parseResult(b []byte) (driver.RowsAffected, error) {
|
||||
i := bytes.LastIndexByte(b, ' ')
|
||||
if i == -1 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
b = b[i+1 : len(b)-1]
|
||||
affected, err := strconv.ParseUint(bytesToString(b), 10, 64)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
return driver.RowsAffected(affected), nil
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// tx implements driver.Tx with plain COMMIT/ROLLBACK statements; the
// transaction was opened by Conn.BeginTx.
type tx struct {
	cn *Conn
}

var _ driver.Tx = (*tx)(nil)

// Commit commits the current transaction.
func (tx tx) Commit() error {
	_, err := tx.cn.ExecContext(context.Background(), "COMMIT", nil)
	return err
}

// Rollback aborts the current transaction.
func (tx tx) Rollback() error {
	_, err := tx.cn.ExecContext(context.Background(), "ROLLBACK", nil)
	return err
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
type stmt struct {
|
||||
cn *Conn
|
||||
name string
|
||||
rowDesc *rowDescription
|
||||
}
|
||||
|
||||
var (
|
||||
_ driver.Stmt = (*stmt)(nil)
|
||||
_ driver.StmtExecContext = (*stmt)(nil)
|
||||
_ driver.StmtQueryContext = (*stmt)(nil)
|
||||
)
|
||||
|
||||
func newStmt(cn *Conn, name string, rowDesc *rowDescription) *stmt {
|
||||
return &stmt{
|
||||
cn: cn,
|
||||
name: name,
|
||||
rowDesc: rowDesc,
|
||||
}
|
||||
}
|
||||
|
||||
func (stmt *stmt) Close() error {
|
||||
if stmt.rowDesc != nil {
|
||||
rowDescPool.Put(stmt.rowDesc)
|
||||
stmt.rowDesc = nil
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
if err := writeCloseStmt(ctx, stmt.cn, stmt.name); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := readCloseStmtComplete(ctx, stmt.cn); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (stmt *stmt) NumInput() int {
|
||||
if stmt.rowDesc == nil {
|
||||
return -1
|
||||
}
|
||||
return int(stmt.rowDesc.numInput)
|
||||
}
|
||||
|
||||
func (stmt *stmt) Exec(args []driver.Value) (driver.Result, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (stmt *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
|
||||
if err := writeBindExecute(ctx, stmt.cn, stmt.name, args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readExtQuery(ctx, stmt.cn)
|
||||
}
|
||||
|
||||
func (stmt *stmt) Query(args []driver.Value) (driver.Rows, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
func (stmt *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
|
||||
if err := writeBindExecute(ctx, stmt.cn, stmt.name, args); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return readExtQueryData(ctx, stmt.cn, stmt.rowDesc)
|
||||
}
|
@ -0,0 +1,75 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Error represents an error returned by PostgreSQL server
|
||||
// using PostgreSQL ErrorResponse protocol.
|
||||
//
|
||||
// https://www.postgresql.org/docs/current/static/protocol-message-formats.html
|
||||
type Error struct {
|
||||
m map[byte]string
|
||||
}
|
||||
|
||||
// Field returns a string value associated with an error field.
|
||||
//
|
||||
// https://www.postgresql.org/docs/current/static/protocol-error-fields.html
|
||||
func (err Error) Field(k byte) string {
|
||||
return err.m[k]
|
||||
}
|
||||
|
||||
// IntegrityViolation reports whether the error is a part of
|
||||
// Integrity Constraint Violation class of errors.
|
||||
//
|
||||
// https://www.postgresql.org/docs/current/static/errcodes-appendix.html
|
||||
func (err Error) IntegrityViolation() bool {
|
||||
switch err.Field('C') {
|
||||
case "23000", "23001", "23502", "23503", "23505", "23514", "23P01":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// StatementTimeout reports whether the error is a statement timeout error.
|
||||
func (err Error) StatementTimeout() bool {
|
||||
return err.Field('C') == "57014"
|
||||
}
|
||||
|
||||
func (err Error) Error() string {
|
||||
return fmt.Sprintf("%s: %s (SQLSTATE=%s)",
|
||||
err.Field('S'), err.Field('M'), err.Field('C'))
|
||||
}
|
||||
|
||||
func isBadConn(err error, allowTimeout bool) bool {
|
||||
switch err {
|
||||
case nil:
|
||||
return false
|
||||
case driver.ErrBadConn:
|
||||
return true
|
||||
}
|
||||
|
||||
if err, ok := err.(Error); ok {
|
||||
switch err.Field('V') {
|
||||
case "FATAL", "PANIC":
|
||||
return true
|
||||
}
|
||||
switch err.Field('C') {
|
||||
case "25P02", // current transaction is aborted
|
||||
"57014": // canceling statement due to user request
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if allowTimeout {
|
||||
if err, ok := err.(net.Error); ok && err.Timeout() {
|
||||
return !err.Temporary()
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
@ -0,0 +1,199 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func formatQueryArgs(query string, args []interface{}) (string, error) {
|
||||
namedArgs := make([]driver.NamedValue, len(args))
|
||||
for i, arg := range args {
|
||||
namedArgs[i] = driver.NamedValue{Value: arg}
|
||||
}
|
||||
return formatQuery(query, namedArgs)
|
||||
}
|
||||
|
||||
func formatQuery(query string, args []driver.NamedValue) (string, error) {
|
||||
if len(args) == 0 {
|
||||
return query, nil
|
||||
}
|
||||
|
||||
dst := make([]byte, 0, 2*len(query))
|
||||
|
||||
p := newParser(query)
|
||||
for p.Valid() {
|
||||
switch c := p.Next(); c {
|
||||
case '$':
|
||||
if i, ok := p.Number(); ok {
|
||||
if i < 1 {
|
||||
return "", fmt.Errorf("pgdriver: got $%d, but the minimal arg index is 1", i)
|
||||
}
|
||||
if i > len(args) {
|
||||
return "", fmt.Errorf("pgdriver: got %d args, wanted %d", len(args), i)
|
||||
}
|
||||
|
||||
var err error
|
||||
dst, err = appendArg(dst, args[i-1].Value)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
dst = append(dst, '$')
|
||||
}
|
||||
case '\'':
|
||||
if b, ok := p.QuotedString(); ok {
|
||||
dst = append(dst, b...)
|
||||
} else {
|
||||
dst = append(dst, '\'')
|
||||
}
|
||||
default:
|
||||
dst = append(dst, c)
|
||||
}
|
||||
}
|
||||
|
||||
return bytesToString(dst), nil
|
||||
}
|
||||
|
||||
func appendArg(b []byte, v interface{}) ([]byte, error) {
|
||||
switch v := v.(type) {
|
||||
case nil:
|
||||
return append(b, "NULL"...), nil
|
||||
case int64:
|
||||
return strconv.AppendInt(b, v, 10), nil
|
||||
case float64:
|
||||
switch {
|
||||
case math.IsNaN(v):
|
||||
return append(b, "'NaN'"...), nil
|
||||
case math.IsInf(v, 1):
|
||||
return append(b, "'Infinity'"...), nil
|
||||
case math.IsInf(v, -1):
|
||||
return append(b, "'-Infinity'"...), nil
|
||||
default:
|
||||
return strconv.AppendFloat(b, v, 'f', -1, 64), nil
|
||||
}
|
||||
case bool:
|
||||
if v {
|
||||
return append(b, "TRUE"...), nil
|
||||
}
|
||||
return append(b, "FALSE"...), nil
|
||||
case []byte:
|
||||
if v == nil {
|
||||
return append(b, "NULL"...), nil
|
||||
}
|
||||
|
||||
b = append(b, `'\x`...)
|
||||
|
||||
s := len(b)
|
||||
b = append(b, make([]byte, hex.EncodedLen(len(v)))...)
|
||||
hex.Encode(b[s:], v)
|
||||
|
||||
b = append(b, "'"...)
|
||||
|
||||
return b, nil
|
||||
case string:
|
||||
b = append(b, '\'')
|
||||
for _, r := range v {
|
||||
if r == '\000' {
|
||||
continue
|
||||
}
|
||||
|
||||
if r == '\'' {
|
||||
b = append(b, '\'', '\'')
|
||||
continue
|
||||
}
|
||||
|
||||
if r < utf8.RuneSelf {
|
||||
b = append(b, byte(r))
|
||||
continue
|
||||
}
|
||||
l := len(b)
|
||||
if cap(b)-l < utf8.UTFMax {
|
||||
b = append(b, make([]byte, utf8.UTFMax)...)
|
||||
}
|
||||
n := utf8.EncodeRune(b[l:l+utf8.UTFMax], r)
|
||||
b = b[:l+n]
|
||||
}
|
||||
b = append(b, '\'')
|
||||
return b, nil
|
||||
case time.Time:
|
||||
if v.IsZero() {
|
||||
return append(b, "NULL"...), nil
|
||||
}
|
||||
return v.UTC().AppendFormat(b, "'2006-01-02 15:04:05.999999-07:00'"), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("pgdriver: unexpected arg: %T", v)
|
||||
}
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
b []byte
|
||||
i int
|
||||
}
|
||||
|
||||
func newParser(s string) *parser {
|
||||
return &parser{
|
||||
b: stringToBytes(s),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) Valid() bool {
|
||||
return p.i < len(p.b)
|
||||
}
|
||||
|
||||
func (p *parser) Next() byte {
|
||||
c := p.b[p.i]
|
||||
p.i++
|
||||
return c
|
||||
}
|
||||
|
||||
func (p *parser) Number() (int, bool) {
|
||||
start := p.i
|
||||
end := len(p.b)
|
||||
|
||||
for i := p.i; i < len(p.b); i++ {
|
||||
c := p.b[i]
|
||||
if !isNum(c) {
|
||||
end = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
p.i = end
|
||||
b := p.b[start:end]
|
||||
|
||||
n, err := strconv.Atoi(bytesToString(b))
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
return n, true
|
||||
}
|
||||
|
||||
func (p *parser) QuotedString() ([]byte, bool) {
|
||||
start := p.i - 1
|
||||
end := len(p.b)
|
||||
|
||||
var c byte
|
||||
for i := p.i; i < len(p.b); i++ {
|
||||
next := p.b[i]
|
||||
if c == '\'' && next != '\'' {
|
||||
end = i
|
||||
break
|
||||
}
|
||||
c = next
|
||||
}
|
||||
|
||||
p.i = end
|
||||
b := p.b[start:end]
|
||||
|
||||
return b, true
|
||||
}
|
||||
|
||||
func isNum(c byte) bool {
|
||||
return c >= '0' && c <= '9'
|
||||
}
|
@ -0,0 +1,380 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/uptrace/bun"
|
||||
)
|
||||
|
||||
const pingChannel = "bun:ping"
|
||||
|
||||
var (
|
||||
errListenerClosed = errors.New("bun: listener is closed")
|
||||
errPingTimeout = errors.New("bun: ping timeout")
|
||||
)
|
||||
|
||||
// Notify sends a notification on the channel using `NOTIFY` command.
|
||||
func Notify(ctx context.Context, db *bun.DB, channel, payload string) error {
|
||||
_, err := db.ExecContext(ctx, "NOTIFY ?, ?", bun.Ident(channel), payload)
|
||||
return err
|
||||
}
|
||||
|
||||
type Listener struct {
|
||||
db *bun.DB
|
||||
driver *Connector
|
||||
|
||||
channels []string
|
||||
|
||||
mu sync.Mutex
|
||||
cn *Conn
|
||||
closed bool
|
||||
exit chan struct{}
|
||||
}
|
||||
|
||||
func NewListener(db *bun.DB) *Listener {
|
||||
return &Listener{
|
||||
db: db,
|
||||
driver: db.Driver().(Driver).connector,
|
||||
exit: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the listener, releasing any open resources.
|
||||
func (ln *Listener) Close() error {
|
||||
return ln.withLock(func() error {
|
||||
if ln.closed {
|
||||
return errListenerClosed
|
||||
}
|
||||
|
||||
ln.closed = true
|
||||
close(ln.exit)
|
||||
|
||||
return ln.closeConn(errListenerClosed)
|
||||
})
|
||||
}
|
||||
|
||||
func (ln *Listener) withLock(fn func() error) error {
|
||||
ln.mu.Lock()
|
||||
defer ln.mu.Unlock()
|
||||
return fn()
|
||||
}
|
||||
|
||||
func (ln *Listener) conn(ctx context.Context) (*Conn, error) {
|
||||
if ln.closed {
|
||||
return nil, errListenerClosed
|
||||
}
|
||||
if ln.cn != nil {
|
||||
return ln.cn, nil
|
||||
}
|
||||
|
||||
cn, err := ln._conn(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ln.cn = cn
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
func (ln *Listener) _conn(ctx context.Context) (*Conn, error) {
|
||||
driverConn, err := ln.driver.Connect(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cn := driverConn.(*Conn)
|
||||
|
||||
if len(ln.channels) > 0 {
|
||||
err := ln.listen(ctx, cn, ln.channels...)
|
||||
if err != nil {
|
||||
_ = cn.Close()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return cn, nil
|
||||
}
|
||||
|
||||
func (ln *Listener) checkConn(ctx context.Context, cn *Conn, err error, allowTimeout bool) {
|
||||
_ = ln.withLock(func() error {
|
||||
if ln.closed || ln.cn != cn {
|
||||
return nil
|
||||
}
|
||||
if isBadConn(err, allowTimeout) {
|
||||
ln.reconnect(ctx, err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (ln *Listener) reconnect(ctx context.Context, reason error) {
|
||||
if ln.cn != nil {
|
||||
Logger.Printf(ctx, "bun: discarding bad listener connection: %s", reason)
|
||||
_ = ln.closeConn(reason)
|
||||
}
|
||||
_, _ = ln.conn(ctx)
|
||||
}
|
||||
|
||||
func (ln *Listener) closeConn(reason error) error {
|
||||
if ln.cn == nil {
|
||||
return nil
|
||||
}
|
||||
err := ln.cn.Close()
|
||||
ln.cn = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// Listen starts listening for notifications on channels.
|
||||
func (ln *Listener) Listen(ctx context.Context, channels ...string) error {
|
||||
var cn *Conn
|
||||
|
||||
if err := ln.withLock(func() error {
|
||||
ln.channels = appendIfNotExists(ln.channels, channels...)
|
||||
|
||||
var err error
|
||||
cn, err = ln.conn(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ln.listen(ctx, cn, channels...); err != nil {
|
||||
ln.checkConn(ctx, cn, err, false)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ln *Listener) listen(ctx context.Context, cn *Conn, channels ...string) error {
|
||||
for _, channel := range channels {
|
||||
if err := writeQuery(ctx, cn, "LISTEN "+strconv.Quote(channel)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unlisten stops listening for notifications on channels.
|
||||
func (ln *Listener) Unlisten(ctx context.Context, channels ...string) error {
|
||||
var cn *Conn
|
||||
|
||||
if err := ln.withLock(func() error {
|
||||
ln.channels = removeIfExists(ln.channels, channels...)
|
||||
|
||||
var err error
|
||||
cn, err = ln.conn(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ln.unlisten(ctx, cn, channels...); err != nil {
|
||||
ln.checkConn(ctx, cn, err, false)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ln *Listener) unlisten(ctx context.Context, cn *Conn, channels ...string) error {
|
||||
for _, channel := range channels {
|
||||
if err := writeQuery(ctx, cn, "UNLISTEN "+strconv.Quote(channel)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Receive indefinitely waits for a notification. This is low-level API
|
||||
// and in most cases Channel should be used instead.
|
||||
func (ln *Listener) Receive(ctx context.Context) (channel string, payload string, err error) {
|
||||
return ln.ReceiveTimeout(ctx, 0)
|
||||
}
|
||||
|
||||
// ReceiveTimeout waits for a notification until timeout is reached.
|
||||
// This is low-level API and in most cases Channel should be used instead.
|
||||
func (ln *Listener) ReceiveTimeout(
|
||||
ctx context.Context, timeout time.Duration,
|
||||
) (channel, payload string, err error) {
|
||||
var cn *Conn
|
||||
|
||||
if err := ln.withLock(func() error {
|
||||
var err error
|
||||
cn, err = ln.conn(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
rd := cn.reader(ctx, timeout)
|
||||
channel, payload, err = readNotification(ctx, rd)
|
||||
if err != nil {
|
||||
ln.checkConn(ctx, cn, err, timeout > 0)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return channel, payload, nil
|
||||
}
|
||||
|
||||
// Channel returns a channel for concurrently receiving notifications.
|
||||
// It periodically sends Ping notification to test connection health.
|
||||
//
|
||||
// The channel is closed with Listener. Receive* APIs can not be used
|
||||
// after channel is created.
|
||||
func (ln *Listener) Channel(opts ...ChannelOption) <-chan Notification {
|
||||
return newChannel(ln, opts).ch
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
// Notification received with LISTEN command.
|
||||
type Notification struct {
|
||||
Channel string
|
||||
Payload string
|
||||
}
|
||||
|
||||
type ChannelOption func(c *channel)
|
||||
|
||||
func WithChannelSize(size int) ChannelOption {
|
||||
return func(c *channel) {
|
||||
c.size = size
|
||||
}
|
||||
}
|
||||
|
||||
type channel struct {
|
||||
ctx context.Context
|
||||
ln *Listener
|
||||
|
||||
size int
|
||||
pingTimeout time.Duration
|
||||
|
||||
ch chan Notification
|
||||
pingCh chan struct{}
|
||||
}
|
||||
|
||||
func newChannel(ln *Listener, opts []ChannelOption) *channel {
|
||||
c := &channel{
|
||||
ctx: context.TODO(),
|
||||
ln: ln,
|
||||
|
||||
size: 1000,
|
||||
pingTimeout: 5 * time.Second,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
|
||||
c.ch = make(chan Notification, c.size)
|
||||
c.pingCh = make(chan struct{}, 1)
|
||||
_ = c.ln.Listen(c.ctx, pingChannel)
|
||||
go c.startReceive()
|
||||
go c.startPing()
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *channel) startReceive() {
|
||||
var errCount int
|
||||
for {
|
||||
channel, payload, err := c.ln.Receive(c.ctx)
|
||||
if err != nil {
|
||||
if err == errListenerClosed {
|
||||
close(c.ch)
|
||||
return
|
||||
}
|
||||
|
||||
if errCount > 0 {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
errCount++
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
errCount = 0
|
||||
|
||||
// Any notification is as good as a ping.
|
||||
select {
|
||||
case c.pingCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
switch channel {
|
||||
case pingChannel:
|
||||
// ignore
|
||||
default:
|
||||
select {
|
||||
case c.ch <- Notification{channel, payload}:
|
||||
default:
|
||||
Logger.Printf(c.ctx, "pgdriver: Listener buffer is full (message is dropped)")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) startPing() {
|
||||
timer := time.NewTimer(time.Minute)
|
||||
timer.Stop()
|
||||
|
||||
healthy := true
|
||||
for {
|
||||
timer.Reset(c.pingTimeout)
|
||||
select {
|
||||
case <-c.pingCh:
|
||||
healthy = true
|
||||
if !timer.Stop() {
|
||||
<-timer.C
|
||||
}
|
||||
case <-timer.C:
|
||||
pingErr := c.ping(c.ctx)
|
||||
if healthy {
|
||||
healthy = false
|
||||
} else {
|
||||
if pingErr == nil {
|
||||
pingErr = errPingTimeout
|
||||
}
|
||||
_ = c.ln.withLock(func() error {
|
||||
c.ln.reconnect(c.ctx, pingErr)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
case <-c.ln.exit:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) ping(ctx context.Context) error {
|
||||
_, err := c.ln.db.ExecContext(ctx, "NOTIFY "+strconv.Quote(pingChannel))
|
||||
return err
|
||||
}
|
||||
|
||||
func appendIfNotExists(ss []string, es ...string) []string {
|
||||
loop:
|
||||
for _, e := range es {
|
||||
for _, s := range ss {
|
||||
if s == e {
|
||||
continue loop
|
||||
}
|
||||
}
|
||||
ss = append(ss, e)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
func removeIfExists(ss []string, es ...string) []string {
|
||||
for _, e := range es {
|
||||
for i, s := range ss {
|
||||
if s == e {
|
||||
last := len(ss) - 1
|
||||
ss[i] = ss[last]
|
||||
ss = ss[:last]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return ss
|
||||
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,11 @@
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func stringToBytes(s string) []byte {
|
||||
return []byte(s)
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
// +build !appengine
|
||||
|
||||
package pgdriver
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func bytesToString(b []byte) string {
|
||||
return *(*string)(unsafe.Pointer(&b))
|
||||
}
|
||||
|
||||
//nolint:deadcode,unused
|
||||
func stringToBytes(s string) []byte {
|
||||
return *(*[]byte)(unsafe.Pointer(
|
||||
&struct {
|
||||
string
|
||||
Cap int
|
||||
}{s, len(s)},
|
||||
))
|
||||
}
|
@ -0,0 +1,112 @@
|
||||
package pgdriver
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var wbPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return newWriteBuffer()
|
||||
},
|
||||
}
|
||||
|
||||
func getWriteBuffer() *writeBuffer {
|
||||
wb := wbPool.Get().(*writeBuffer)
|
||||
return wb
|
||||
}
|
||||
|
||||
func putWriteBuffer(wb *writeBuffer) {
|
||||
wb.Reset()
|
||||
wbPool.Put(wb)
|
||||
}
|
||||
|
||||
type writeBuffer struct {
|
||||
Bytes []byte
|
||||
|
||||
msgStart int
|
||||
paramStart int
|
||||
}
|
||||
|
||||
func newWriteBuffer() *writeBuffer {
|
||||
return &writeBuffer{
|
||||
Bytes: make([]byte, 0, 1024),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *writeBuffer) Reset() {
|
||||
b.Bytes = b.Bytes[:0]
|
||||
}
|
||||
|
||||
func (b *writeBuffer) StartMessage(c byte) {
|
||||
if c == 0 {
|
||||
b.msgStart = len(b.Bytes)
|
||||
b.Bytes = append(b.Bytes, 0, 0, 0, 0)
|
||||
} else {
|
||||
b.msgStart = len(b.Bytes) + 1
|
||||
b.Bytes = append(b.Bytes, c, 0, 0, 0, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *writeBuffer) FinishMessage() {
|
||||
binary.BigEndian.PutUint32(
|
||||
b.Bytes[b.msgStart:], uint32(len(b.Bytes)-b.msgStart))
|
||||
}
|
||||
|
||||
func (b *writeBuffer) Query() []byte {
|
||||
return b.Bytes[b.msgStart+4 : len(b.Bytes)-1]
|
||||
}
|
||||
|
||||
func (b *writeBuffer) StartParam() {
|
||||
b.paramStart = len(b.Bytes)
|
||||
b.Bytes = append(b.Bytes, 0, 0, 0, 0)
|
||||
}
|
||||
|
||||
func (b *writeBuffer) FinishParam() {
|
||||
binary.BigEndian.PutUint32(
|
||||
b.Bytes[b.paramStart:], uint32(len(b.Bytes)-b.paramStart-4))
|
||||
}
|
||||
|
||||
var nullParamLength = int32(-1)
|
||||
|
||||
func (b *writeBuffer) FinishNullParam() {
|
||||
binary.BigEndian.PutUint32(
|
||||
b.Bytes[b.paramStart:], uint32(nullParamLength))
|
||||
}
|
||||
|
||||
func (b *writeBuffer) Write(data []byte) (int, error) {
|
||||
b.Bytes = append(b.Bytes, data...)
|
||||
return len(data), nil
|
||||
}
|
||||
|
||||
func (b *writeBuffer) WriteInt16(num int16) {
|
||||
b.Bytes = append(b.Bytes, 0, 0)
|
||||
binary.BigEndian.PutUint16(b.Bytes[len(b.Bytes)-2:], uint16(num))
|
||||
}
|
||||
|
||||
func (b *writeBuffer) WriteInt32(num int32) {
|
||||
b.Bytes = append(b.Bytes, 0, 0, 0, 0)
|
||||
binary.BigEndian.PutUint32(b.Bytes[len(b.Bytes)-4:], uint32(num))
|
||||
}
|
||||
|
||||
func (b *writeBuffer) WriteString(s string) {
|
||||
b.Bytes = append(b.Bytes, s...)
|
||||
b.Bytes = append(b.Bytes, 0)
|
||||
}
|
||||
|
||||
func (b *writeBuffer) WriteBytes(data []byte) {
|
||||
b.Bytes = append(b.Bytes, data...)
|
||||
b.Bytes = append(b.Bytes, 0)
|
||||
}
|
||||
|
||||
func (b *writeBuffer) WriteByte(c byte) error {
|
||||
b.Bytes = append(b.Bytes, c)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *writeBuffer) ReadFrom(r io.Reader) (int64, error) {
|
||||
n, err := r.Read(b.Bytes[len(b.Bytes):cap(b.Bytes)])
|
||||
b.Bytes = b.Bytes[:len(b.Bytes)+n]
|
||||
return int64(n), err
|
||||
}
|
@ -0,0 +1,24 @@
|
||||
Copyright (c) 2021 Vladimir Mihailenco. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
@ -0,0 +1,33 @@
|
||||
# sqliteshim
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/uptrace/bun/driver/sqliteshim)](https://pkg.go.dev/github.com/uptrace/bun/driver/sqliteshim)
|
||||
|
||||
sqliteshim automatically imports [modernc.org/sqlite](https://modernc.org/sqlite/) or
|
||||
[mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) depending on your platform.
|
||||
|
||||
Currently sqliteshim uses packages in the following order:
|
||||
|
||||
- [modernc.org/sqlite](https://modernc.org/sqlite/) on supported platforms.
|
||||
- [mattn/go-sqlite3](https://github.com/mattn/go-sqlite3) if Cgo is enabled.
|
||||
|
||||
Otherwise it registers a driver that returns an error on unsupported platforms.
|
||||
|
||||
You can install sqliteshim with:
|
||||
|
||||
```shell
|
||||
go get github.com/uptrace/bun/driver/sqliteshim
|
||||
```
|
||||
|
||||
And then create a `sql.DB`:
|
||||
|
||||
```go
|
||||
sqldb, err := sql.Open(sqliteshim.ShimName, "file::memory:?cache=shared")
|
||||
```
|
||||
|
||||
Alternatively you can also use `sqliteshim.DriverName`:
|
||||
|
||||
```go
|
||||
if sqliteshim.HasDriver() {
|
||||
sqldb, err := sql.Open(sqliteshim.DriverName(), "file::memory:?cache=shared")
|
||||
}
|
||||
```
|
@ -0,0 +1,22 @@
|
||||
// Use Cgo sqlite if Cgo is enabled and either modernc is unavailable or Cgo was
|
||||
// explicitly requested via build tag.
|
||||
|
||||
// +build cgo,cgosqlite cgo
|
||||
// +build cgosqlite !darwin !amd64
|
||||
// +build cgosqlite !darwin !arm64
|
||||
// +build cgosqlite !linux !386
|
||||
// +build cgosqlite !linux !amd64
|
||||
// +build cgosqlite !linux !arm
|
||||
// +build cgosqlite !linux !arm64
|
||||
// +build cgosqlite !windows !amd64
|
||||
|
||||
package sqliteshim
|
||||
|
||||
import "github.com/mattn/go-sqlite3"
|
||||
|
||||
const (
|
||||
hasDriver = true
|
||||
driverName = "sqlite3"
|
||||
)
|
||||
|
||||
var shimDriver = &sqlite3.SQLiteDriver{}
|
@ -0,0 +1,19 @@
|
||||
// Use modernc.org/sqlite on all supported platforms unless Cgo driver
|
||||
// was explicitly requested.
|
||||
//
|
||||
// See also https://pkg.go.dev/modernc.org/sqlite#hdr-Supported_platforms_and_architectures
|
||||
|
||||
//go:build !cgosqlite && ((darwin && amd64) || (darwin && arm64) || (linux && 386) || (linux && amd64) || (linux && arm) || (linux && arm64) || (windows && amd64))
|
||||
// +build !cgosqlite
|
||||
// +build darwin,amd64 darwin,arm64 linux,386 linux,amd64 linux,arm linux,arm64 windows,amd64
|
||||
|
||||
package sqliteshim
|
||||
|
||||
import "modernc.org/sqlite"
|
||||
|
||||
const (
|
||||
hasDriver = true
|
||||
driverName = "sqlite"
|
||||
)
|
||||
|
||||
var shimDriver = &sqlite.Driver{}
|
@ -0,0 +1,30 @@
|
||||
// Return error if both Cgo and modernc sqlite implementations are unavailable.
|
||||
// That includes the case where cgosqlite is set but Cgo is disabled.
|
||||
|
||||
// +build !cgo
|
||||
// +build cgosqlite !darwin !amd64
|
||||
// +build cgosqlite !darwin !arm64
|
||||
// +build cgosqlite !linux !386
|
||||
// +build cgosqlite !linux !amd64
|
||||
// +build cgosqlite !linux !arm
|
||||
// +build cgosqlite !linux !arm64
|
||||
// +build cgosqlite !windows !amd64
|
||||
|
||||
package sqliteshim
|
||||
|
||||
import "database/sql/driver"
|
||||
|
||||
const (
|
||||
hasDriver = false
|
||||
driverName = ShimName
|
||||
)
|
||||
|
||||
var shimDriver = (*errorDriver)(nil)
|
||||
|
||||
type errorDriver struct{}
|
||||
|
||||
func (*errorDriver) Open(dsn string) (driver.Conn, error) {
|
||||
return nil, &errUnsupported
|
||||
}
|
||||
|
||||
var errUnsupported UnsupportedError
|
@ -0,0 +1,45 @@
|
||||
// Package sqliteshim is a shim package that imports an appropriate sqlite
|
||||
// driver for the build target and registers it under ShimName.
|
||||
//
|
||||
// Currently it uses packages in the following order:
|
||||
// • modernc.org/sqlite on supported platforms,
|
||||
// • github.com/mattn/go-sqlite3 if Cgo is enabled,
|
||||
// Otherwise registers a driver that returns an error on unsupported platforms.
|
||||
//
|
||||
package sqliteshim
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
)
|
||||
|
||||
func init() {
|
||||
sql.Register(ShimName, shimDriver)
|
||||
}
|
||||
|
||||
// ShimName is the name of the shim database/sql driver registration.
|
||||
const ShimName = "sqliteshim"
|
||||
|
||||
// UnsupportedError is returned from driver on unsupported platforms.
|
||||
type UnsupportedError struct{}
|
||||
|
||||
func (e *UnsupportedError) Error() string {
|
||||
return "sqlite driver is not available on the current platform"
|
||||
}
|
||||
|
||||
// HasDriver indicates that SQLite driver implementation is available.
|
||||
func HasDriver() bool {
|
||||
return hasDriver
|
||||
}
|
||||
|
||||
// Driver returns the shim driver registered under ShimName name.
|
||||
func Driver() driver.Driver {
|
||||
return shimDriver
|
||||
}
|
||||
|
||||
// DriverName is the name of the database/sql driver. Note that unlike ShimName
|
||||
// the value depends on the build target. That is, DriverName returns the name
|
||||
// of the underlying database driver.
|
||||
func DriverName() string {
|
||||
return driverName
|
||||
}
|
@ -0,0 +1,26 @@
|
||||
package bunjson
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
var _ Provider = (*StdProvider)(nil)
|
||||
|
||||
type StdProvider struct{}
|
||||
|
||||
func (StdProvider) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
func (StdProvider) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func (StdProvider) NewEncoder(w io.Writer) Encoder {
|
||||
return json.NewEncoder(w)
|
||||
}
|
||||
|
||||
func (StdProvider) NewDecoder(r io.Reader) Decoder {
|
||||
return json.NewDecoder(r)
|
||||
}
|
@ -0,0 +1,43 @@
|
||||
package bunjson
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
var provider Provider = StdProvider{}
|
||||
|
||||
func SetProvider(p Provider) {
|
||||
provider = p
|
||||
}
|
||||
|
||||
type Provider interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
NewEncoder(w io.Writer) Encoder
|
||||
NewDecoder(r io.Reader) Decoder
|
||||
}
|
||||
|
||||
type Decoder interface {
|
||||
Decode(v interface{}) error
|
||||
UseNumber()
|
||||
}
|
||||
|
||||
type Encoder interface {
|
||||
Encode(v interface{}) error
|
||||
}
|
||||
|
||||
func Marshal(v interface{}) ([]byte, error) {
|
||||
return provider.Marshal(v)
|
||||
}
|
||||
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
return provider.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
func NewEncoder(w io.Writer) Encoder {
|
||||
return provider.NewEncoder(w)
|
||||
}
|
||||
|
||||
func NewDecoder(r io.Reader) Decoder {
|
||||
return provider.NewDecoder(r)
|
||||
}
|
@ -0,0 +1,113 @@
|
||||
package bun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
type QueryEvent struct {
|
||||
DB *DB
|
||||
|
||||
QueryAppender schema.QueryAppender // DEPRECATED: use IQuery instead
|
||||
IQuery Query
|
||||
Query string
|
||||
QueryTemplate string
|
||||
QueryArgs []interface{}
|
||||
Model Model
|
||||
|
||||
StartTime time.Time
|
||||
Result sql.Result
|
||||
Err error
|
||||
|
||||
Stash map[interface{}]interface{}
|
||||
}
|
||||
|
||||
func (e *QueryEvent) Operation() string {
|
||||
if e.IQuery != nil {
|
||||
return e.IQuery.Operation()
|
||||
}
|
||||
return queryOperation(e.Query)
|
||||
}
|
||||
|
||||
func queryOperation(query string) string {
|
||||
if idx := strings.IndexByte(query, ' '); idx > 0 {
|
||||
query = query[:idx]
|
||||
}
|
||||
if len(query) > 16 {
|
||||
query = query[:16]
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
type QueryHook interface {
|
||||
BeforeQuery(context.Context, *QueryEvent) context.Context
|
||||
AfterQuery(context.Context, *QueryEvent)
|
||||
}
|
||||
|
||||
// beforeQuery bumps the query counter and, when hooks are registered,
// builds a QueryEvent and runs every hook's BeforeQuery in registration
// order, threading the (possibly replaced) context through each hook.
// It returns a nil event when no hooks are registered.
func (db *DB) beforeQuery(
	ctx context.Context,
	iquery Query,
	queryTemplate string,
	queryArgs []interface{},
	query string,
	model Model,
) (context.Context, *QueryEvent) {
	atomic.AddUint32(&db.stats.Queries, 1)

	// Fast path: no hooks means no event allocation at all.
	if len(db.queryHooks) == 0 {
		return ctx, nil
	}

	event := &QueryEvent{
		DB: db,

		Model:         model,
		QueryAppender: iquery, // kept populated for backward compatibility
		IQuery:        iquery,
		Query:         query,
		QueryTemplate: queryTemplate,
		QueryArgs:     queryArgs,

		StartTime: time.Now(),
	}

	for _, hook := range db.queryHooks {
		ctx = hook.BeforeQuery(ctx, event)
	}

	return ctx, event
}
|
||||
|
||||
// afterQuery updates the error counter, records the outcome on the event,
// and runs every hook's AfterQuery in reverse registration order
// (mirroring beforeQuery). event may be nil when no hooks are registered.
func (db *DB) afterQuery(
	ctx context.Context,
	event *QueryEvent,
	res sql.Result,
	err error,
) {
	// sql.ErrNoRows is an expected outcome, not an error for stats purposes.
	switch err {
	case nil, sql.ErrNoRows:
		// nothing
	default:
		atomic.AddUint32(&db.stats.Errors, 1)
	}

	if event == nil {
		return
	}

	event.Result = res
	event.Err = err

	db.afterQueryFromIndex(ctx, event, len(db.queryHooks)-1)
}
|
||||
|
||||
func (db *DB) afterQueryFromIndex(ctx context.Context, event *QueryEvent, hookIndex int) {
|
||||
for ; hookIndex >= 0; hookIndex-- {
|
||||
db.queryHooks[hookIndex].AfterQuery(ctx, event)
|
||||
}
|
||||
}
|
@ -0,0 +1,16 @@
|
||||
package internal
|
||||
|
||||
// Flag is a bit set stored in a uint64.
type Flag uint64

// Has reports whether any bit of other is set in flag.
func (flag Flag) Has(other Flag) bool {
	return flag&other != 0
}

// Set returns flag with all bits of other set.
func (flag Flag) Set(other Flag) Flag {
	return flag | other
}

// Remove returns flag with all bits of other cleared.
func (flag Flag) Remove(other Flag) Flag {
	// &^ (AND NOT) is the idiomatic single-operator bit clear,
	// replacing the two-step `flag &= ^other`.
	return flag &^ other
}
|
@ -0,0 +1,43 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
fasthex "github.com/tmthrgd/go-hex"
|
||||
)
|
||||
|
||||
type HexEncoder struct {
|
||||
b []byte
|
||||
written bool
|
||||
}
|
||||
|
||||
func NewHexEncoder(b []byte) *HexEncoder {
|
||||
return &HexEncoder{
|
||||
b: b,
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *HexEncoder) Bytes() []byte {
|
||||
return enc.b
|
||||
}
|
||||
|
||||
func (enc *HexEncoder) Write(b []byte) (int, error) {
|
||||
if !enc.written {
|
||||
enc.b = append(enc.b, '\'')
|
||||
enc.b = append(enc.b, `\x`...)
|
||||
enc.written = true
|
||||
}
|
||||
|
||||
i := len(enc.b)
|
||||
enc.b = append(enc.b, make([]byte, fasthex.EncodedLen(len(b)))...)
|
||||
fasthex.Encode(enc.b[i:], b)
|
||||
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (enc *HexEncoder) Close() error {
|
||||
if enc.written {
|
||||
enc.b = append(enc.b, '\'')
|
||||
} else {
|
||||
enc.b = append(enc.b, "NULL"...)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,27 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Warn is the logger used for warning messages.
var Warn = log.New(os.Stderr, "WARN: bun: ", log.LstdFlags)

// Deprecated is the logger used for deprecation notices.
var Deprecated = log.New(os.Stderr, "DEPRECATED: bun: ", log.LstdFlags)

// Logging is the minimal logging interface used by the package.
type Logging interface {
	Printf(format string, v ...interface{})
}

// logger adapts a *log.Logger to the Logging interface.
type logger struct {
	log *log.Logger
}

// Printf formats and writes one log line. Calldepth 2 attributes the line
// to Printf's caller; the writer error is deliberately ignored
// (best-effort logging).
func (l *logger) Printf(format string, v ...interface{}) {
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}

// Logger is the default package logger, writing to stderr with the
// caller's file/line.
var Logger Logging = &logger{
	log: log.New(os.Stderr, "bun: ", log.LstdFlags|log.Lshortfile),
}
|
@ -0,0 +1,67 @@
|
||||
package internal
|
||||
|
||||
import "reflect"
|
||||
|
||||
var ifaceType = reflect.TypeOf((*interface{})(nil)).Elem()

// MapKey wraps a slice of values in a comparable value so the slice can
// be used as a map key: equal inputs yield MapKeys that compare equal
// with ==.
type MapKey struct {
	iface interface{}
}

// NewMapKey builds a comparable key from the given values.
func NewMapKey(is []interface{}) MapKey {
	return MapKey{iface: newMapKey(is)}
}

// newMapKey copies the values into a fixed-size array, which (unlike a
// slice) is comparable. Lengths up to 10 use statically typed arrays to
// stay off the reflect path; longer inputs fall back to reflect.ArrayOf.
func newMapKey(is []interface{}) interface{} {
	switch len(is) {
	case 1:
		var a [1]interface{}
		copy(a[:], is)
		return a
	case 2:
		var a [2]interface{}
		copy(a[:], is)
		return a
	case 3:
		var a [3]interface{}
		copy(a[:], is)
		return a
	case 4:
		var a [4]interface{}
		copy(a[:], is)
		return a
	case 5:
		var a [5]interface{}
		copy(a[:], is)
		return a
	case 6:
		var a [6]interface{}
		copy(a[:], is)
		return a
	case 7:
		var a [7]interface{}
		copy(a[:], is)
		return a
	case 8:
		var a [8]interface{}
		copy(a[:], is)
		return a
	case 9:
		var a [9]interface{}
		copy(a[:], is)
		return a
	case 10:
		var a [10]interface{}
		copy(a[:], is)
		return a
	}

	// Arbitrary length: build an [N]interface{} dynamically.
	at := reflect.New(reflect.ArrayOf(len(is), ifaceType)).Elem()
	for i, v := range is {
		*(at.Index(i).Addr().Interface().(*interface{})) = v
	}
	return at.Interface()
}
|
@ -0,0 +1,141 @@
|
||||
package parser
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
|
||||
"github.com/uptrace/bun/internal"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
b []byte
|
||||
i int
|
||||
}
|
||||
|
||||
func New(b []byte) *Parser {
|
||||
return &Parser{
|
||||
b: b,
|
||||
}
|
||||
}
|
||||
|
||||
func NewString(s string) *Parser {
|
||||
return New(internal.Bytes(s))
|
||||
}
|
||||
|
||||
func (p *Parser) Valid() bool {
|
||||
return p.i < len(p.b)
|
||||
}
|
||||
|
||||
func (p *Parser) Bytes() []byte {
|
||||
return p.b[p.i:]
|
||||
}
|
||||
|
||||
func (p *Parser) Read() byte {
|
||||
if p.Valid() {
|
||||
c := p.b[p.i]
|
||||
p.Advance()
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *Parser) Peek() byte {
|
||||
if p.Valid() {
|
||||
return p.b[p.i]
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (p *Parser) Advance() {
|
||||
p.i++
|
||||
}
|
||||
|
||||
func (p *Parser) Skip(skip byte) bool {
|
||||
if p.Peek() == skip {
|
||||
p.Advance()
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *Parser) SkipBytes(skip []byte) bool {
|
||||
if len(skip) > len(p.b[p.i:]) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) {
|
||||
return false
|
||||
}
|
||||
p.i += len(skip)
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Parser) ReadSep(sep byte) ([]byte, bool) {
|
||||
ind := bytes.IndexByte(p.b[p.i:], sep)
|
||||
if ind == -1 {
|
||||
b := p.b[p.i:]
|
||||
p.i = len(p.b)
|
||||
return b, false
|
||||
}
|
||||
|
||||
b := p.b[p.i : p.i+ind]
|
||||
p.i += ind + 1
|
||||
return b, true
|
||||
}
|
||||
|
||||
func (p *Parser) ReadIdentifier() (string, bool) {
|
||||
if p.i < len(p.b) && p.b[p.i] == '(' {
|
||||
s := p.i + 1
|
||||
if ind := bytes.IndexByte(p.b[s:], ')'); ind != -1 {
|
||||
b := p.b[s : s+ind]
|
||||
p.i = s + ind + 1
|
||||
return internal.String(b), false
|
||||
}
|
||||
}
|
||||
|
||||
ind := len(p.b) - p.i
|
||||
var alpha bool
|
||||
for i, c := range p.b[p.i:] {
|
||||
if isNum(c) {
|
||||
continue
|
||||
}
|
||||
if isAlpha(c) || (i > 0 && alpha && c == '_') {
|
||||
alpha = true
|
||||
continue
|
||||
}
|
||||
ind = i
|
||||
break
|
||||
}
|
||||
if ind == 0 {
|
||||
return "", false
|
||||
}
|
||||
b := p.b[p.i : p.i+ind]
|
||||
p.i += ind
|
||||
return internal.String(b), !alpha
|
||||
}
|
||||
|
||||
func (p *Parser) ReadNumber() int {
|
||||
ind := len(p.b) - p.i
|
||||
for i, c := range p.b[p.i:] {
|
||||
if !isNum(c) {
|
||||
ind = i
|
||||
break
|
||||
}
|
||||
}
|
||||
if ind == 0 {
|
||||
return 0
|
||||
}
|
||||
n, err := strconv.Atoi(string(p.b[p.i : p.i+ind]))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.i += ind
|
||||
return n
|
||||
}
|
||||
|
||||
func isNum(c byte) bool {
|
||||
return c >= '0' && c <= '9'
|
||||
}
|
||||
|
||||
func isAlpha(c byte) bool {
|
||||
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
|
||||
}
|
@ -0,0 +1,11 @@
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
// String converts a byte slice to a string. This is the safe (copying)
// implementation used on App Engine builds.
func String(b []byte) string {
	return string(b)
}

// Bytes converts a string to a byte slice. This is the safe (copying)
// implementation used on App Engine builds.
func Bytes(s string) []byte {
	return []byte(s)
}
|
@ -0,0 +1,184 @@
|
||||
package tagparser
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Tag is a parsed struct-field tag value: a leading name plus option
// key/value pairs, e.g. `name,pk,type:varchar(100)`.
type Tag struct {
	Name    string
	Options map[string][]string
}

// IsZero reports whether the tag carries neither a name nor options.
func (t Tag) IsZero() bool {
	return t.Name == "" && t.Options == nil
}

// HasOption reports whether the named option was present at all.
func (t Tag) HasOption(name string) bool {
	_, ok := t.Options[name]
	return ok
}

// Option returns the last recorded value for the named option.
func (t Tag) Option(name string) (string, bool) {
	vs, ok := t.Options[name]
	if !ok {
		return "", false
	}
	return vs[len(vs)-1], true
}

// Parse parses a struct-field tag value into a Tag.
func Parse(s string) Tag {
	if s == "" {
		return Tag{}
	}
	p := parser{
		s: s,
	}
	p.parse()
	return p.tag
}

type parser struct {
	s string
	i int

	tag      Tag
	seenName bool // for empty names
}

// setName records the first bare token as the tag name; any later bare
// token is stored as a valueless option instead.
func (p *parser) setName(name string) {
	if !p.seenName {
		p.seenName = true
		p.tag.Name = name
		return
	}
	p.addOption(name, "")
}

// addOption appends value under key, preserving duplicates in order.
// Empty keys are dropped, but still mark the name as seen.
func (p *parser) addOption(key, value string) {
	p.seenName = true
	if key == "" {
		return
	}
	if p.tag.Options == nil {
		p.tag.Options = make(map[string][]string)
	}
	p.tag.Options[key] = append(p.tag.Options[key], value)
}

// parse consumes comma-separated `key`, `"key"`, or `key:value` items.
func (p *parser) parse() {
	for p.valid() {
		p.parseKeyValue()
		if p.peek() == ',' {
			p.i++
		}
	}
}

// parseKeyValue reads a single item: a bare name, a quoted name, or a
// key:value pair.
func (p *parser) parseKeyValue() {
	start := p.i

	for p.valid() {
		switch c := p.read(); c {
		case ',':
			p.setName(p.s[start : p.i-1])
			return
		case ':':
			key := p.s[start : p.i-1]
			p.addOption(key, p.parseValue())
			return
		case '"':
			p.setName(p.parseQuotedValue())
			return
		}
	}

	// Ran off the end: the whole remainder is a bare name.
	p.setName(p.s[start:p.i])
}

// parseValue reads an option value, honoring quotes and balanced parens.
func (p *parser) parseValue() string {
	start := p.i

	for p.valid() {
		switch c := p.read(); c {
		case '"':
			return p.parseQuotedValue()
		case ',':
			return p.s[start : p.i-1]
		case '(':
			p.skipPairs('(', ')')
		}
	}

	if p.i == start {
		return ""
	}
	return p.s[start:p.i]
}

// parseQuotedValue reads up to the closing quote, handling backslash
// escapes. The opening quote has already been consumed.
func (p *parser) parseQuotedValue() string {
	// Fast path: the closing quote is not preceded by an escape, so the
	// content can be returned without rebuilding it.
	if i := strings.IndexByte(p.s[p.i:], '"'); i >= 0 && p.s[p.i+i-1] != '\\' {
		s := p.s[p.i : p.i+i]
		p.i += i + 1
		return s
	}

	b := make([]byte, 0, 16)

	for p.valid() {
		switch c := p.read(); c {
		case '\\':
			b = append(b, p.read())
		case '"':
			return string(b)
		default:
			b = append(b, c)
		}
	}

	return ""
}

// skipPairs advances past a balanced start..end region, skipping nested
// pairs and quoted strings inside. The opening byte has already been
// consumed.
func (p *parser) skipPairs(start, end byte) {
	var depth int
	for p.valid() {
		switch c := p.read(); c {
		case '"':
			_ = p.parseQuotedValue()
		case start:
			depth++
		case end:
			if depth == 0 {
				return
			}
			depth--
		}
	}
}

func (p *parser) valid() bool {
	return p.i < len(p.s)
}

// read consumes and returns the next byte, or 0 at end of input.
func (p *parser) read() byte {
	if !p.valid() {
		return 0
	}
	c := p.s[p.i]
	p.i++
	return c
}

// peek returns the next byte without consuming it, or 0 at end of input.
func (p *parser) peek() byte {
	if !p.valid() {
		return 0
	}
	return p.s[p.i]
}
|
@ -0,0 +1,61 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Layouts for the textual date/time shapes produced by SQL databases.
// The numbered *tzFormat variants differ only in how the zone offset is
// spelled: -07:00:00, -07:00, or -07.
const (
	dateFormat         = "2006-01-02"
	timeFormat         = "15:04:05.999999999"
	timetzFormat1      = "15:04:05.999999999-07:00:00"
	timetzFormat2      = "15:04:05.999999999-07:00"
	timetzFormat3      = "15:04:05.999999999-07"
	timestampFormat    = "2006-01-02 15:04:05.999999999"
	timestamptzFormat1 = "2006-01-02 15:04:05.999999999-07:00:00"
	timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00"
	timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07"
)

// ParseTime parses the date/time shapes commonly produced by SQL
// databases: timestamps with or without a zone offset, RFC 3339 values,
// bare times with or without an offset, and bare dates. Values without an
// explicit zone are interpreted as UTC.
func ParseTime(s string) (time.Time, error) {
	l := len(s)

	if l >= len("2006-01-02 15:04:05") {
		// Byte 10 separates date and time: ' ' for SQL style, 'T' for RFC 3339.
		switch s[10] {
		case ' ':
			// Identify the zone spelling by where a sign character sits
			// relative to the end of the string. The -07:00 check must run
			// before -07:00:00: only the former puts a sign at l-6.
			if c := s[l-6]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat2, s)
			}
			if c := s[l-3]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat3, s)
			}
			if c := s[l-9]; c == '+' || c == '-' {
				return time.Parse(timestamptzFormat1, s)
			}
			return time.ParseInLocation(timestampFormat, s, time.UTC)
		case 'T':
			return time.Parse(time.RFC3339Nano, s)
		}
	}

	if l >= len("15:04:05-07") {
		// Same sign-position probing for bare times with a zone offset.
		if c := s[l-6]; c == '+' || c == '-' {
			return time.Parse(timetzFormat2, s)
		}
		if c := s[l-3]; c == '+' || c == '-' {
			return time.Parse(timetzFormat3, s)
		}
		if c := s[l-9]; c == '+' || c == '-' {
			return time.Parse(timetzFormat1, s)
		}
	}

	if l < len("15:04:05") {
		return time.Time{}, fmt.Errorf("bun: can't parse time=%q", s)
	}

	// "15:04:05" has a colon at index 2; "2006-01-02" does not.
	if s[2] == ':' {
		return time.ParseInLocation(timeFormat, s, time.UTC)
	}
	return time.ParseInLocation(dateFormat, s, time.UTC)
}
|
@ -0,0 +1,67 @@
|
||||
package internal
|
||||
|
||||
// IsUpper reports whether c is an ASCII upper-case letter.
func IsUpper(c byte) bool {
	return c >= 'A' && c <= 'Z'
}

// IsLower reports whether c is an ASCII lower-case letter.
func IsLower(c byte) bool {
	return c >= 'a' && c <= 'z'
}

// ToUpper upper-cases an ASCII lower-case letter. Other bytes are
// returned unchanged (previously the unchecked `c - 32` produced garbage
// for non-letters; all in-file callers checked first, so their behavior
// is unchanged).
func ToUpper(c byte) byte {
	if IsLower(c) {
		return c - ('a' - 'A')
	}
	return c
}

// ToLower lower-cases an ASCII upper-case letter. Other bytes are
// returned unchanged (see ToUpper).
func ToLower(c byte) byte {
	if IsUpper(c) {
		return c + ('a' - 'A')
	}
	return c
}

// Underscore converts "CamelCasedString" to "camel_cased_string".
func Underscore(s string) string {
	r := make([]byte, 0, len(s)+5)
	for i := 0; i < len(s); i++ {
		c := s[i]
		if !IsUpper(c) {
			r = append(r, c)
			continue
		}
		// Insert an underscore at a word boundary: an upper-case letter
		// adjacent to a lower-case one — but never at the start or end.
		if i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1])) {
			r = append(r, '_')
		}
		r = append(r, ToLower(c))
	}
	return string(r)
}

// CamelCased converts "under_scored_string" to "UnderScoredString".
func CamelCased(s string) string {
	r := make([]byte, 0, len(s))
	upperNext := true
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c == '_' {
			// Underscores are dropped and upper-case the next letter.
			upperNext = true
			continue
		}
		if upperNext {
			c = ToUpper(c) // no-op for non-lower-case bytes
			upperNext = false
		}
		r = append(r, c)
	}
	return string(r)
}

// ToExported upper-cases the first letter: "fooBar" -> "FooBar".
// Strings that do not start with a lower-case letter are returned as-is.
func ToExported(s string) string {
	if len(s) == 0 {
		return s
	}
	c := s[0]
	if !IsLower(c) {
		return s
	}
	b := []byte(s)
	b[0] = ToUpper(c)
	return string(b)
}
|
@ -0,0 +1,20 @@
|
||||
// +build !appengine
|
||||
|
||||
package internal
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// String converts a byte slice to a string without copying.
// The result aliases b's backing memory, so the caller must guarantee b
// is never mutated afterwards.
func String(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// Bytes converts a string to a byte slice without copying.
// The result aliases the string's (immutable) backing memory and must
// never be written to. The anonymous struct mirrors a slice header: the
// embedded string supplies the data pointer and length, Cap supplies the
// capacity.
func Bytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
|
@ -0,0 +1,57 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// MakeSliceNextElemFunc returns a function that, on each call, yields the
// next element of the slice or array v to fill in, growing a slice as
// needed. For slices of pointers the returned value is the pointee,
// allocating the pointer when the slot is nil.
func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
	// Arrays have a fixed length: just walk the existing elements.
	if v.Kind() == reflect.Array {
		var pos int
		return func() reflect.Value {
			v := v.Index(pos)
			pos++
			return v
		}
	}

	elemType := v.Type().Elem()

	if elemType.Kind() == reflect.Ptr {
		elemType = elemType.Elem()
		return func() reflect.Value {
			// Prefer re-slicing into spare capacity; only allocate a new
			// element when the recycled pointer slot is nil.
			if v.Len() < v.Cap() {
				v.Set(v.Slice(0, v.Len()+1))
				elem := v.Index(v.Len() - 1)
				if elem.IsNil() {
					elem.Set(reflect.New(elemType))
				}
				return elem.Elem()
			}

			elem := reflect.New(elemType)
			v.Set(reflect.Append(v, elem))
			return elem.Elem()
		}
	}

	zero := reflect.Zero(elemType)
	return func() reflect.Value {
		// Prefer re-slicing into spare capacity; otherwise append a zero
		// value and hand back the new last element.
		if v.Len() < v.Cap() {
			v.Set(v.Slice(0, v.Len()+1))
			return v.Index(v.Len() - 1)
		}

		v.Set(reflect.Append(v, zero))
		return v.Index(v.Len() - 1)
	}
}
|
||||
|
||||
// Unwrap returns the result of calling Unwrap on err when err implements
// the standard `Unwrap() error` method, and nil otherwise.
func Unwrap(err error) error {
	type unwrapper interface {
		Unwrap() error
	}
	u, ok := err.(unwrapper)
	if !ok {
		return nil
	}
	return u.Unwrap()
}
|
@ -0,0 +1,201 @@
|
||||
package bun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// errNilModel is returned when a nil model is passed where one is required.
var errNilModel = errors.New("bun: Model(nil)")

// timeType is cached so time.Time structs can be told apart from
// table-backed structs in model dispatch.
var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()

type Model = schema.Model

// rowScanner scans a single row from an open result set.
type rowScanner interface {
	ScanRow(ctx context.Context, rows *sql.Rows) error
}

// TableModel is a Model backed by schema table metadata: a struct or a
// slice of structs, with support for relations (joins), mounting into a
// parent value, and soft deletes.
type TableModel interface {
	Model

	schema.BeforeAppendModelHook
	schema.BeforeScanRowHook
	schema.AfterScanRowHook
	ScanColumn(column string, src interface{}) error

	Table() *schema.Table
	Relation() *schema.Relation

	join(string) *relationJoin
	getJoin(string) *relationJoin
	getJoins() []relationJoin
	addJoin(relationJoin) *relationJoin

	rootValue() reflect.Value
	parentIndex() []int
	mount(reflect.Value)

	updateSoftDeleteField(time.Time) error
}
|
||||
|
||||
// newModel builds a Model for Scan destinations. A single destination is
// dispatched on its own type by _newModel; multiple destinations must
// each be a pointer, and must all point at slices for a columnar slice
// model — otherwise they are treated as plain row-scan targets.
func newModel(db *DB, dest []interface{}) (Model, error) {
	if len(dest) == 1 {
		return _newModel(db, dest[0], true)
	}

	values := make([]reflect.Value, len(dest))

	for i, el := range dest {
		v := reflect.ValueOf(el)
		if v.Kind() != reflect.Ptr {
			return nil, fmt.Errorf("bun: Scan(non-pointer %T)", dest)
		}

		v = v.Elem()
		if v.Kind() != reflect.Slice {
			// Not all destinations are slices: scan row values directly.
			return newScanModel(db, dest), nil
		}

		values[i] = v
	}

	return newSliceModel(db, dest, values), nil
}
|
||||
|
||||
// newSingleModel is like newModel for a single destination, but forbids
// treating dest as a plain scan target (scan == false).
func newSingleModel(db *DB, dest interface{}) (Model, error) {
	return _newModel(db, dest, false)
}
|
||||
|
||||
// _newModel dispatches on the dynamic type of dest to build the right
// Model implementation. scan reports whether dest may be treated as a
// plain scan target (e.g. a bare sql.Scanner or scalar pointer).
func _newModel(db *DB, dest interface{}, scan bool) (Model, error) {
	switch dest := dest.(type) {
	case nil:
		return nil, errNilModel
	case Model:
		// Already a model: use it as-is.
		return dest, nil
	case sql.Scanner:
		if !scan {
			return nil, fmt.Errorf("bun: Model(unsupported %T)", dest)
		}
		return newScanModel(db, []interface{}{dest}), nil
	}

	v := reflect.ValueOf(dest)
	if !v.IsValid() {
		return nil, errNilModel
	}
	if v.Kind() != reflect.Ptr {
		return nil, fmt.Errorf("bun: Model(non-pointer %T)", dest)
	}

	if v.IsNil() {
		// A typed nil struct pointer is still accepted: the table
		// metadata derived from the type is enough for query building.
		typ := v.Type().Elem()
		if typ.Kind() == reflect.Struct {
			return newStructTableModel(db, dest, db.Table(typ)), nil
		}
		return nil, fmt.Errorf("bun: Model(nil %T)", dest)
	}

	v = v.Elem()

	switch v.Kind() {
	case reflect.Map:
		typ := v.Type()
		if err := validMap(typ); err != nil {
			return nil, err
		}
		mapPtr := v.Addr().Interface().(*map[string]interface{})
		return newMapModel(db, mapPtr), nil
	case reflect.Struct:
		// time.Time is a scalar, not a table-backed struct.
		if v.Type() != timeType {
			return newStructTableModelValue(db, dest, v), nil
		}
	case reflect.Slice:
		switch elemType := sliceElemType(v); elemType.Kind() {
		case reflect.Struct:
			if elemType != timeType {
				return newSliceTableModel(db, dest, v, elemType), nil
			}
		case reflect.Map:
			if err := validMap(elemType); err != nil {
				return nil, err
			}
			slicePtr := v.Addr().Interface().(*[]map[string]interface{})
			return newMapSliceModel(db, slicePtr), nil
		}
		// Slice of scalars (including time.Time).
		return newSliceModel(db, []interface{}{dest}, []reflect.Value{v}), nil
	}

	if scan {
		return newScanModel(db, []interface{}{dest}), nil
	}

	return nil, fmt.Errorf("bun: Model(unsupported %T)", dest)
}
|
||||
|
||||
// newTableModelIndex builds a TableModel for the field of table.Type at
// the given index path, rooted at root — used to materialize relation
// models. The field must be a struct or a slice of structs.
func newTableModelIndex(
	db *DB,
	table *schema.Table,
	root reflect.Value,
	index []int,
	rel *schema.Relation,
) (TableModel, error) {
	typ := typeByIndex(table.Type, index)

	if typ.Kind() == reflect.Struct {
		return &structTableModel{
			db:    db,
			table: table.Dialect().Tables().Get(typ),
			rel:   rel,

			root:  root,
			index: index,
		}, nil
	}

	if typ.Kind() == reflect.Slice {
		// Accept both []T and []*T: indirectType strips the pointer.
		structType := indirectType(typ.Elem())
		if structType.Kind() == reflect.Struct {
			m := sliceTableModel{
				structTableModel: structTableModel{
					db:    db,
					table: table.Dialect().Tables().Get(structType),
					rel:   rel,

					root:  root,
					index: index,
				},
			}
			m.init(typ)
			return &m, nil
		}
	}

	return nil, fmt.Errorf("bun: NewModel(%s)", typ)
}
|
||||
|
||||
func validMap(typ reflect.Type) error {
|
||||
if typ.Key().Kind() != reflect.String || typ.Elem().Kind() != reflect.Interface {
|
||||
return fmt.Errorf("bun: Model(unsupported %s) (expected *map[string]interface{})",
|
||||
typ)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
func isSingleRowModel(m Model) bool {
|
||||
switch m.(type) {
|
||||
case *mapModel,
|
||||
*structTableModel,
|
||||
*scanModel:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
@ -0,0 +1,183 @@
|
||||
package bun
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/uptrace/bun/schema"
|
||||
)
|
||||
|
||||
// mapModel scans a single row into a map[string]interface{} keyed by
// column name, and can render itself as INSERT columns/values or an
// UPDATE set list.
type mapModel struct {
	db *DB

	dest *map[string]interface{} // caller's destination pointer
	m    map[string]interface{}  // the map being filled

	rows         *sql.Rows
	columns      []string
	_columnTypes []*sql.ColumnType // lazily fetched; access via columnTypes()
	scanIndex    int               // index of the column currently being scanned
}

var _ Model = (*mapModel)(nil)

// newMapModel wraps dest, adopting the existing map if dest already
// points at one.
func newMapModel(db *DB, dest *map[string]interface{}) *mapModel {
	m := &mapModel{
		db:   db,
		dest: dest,
	}
	if dest != nil {
		m.m = *dest
	}
	return m
}
|
||||
|
||||
func (m *mapModel) Value() interface{} {
|
||||
return m.dest
|
||||
}
|
||||
|
||||
// ScanRows consumes at most one row from rows into the destination map
// and returns the number of rows scanned (0 or 1).
func (m *mapModel) ScanRows(ctx context.Context, rows *sql.Rows) (int, error) {
	if !rows.Next() {
		return 0, rows.Err()
	}

	columns, err := rows.Columns()
	if err != nil {
		return 0, err
	}

	m.rows = rows
	m.columns = columns
	// Every destination slot is m itself: m implements sql.Scanner, and
	// scanIndex tracks which column Scan is currently receiving.
	dest := makeDest(m, len(columns))

	if m.m == nil {
		m.m = make(map[string]interface{}, len(m.columns))
	}

	m.scanIndex = 0
	if err := rows.Scan(dest...); err != nil {
		return 0, err
	}

	*m.dest = m.m

	return 1, nil
}
|
||||
|
||||
// Scan implements sql.Scanner for each column of the current row.
// Non-[]byte values are stored as-is; []byte values are converted using
// the driver's reported scan type so typed columns don't surface as raw
// bytes in the map.
func (m *mapModel) Scan(src interface{}) error {
	if _, ok := src.([]byte); !ok {
		return m.scanRaw(src)
	}

	columnTypes, err := m.columnTypes()
	if err != nil {
		return err
	}

	scanType := columnTypes[m.scanIndex].ScanType()
	switch scanType.Kind() {
	case reflect.Interface:
		// The driver reports no concrete type: keep the raw value.
		return m.scanRaw(src)
	case reflect.Slice:
		if scanType.Elem().Kind() == reflect.Uint8 {
			// []byte columns stay as raw bytes.
			return m.scanRaw(src)
		}
	}

	// Convert the raw bytes into the driver-reported type, then store
	// the converted value.
	dest := reflect.New(scanType).Elem()
	if err := schema.Scanner(scanType)(dest, src); err != nil {
		return err
	}

	return m.scanRaw(dest.Interface())
}
|
||||
|
||||
func (m *mapModel) columnTypes() ([]*sql.ColumnType, error) {
|
||||
if m._columnTypes == nil {
|
||||
columnTypes, err := m.rows.ColumnTypes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m._columnTypes = columnTypes
|
||||
}
|
||||
return m._columnTypes, nil
|
||||
}
|
||||
|
||||
func (m *mapModel) scanRaw(src interface{}) error {
|
||||
columnName := m.columns[m.scanIndex]
|
||||
m.scanIndex++
|
||||
m.m[columnName] = src
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mapModel) appendColumnsValues(fmter schema.Formatter, b []byte) []byte {
|
||||
keys := make([]string, 0, len(m.m))
|
||||
|
||||
for k := range m.m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
b = append(b, " ("...)
|
||||
|
||||
for i, k := range keys {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
b = fmter.AppendIdent(b, k)
|
||||
}
|
||||
|
||||
b = append(b, ") VALUES ("...)
|
||||
|
||||
isTemplate := fmter.IsNop()
|
||||
for i, k := range keys {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
if isTemplate {
|
||||
b = append(b, '?')
|
||||
} else {
|
||||
b = schema.Append(fmter, b, m.m[k])
|
||||
}
|
||||
}
|
||||
|
||||
b = append(b, ")"...)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (m *mapModel) appendSet(fmter schema.Formatter, b []byte) []byte {
|
||||
keys := make([]string, 0, len(m.m))
|
||||
|
||||
for k := range m.m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
isTemplate := fmter.IsNop()
|
||||
for i, k := range keys {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
|
||||
b = fmter.AppendIdent(b, k)
|
||||
b = append(b, " = "...)
|
||||
if isTemplate {
|
||||
b = append(b, '?')
|
||||
} else {
|
||||
b = schema.Append(fmter, b, m.m[k])
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// makeDest returns n scan destinations that all point at v, so a single
// sql.Scanner implementation can receive every column of a row in turn.
func makeDest(v interface{}, n int) []interface{} {
	dest := make([]interface{}, n)
	for i := 0; i < n; i++ {
		dest[i] = v
	}
	return dest
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in new issue