diff --git a/const.go b/const.go
index bdceae6..eb834b5 100644
--- a/const.go
+++ b/const.go
@@ -1,6 +1,6 @@
package gojobs
const (
- Version = "1.0.89"
+ Version = "1.0.90"
SpecifyIpNull = "0.0.0.0"
)
diff --git a/create_in.go b/create_in.go
index 5a71505..446e1cd 100644
--- a/create_in.go
+++ b/create_in.go
@@ -51,7 +51,7 @@ func (c *Client) CreateInCustomId(ctx context.Context, config *ConfigCreateInCus
go func() {
_, err = c.db.mongoClient.Database(c.db.mongoDatabaseName).
Collection(jobs_mongo_model.Task{}.TableName()).
- InsertOne(&jobs_mongo_model.Task{
+ InsertOne(ctx, &jobs_mongo_model.Task{
Id: primitive.NewObjectID(),
Status: TASK_IN,
Params: config.Params,
@@ -149,7 +149,7 @@ func (c *Client) CreateInCustomIdOnly(ctx context.Context, config *ConfigCreateI
go func() {
_, err = c.db.mongoClient.Database(c.db.mongoDatabaseName).
Collection(jobs_mongo_model.Task{}.TableName()).
- InsertOne(&jobs_mongo_model.Task{
+ InsertOne(ctx, &jobs_mongo_model.Task{
Id: primitive.NewObjectID(),
Status: TASK_IN,
Params: config.Params,
@@ -245,7 +245,7 @@ func (c *Client) CreateInCustomIdMaxNumber(ctx context.Context, config *ConfigCr
go func() {
_, err = c.db.mongoClient.Database(c.db.mongoDatabaseName).
Collection(jobs_mongo_model.Task{}.TableName()).
- InsertOne(&jobs_mongo_model.Task{
+ InsertOne(ctx, &jobs_mongo_model.Task{
Id: primitive.NewObjectID(),
Status: TASK_IN,
Params: config.Params,
@@ -346,7 +346,7 @@ func (c *Client) CreateInCustomIdMaxNumberOnly(ctx context.Context, config *Conf
go func() {
_, err = c.db.mongoClient.Database(c.db.mongoDatabaseName).
Collection(jobs_mongo_model.Task{}.TableName()).
- InsertOne(&jobs_mongo_model.Task{
+ InsertOne(ctx, &jobs_mongo_model.Task{
Id: primitive.NewObjectID(),
Status: TASK_IN,
Params: config.Params,
diff --git a/create_wait.go b/create_wait.go
index 9b38e3a..45897a4 100644
--- a/create_wait.go
+++ b/create_wait.go
@@ -51,7 +51,7 @@ func (c *Client) CreateWaitCustomId(ctx context.Context, config *ConfigCreateWai
go func() {
_, err = c.db.mongoClient.Database(c.db.mongoDatabaseName).
Collection(jobs_mongo_model.Task{}.TableName()).
- InsertOne(&jobs_mongo_model.Task{
+ InsertOne(ctx, &jobs_mongo_model.Task{
Id: primitive.NewObjectID(),
Status: TASK_WAIT,
Params: config.Params,
diff --git a/go.mod b/go.mod
index c619f4f..1870947 100644
--- a/go.mod
+++ b/go.mod
@@ -6,15 +6,15 @@ require (
github.com/go-redis/redis/v9 v9.0.0-beta.2
github.com/jasonlvhit/gocron v0.0.1
github.com/robfig/cron/v3 v3.0.1
- go.dtapp.net/dorm v1.0.33
+ go.dtapp.net/dorm v1.0.36
go.dtapp.net/goarray v1.0.1
- go.dtapp.net/goip v1.0.30
- go.dtapp.net/golog v1.0.73
+ go.dtapp.net/goip v1.0.34
+ go.dtapp.net/golog v1.0.77
go.dtapp.net/gostring v1.0.10
go.dtapp.net/gotime v1.0.5
go.dtapp.net/gotrace_id v1.0.6
go.mongodb.org/mongo-driver v1.10.2
- gorm.io/gorm v1.23.8
+ gorm.io/gorm v1.23.9
)
require (
@@ -27,7 +27,7 @@ require (
github.com/gin-gonic/gin v1.8.1 // indirect
github.com/go-playground/locales v0.14.0 // indirect
github.com/go-playground/universal-translator v0.18.0 // indirect
- github.com/go-playground/validator/v10 v10.11.0 // indirect
+ github.com/go-playground/validator/v10 v10.11.1 // indirect
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/goccy/go-json v0.9.11 // indirect
github.com/golang/snappy v0.0.4 // indirect
@@ -43,7 +43,7 @@ require (
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.15.9 // indirect
+ github.com/klauspost/compress v1.15.10 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/lib/pq v1.10.7 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
@@ -51,6 +51,8 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/natefinch/lumberjack v2.0.0+incompatible // indirect
+ github.com/oschwald/geoip2-golang v1.8.0 // indirect
+ github.com/oschwald/maxminddb-golang v1.10.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda // indirect
@@ -83,7 +85,7 @@ require (
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect
- golang.org/x/sys v0.0.0-20220913175220-63ea55921009 // indirect
+ golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
diff --git a/go.sum b/go.sum
index d6f5439..960813e 100644
--- a/go.sum
+++ b/go.sum
@@ -96,8 +96,8 @@ github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
-github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw=
-github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
+github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
+github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-redis/redis v6.15.5+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis/v9 v9.0.0-beta.2 h1:ZSr84TsnQyKMAg8gnV+oawuQezeJR11/09THcWCQzr4=
github.com/go-redis/redis/v9 v9.0.0-beta.2/go.mod h1:Bldcd/M/bm9HbnNPi/LUtYBSD8ttcZYBMupwMXhdU0o=
@@ -275,8 +275,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
-github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo=
+github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -368,6 +368,10 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/oschwald/geoip2-golang v1.8.0 h1:KfjYB8ojCEn/QLqsDU0AzrJ3R5Qa9vFlx3z6SLNcKTs=
+github.com/oschwald/geoip2-golang v1.8.0/go.mod h1:R7bRvYjOeaoenAp9sKRS8GX5bJWcZ0laWO5+DauEktw=
+github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd9181uj2MQ5Vndg=
+github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -503,14 +507,14 @@ github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
-go.dtapp.net/dorm v1.0.33 h1:QRAVEQ6Uf3WENSOrXytzzH+PjH90JySowd3jbB9PQjw=
-go.dtapp.net/dorm v1.0.33/go.mod h1:4WNSzrUGs7YIudq1cRZQNkHOlPAbG6thI3mXX1tQcYY=
+go.dtapp.net/dorm v1.0.36 h1:3kqdGhZ/oAKr8YMQo9OXAlByGal+uok6Yh33YXmdJtw=
+go.dtapp.net/dorm v1.0.36/go.mod h1:z9ksZ4Y0HHH0odjEiG57d90/ZUBM51qXEWJC8fS+dEM=
go.dtapp.net/goarray v1.0.1 h1:cHNHaJ2MFcuJPA1WKU2PM1EUZShS1vQqEH7n6YXsQVU=
go.dtapp.net/goarray v1.0.1/go.mod h1:/MPhlFCAhQyeNV1M0v1PAOOX33Sg705fVjUgMO12IBQ=
-go.dtapp.net/goip v1.0.30 h1:/wP2ewSNWLzG2Oh2VsTfQCv/2rw1KKi9XerD4rQaMLM=
-go.dtapp.net/goip v1.0.30/go.mod h1:9l8e/slVanziGXfvrUwOMx6028EV/lzN5vVpixmtUYY=
-go.dtapp.net/golog v1.0.73 h1:1j7EU1iIM8b0UTMScxqUHZsgYFjKRy9SG/ArlpdcnfE=
-go.dtapp.net/golog v1.0.73/go.mod h1:I1WfgHWcEikqxjhMdoyH+/VVi/9KmnZy11NnqpsugqY=
+go.dtapp.net/goip v1.0.34 h1:aW2CuPpQwcDOJiyx/gHbvrha3/x+poFRpDxsLtO4EVw=
+go.dtapp.net/goip v1.0.34/go.mod h1:EctL6B8ue/kZKPr+kKZPU6YTTpNhihane9BHHffwo6Q=
+go.dtapp.net/golog v1.0.77 h1:FexLMY3r9niMKea7zZ1FUNpoOoG4HYjcIr2JjF4+4Es=
+go.dtapp.net/golog v1.0.77/go.mod h1:1/HpH2xNbzicKf4gOO6jTk3rJoxU106KGzVyvEiQFjg=
go.dtapp.net/gorandom v1.0.1 h1:IWfMClh1ECPvyUjlqD7MwLq4mZdUusD1qAwAdsvEJBs=
go.dtapp.net/gorandom v1.0.1/go.mod h1:ZPdgalKpvFV/ATQqR0k4ns/F/IpITAZpx6WkWirr5Y8=
go.dtapp.net/gorequest v1.0.31 h1:r/OoU5Y00TbJjkQtpvwjsb/pllqO0UQQjFRY1veZYZc=
@@ -653,8 +657,8 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw=
-golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 h1:ohgcoMbSofXygzo6AD2I1kz3BFmW1QArPYTtwEM3UXc=
+golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -742,8 +746,9 @@ gorm.io/driver/mysql v1.3.6/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10
gorm.io/driver/postgres v1.3.9 h1:lWGiVt5CijhQAg0PWB7Od1RNcBw/jS4d2cAScBcSDXg=
gorm.io/driver/postgres v1.3.9/go.mod h1:qw/FeqjxmYqW5dBcYNBsnhQULIApQdk7YuuDPktVi1U=
gorm.io/gorm v1.23.7/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
-gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE=
gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
+gorm.io/gorm v1.23.9 h1:NSHG021i+MCznokeXR3udGaNyFyBQJW8MbjrJMVCfGw=
+gorm.io/gorm v1.23.9/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/jobs.go b/jobs.go
index 350121a..07804ee 100644
--- a/jobs.go
+++ b/jobs.go
@@ -1,5 +1,10 @@
package gojobs
+import (
+ "context"
+ "go.dtapp.net/gojobs/jobs_gorm_model"
+)
+
const (
TASK_IN = "IN" // 任务运行
TASK_SUCCESS = "SUCCESS" // 任务完成
@@ -11,15 +16,15 @@ const (
// Cron
type jobs interface {
// Run 运行
- Run(info interface{}, status int, desc string)
- // RunAddLog 任务执行日志
- RunAddLog(id uint, runId string)
+ Run(ctx context.Context, info jobs_gorm_model.Task, status int, result string)
// CreateInCustomId 创建正在运行任务
- CreateInCustomId()
+ CreateInCustomId(ctx context.Context, config *ConfigCreateInCustomId) error
// CreateInCustomIdOnly 创建正在运行唯一任务
- CreateInCustomIdOnly()
+ CreateInCustomIdOnly(ctx context.Context, config *ConfigCreateInCustomIdOnly) error
// CreateInCustomIdMaxNumber 创建正在运行任务并限制数量
- CreateInCustomIdMaxNumber()
+ CreateInCustomIdMaxNumber(ctx context.Context, config *ConfigCreateInCustomIdMaxNumber) error
// CreateInCustomIdMaxNumberOnly 创建正在运行唯一任务并限制数量
- CreateInCustomIdMaxNumberOnly()
+ CreateInCustomIdMaxNumberOnly(ctx context.Context, config *ConfigCreateInCustomIdMaxNumberOnly) error
+ // CreateWaitCustomId 创建等待任务
+ CreateWaitCustomId(ctx context.Context, config *ConfigCreateWaitCustomId) error
}
diff --git a/model.go b/model.go
index ce4d4f6..aad5501 100644
--- a/model.go
+++ b/model.go
@@ -29,19 +29,36 @@ func (c *Client) mongoCreateCollectionTask(ctx context.Context) {
// 创建索引
func (c *Client) mongoCreateIndexesTask(ctx context.Context) {
- c.zapLog.WithTraceId(ctx).Sugar().Info(c.db.mongoClient.Db.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"status", 1},
- }}))
- c.zapLog.WithTraceId(ctx).Sugar().Info(c.db.mongoClient.Db.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"frequency", 1},
- }}))
- c.zapLog.WithTraceId(ctx).Sugar().Info(c.db.mongoClient.Db.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"custom_id", 1},
- }}))
- c.zapLog.WithTraceId(ctx).Sugar().Info(c.db.mongoClient.Db.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"type", 1},
- }}))
- c.zapLog.WithTraceId(ctx).Sugar().Info(c.db.mongoClient.Db.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"specify_ip", 1},
- }}))
+ indexes, err := c.db.mongoClient.Database(c.db.mongoDatabaseName).Collection(jobs_mongo_model.Task{}.TableName()).CreateManyIndexes(ctx, []mongo.IndexModel{
+ {
+ Keys: bson.D{{
+ Key: "status",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "frequency",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "custom_id",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "type",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "specify_ip",
+ Value: 1,
+ }},
+ },
+ })
+ if err != nil {
+ c.zapLog.WithTraceId(ctx).Sugar().Errorf("创建索引:%s", err)
+ }
+ c.zapLog.WithTraceId(ctx).Sugar().Infof("创建索引:%s", indexes)
}
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
index 8b730b6..9d0a79e 100644
--- a/vendor/github.com/go-playground/validator/v10/README.md
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -1,7 +1,7 @@
Package validator
=================
[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-![Project status](https://img.shields.io/badge/version-10.11.0-green.svg)
+![Project status](https://img.shields.io/badge/version-10.11.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
index f2f0939..c9b1db4 100644
--- a/vendor/github.com/go-playground/validator/v10/baked_in.go
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -1484,10 +1484,15 @@ func isAlphaUnicode(fl FieldLevel) bool {
return alphaUnicodeRegex.MatchString(fl.Field().String())
}
-// isBoolean is the validation function for validating if the current field's value can be safely converted to a boolean.
+// isBoolean is the validation function for validating if the current field's value is a valid boolean value or can be safely converted to a boolean value.
func isBoolean(fl FieldLevel) bool {
- _, err := strconv.ParseBool(fl.Field().String())
- return err == nil
+ switch fl.Field().Kind() {
+ case reflect.Bool:
+ return true
+ default:
+ _, err := strconv.ParseBool(fl.Field().String())
+ return err == nil
+ }
}
// isDefault is the opposite of required aka hasValue
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index ad5c63a..2d6b010 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -17,6 +17,12 @@ This package provides various compression algorithms.
# changelog
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
* July 13, 2022 (v1.15.8)
* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index c0c48bd..42a237e 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ // copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 9f3e9f7..ba7e8e6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -14,12 +14,14 @@ import (
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
+//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
+//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
@@ -145,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index dd1a5ae..8d2187a 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 4f6f37c..908c17d 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba6..298c4f8 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
+//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index beb7fa8..65b38ab 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard licen
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7eed729..f52d1ae 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
- ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
+ os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 2ad0207..176788f 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -124,7 +123,7 @@ func (r *readerWrapper) readByte() (byte, error) {
}
func (r *readerWrapper) skipN(n int64) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, n)
+ n2, err := io.CopyN(io.Discard, r.r, n)
if n2 != n {
err = io.ErrUnexpectedEOF
}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d212f47..6104eb7 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -312,6 +312,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
@@ -354,7 +355,16 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize != fcsUnknown {
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +374,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
}
- if cap(dst) == 0 {
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -382,6 +392,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -852,6 +865,10 @@ decodeStream:
}
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
err = ErrDecoderSizeExceeded
}
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index c70e6fa..666c271 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -20,6 +20,7 @@ type decoderOptions struct {
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
+ limitToCap bool
}
func (o *decoderOptions) setDefault() {
@@ -114,6 +115,17 @@ func WithDecoderMaxWindow(size uint64) DOption {
}
}
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c769f69..d70e3fd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -416,15 +416,23 @@ encodeLoop:
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in Extend backwards,
+ // and still picked up as part of the match if they do.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 7ff0c64..1f4a9a2 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab52..181edc0 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 9568a4b..1559a20 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -353,12 +353,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
if d.FrameContentSize != fcsUnknown {
- d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
return dst, ErrDecoderSizeExceeded
}
- if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)-len(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
@@ -378,7 +389,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
err = ErrDecoderSizeExceeded
break
}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
index da32b44..bcde398 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
TEXT ·buildDtable_asm(SB), $0-24
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 7598c10..1c704d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
@@ -201,20 +205,24 @@ const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
@@ -308,10 +316,12 @@ type executeAsmContext struct {
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
+//
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 27e7677..52e5703 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
diff --git a/vendor/github.com/oschwald/geoip2-golang/.gitignore b/vendor/github.com/oschwald/geoip2-golang/.gitignore
new file mode 100644
index 0000000..dca0694
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/.gitignore
@@ -0,0 +1,3 @@
+.vscode
+*.out
+*.test
diff --git a/vendor/github.com/oschwald/geoip2-golang/.gitmodules b/vendor/github.com/oschwald/geoip2-golang/.gitmodules
new file mode 100644
index 0000000..400b2ab
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "test-data"]
+ path = test-data
+ url = https://github.com/maxmind/MaxMind-DB.git
diff --git a/vendor/github.com/oschwald/geoip2-golang/.golangci.toml b/vendor/github.com/oschwald/geoip2-golang/.golangci.toml
new file mode 100644
index 0000000..b4f7e6a
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/.golangci.toml
@@ -0,0 +1,472 @@
+[run]
+ deadline = "10m"
+
+ tests = true
+
+[linters]
+ disable-all = true
+ enable = [
+ "asciicheck",
+ "bidichk",
+ "bodyclose",
+ "containedctx",
+ "contextcheck",
+ "deadcode",
+ "depguard",
+ "durationcheck",
+ "errcheck",
+ "errchkjson",
+ "errname",
+ "errorlint",
+ "exportloopref",
+ "forbidigo",
+ #"forcetypeassert",
+ "goconst",
+ "gocyclo",
+ "gocritic",
+ "godot",
+ "gofumpt",
+ "gomodguard",
+ "gosec",
+ "gosimple",
+ "govet",
+ "grouper",
+ "ineffassign",
+ "lll",
+ "makezero",
+ "maintidx",
+ "misspell",
+ "nakedret",
+ "nilerr",
+ "noctx",
+ "nolintlint",
+ "nosprintfhostport",
+ "predeclared",
+ "revive",
+ "rowserrcheck",
+ "sqlclosecheck",
+ "staticcheck",
+ "structcheck",
+ "stylecheck",
+ "tenv",
+ "tparallel",
+ "typecheck",
+ "unconvert",
+ "unparam",
+ "unused",
+ "varcheck",
+ "vetshadow",
+ "wastedassign",
+ ]
+
+# Please note that we only use depguard for stdlib as gomodguard only
+# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
+[linters-settings.depguard]
+ list-type = "blacklist"
+ include-go-root = true
+ packages = [
+ # ioutil is deprecated. The functions have been moved elsewhere:
+ # https://golang.org/doc/go1.16#ioutil
+ "io/ioutil",
+ ]
+
+[linters-settings.errcheck]
+ # Don't allow setting of error to the blank identifier. If there is a legtimate
+ # reason, there should be a nolint with an explanation.
+ check-blank = true
+
+ exclude-functions = [
+ # If we are rolling back a transaction, we are often already in an error
+ # state.
+ '(*database/sql.Tx).Rollback',
+
+ # It is reasonable to ignore errors if Cleanup fails in most cases.
+ '(*github.com/google/renameio/v2.PendingFile).Cleanup',
+
+ # We often don't care if removing a file failed (e.g., it doesn't exist)
+ 'os.Remove',
+ 'os.RemoveAll',
+ ]
+
+ # Ignoring Close so that we don't have to have a bunch of
+ # `defer func() { _ = r.Close() }()` constructs when we
+ # don't actually care about the error.
+ ignore = "Close,fmt:.*"
+
+[linters-settings.errorlint]
+ errorf = true
+ asserts = true
+ comparison = true
+
+[linters-settings.exhaustive]
+ default-signifies-exhaustive = true
+
+[linters-settings.forbidigo]
+ # Forbid the following identifiers
+ forbid = [
+ "^minFraud*",
+ "^maxMind*",
+ ]
+
+[linters-settings.gocritic]
+ enabled-checks = [
+ "appendAssign",
+ "appendCombine",
+ "argOrder",
+ "assignOp",
+ "badCall",
+ "badCond",
+ "badLock",
+ "badRegexp",
+ "badSorting",
+ "boolExprSimplify",
+ "builtinShadow",
+ "builtinShadowDecl",
+ "captLocal",
+ "caseOrder",
+ "codegenComment",
+ "commentedOutCode",
+ "commentedOutImport",
+ "commentFormatting",
+ "defaultCaseOrder",
+ # Revive's defer rule already captures this. This caught no extra cases.
+ # "deferInLoop",
+ "deferUnlambda",
+ "deprecatedComment",
+ "docStub",
+ "dupArg",
+ "dupBranchBody",
+ "dupCase",
+ "dupImport",
+ "dupSubExpr",
+ "dynamicFmtString",
+ "elseif",
+ "emptyDecl",
+ "emptyFallthrough",
+ "emptyStringTest",
+ "equalFold",
+ "evalOrder",
+ "exitAfterDefer",
+ "exposedSyncMutex",
+ "externalErrorReassign",
+ # Given that all of our code runs on Linux and the / separate should
+ # work fine, this seems less important.
+ # "filepathJoin",
+ "flagDeref",
+ "flagName",
+ "hexLiteral",
+ "ifElseChain",
+ "importShadow",
+ "indexAlloc",
+ "initClause",
+ "ioutilDeprecated",
+ "mapKey",
+ "methodExprCall",
+ "nestingReduce",
+ "newDeref",
+ "nilValReturn",
+ "octalLiteral",
+ "offBy1",
+ "paramTypeCombine",
+ "preferDecodeRune",
+ "preferFilepathJoin",
+ "preferFprint",
+ "preferStringWriter",
+ "preferWriteByte",
+ "ptrToRefParam",
+ "rangeExprCopy",
+ "rangeValCopy",
+ "redundantSprint",
+ "regexpMust",
+ "regexpPattern",
+ # This might be good, but I don't think we want to encourage
+ # significant changes to regexes as we port stuff from Perl.
+ # "regexpSimplify",
+ "ruleguard",
+ "singleCaseSwitch",
+ "sliceClear",
+ "sloppyLen",
+ # This seems like it might also be good, but a lot of existing code
+ # fails.
+ # "sloppyReassign",
+ "returnAfterHttpError",
+ "sloppyTypeAssert",
+ "sortSlice",
+ "sprintfQuotedString",
+ "sqlQuery",
+ "stringsCompare",
+ "stringXbytes",
+ "switchTrue",
+ "syncMapLoadAndDelete",
+ "timeExprSimplify",
+ "todoCommentWithoutDetail",
+ "tooManyResultsChecker",
+ "truncateCmp",
+ "typeAssertChain",
+ "typeDefFirst",
+ "typeSwitchVar",
+ "typeUnparen",
+ "underef",
+ "unlabelStmt",
+ "unlambda",
+ # I am not sure we would want this linter and a lot of existing
+ # code fails.
+ # "unnamedResult",
+ "unnecessaryBlock",
+ "unnecessaryDefer",
+ "unslice",
+ "valSwap",
+ "weakCond",
+ "wrapperFunc",
+ "yodaStyleExpr",
+ # This requires explanations for "nolint" directives. This would be
+ # nice for gosec ones, but I am not sure we want it generally unless
+ # we can get the false positive rate lower.
+ # "whyNoLint"
+ ]
+
+[linters-settings.gofumpt]
+ extra-rules = true
+ lang-version = "1.18"
+
+[linters-settings.govet]
+ "enable-all" = true
+
+[linters-settings.lll]
+ line-length = 120
+ tab-width = 4
+
+[linters-settings.nolintlint]
+ allow-leading-space = false
+ allow-unused = false
+ allow-no-explanation = ["lll", "misspell"]
+ require-explanation = true
+ require-specific = true
+
+[linters-settings.revive]
+ ignore-generated-header = true
+ severity = "warning"
+
+ # This might be nice but it is so common that it is hard
+ # to enable.
+ # [[linters-settings.revive.rules]]
+ # name = "add-constant"
+
+ # [[linters-settings.revive.rules]]
+ # name = "argument-limit"
+
+ [[linters-settings.revive.rules]]
+ name = "atomic"
+
+ [[linters-settings.revive.rules]]
+ name = "bare-return"
+
+ [[linters-settings.revive.rules]]
+ name = "blank-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "bool-literal-in-expr"
+
+ [[linters-settings.revive.rules]]
+ name = "call-to-gc"
+
+ # [[linters-settings.revive.rules]]
+ # name = "cognitive-complexity"
+
+ # Probably a good rule, but we have a lot of names that
+ # only have case differences.
+ # [[linters-settings.revive.rules]]
+ # name = "confusing-naming"
+
+ # [[linters-settings.revive.rules]]
+ # name = "confusing-results"
+
+ [[linters-settings.revive.rules]]
+ name = "constant-logical-expr"
+
+ [[linters-settings.revive.rules]]
+ name = "context-as-argument"
+
+ [[linters-settings.revive.rules]]
+ name = "context-keys-type"
+
+ # [[linters-settings.revive.rules]]
+ # name = "cyclomatic"
+
+ # [[linters-settings.revive.rules]]
+ # name = "deep-exit"
+
+ [[linters-settings.revive.rules]]
+ name = "defer"
+
+ [[linters-settings.revive.rules]]
+ name = "dot-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "duplicated-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "early-return"
+
+ [[linters-settings.revive.rules]]
+ name = "empty-block"
+
+ [[linters-settings.revive.rules]]
+ name = "empty-lines"
+
+ [[linters-settings.revive.rules]]
+ name = "errorf"
+
+ [[linters-settings.revive.rules]]
+ name = "error-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "error-return"
+
+ [[linters-settings.revive.rules]]
+ name = "error-strings"
+
+ [[linters-settings.revive.rules]]
+ name = "exported"
+
+ # [[linters-settings.revive.rules]]
+ # name = "file-header"
+
+ # We have a lot of flag parameters. This linter probably makes
+ # a good point, but we would need some cleanup or a lot of nolints.
+ # [[linters-settings.revive.rules]]
+ # name = "flag-parameter"
+
+ # [[linters-settings.revive.rules]]
+ # name = "function-result-limit"
+
+ [[linters-settings.revive.rules]]
+ name = "get-return"
+
+ [[linters-settings.revive.rules]]
+ name = "identical-branches"
+
+ [[linters-settings.revive.rules]]
+ name = "if-return"
+
+ [[linters-settings.revive.rules]]
+ name = "imports-blacklist"
+
+ [[linters-settings.revive.rules]]
+ name = "import-shadowing"
+
+ [[linters-settings.revive.rules]]
+ name = "increment-decrement"
+
+ [[linters-settings.revive.rules]]
+ name = "indent-error-flow"
+
+ # [[linters-settings.revive.rules]]
+ # name = "line-length-limit"
+
+ # [[linters-settings.revive.rules]]
+ # name = "max-public-structs"
+
+ [[linters-settings.revive.rules]]
+ name = "modifies-parameter"
+
+ [[linters-settings.revive.rules]]
+ name = "modifies-value-receiver"
+
+ # We frequently use nested structs, particularly in tests.
+ # [[linters-settings.revive.rules]]
+ # name = "nested-structs"
+
+ [[linters-settings.revive.rules]]
+ name = "optimize-operands-order"
+
+ [[linters-settings.revive.rules]]
+ name = "package-comments"
+
+ [[linters-settings.revive.rules]]
+ name = "range"
+
+ [[linters-settings.revive.rules]]
+ name = "range-val-address"
+
+ [[linters-settings.revive.rules]]
+ name = "range-val-in-closure"
+
+ [[linters-settings.revive.rules]]
+ name = "receiver-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "redefines-builtin-id"
+
+ [[linters-settings.revive.rules]]
+ name = "string-of-int"
+
+ [[linters-settings.revive.rules]]
+ name = "struct-tag"
+
+ [[linters-settings.revive.rules]]
+ name = "superfluous-else"
+
+ [[linters-settings.revive.rules]]
+ name = "time-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "unconditional-recursion"
+
+ [[linters-settings.revive.rules]]
+ name = "unexported-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "unexported-return"
+
+ # This is covered elsewhere and we want to ignore some
+ # functions such as fmt.Fprintf.
+ # [[linters-settings.revive.rules]]
+ # name = "unhandled-error"
+
+ [[linters-settings.revive.rules]]
+ name = "unnecessary-stmt"
+
+ [[linters-settings.revive.rules]]
+ name = "unreachable-code"
+
+ [[linters-settings.revive.rules]]
+ name = "unused-parameter"
+
+ # We generally have unused receivers in tests for meeting the
+ # requirements of an interface.
+ # [[linters-settings.revive.rules]]
+ # name = "unused-receiver"
+
+ # This probably makes sense after we upgrade to 1.18
+ # [[linters-settings.revive.rules]]
+ # name = "use-any"
+
+ [[linters-settings.revive.rules]]
+ name = "useless-break"
+
+ [[linters-settings.revive.rules]]
+ name = "var-declaration"
+
+ [[linters-settings.revive.rules]]
+ name = "var-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "waitgroup-by-value"
+
+[linters-settings.unparam]
+ check-exported = true
+
+[[issues.exclude-rules]]
+ linters = [
+ "govet"
+ ]
+ # we want to enable almost all govet rules. It is easier to just filter out
+ # the ones we don't want:
+ #
+ # * fieldalignment - way too noisy. Although it is very useful in particular
+ # cases where we are trying to use as little memory as possible, having
+ # it go off on every struct isn't helpful.
+ # * shadow - although often useful, it complains about _many_ err
+ # shadowing assignments and some others where shadowing is clear.
+ text = "^(fieldalignment|shadow)"
diff --git a/vendor/github.com/oschwald/geoip2-golang/LICENSE b/vendor/github.com/oschwald/geoip2-golang/LICENSE
new file mode 100644
index 0000000..2969677
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2015, Gregory J. Oschwald
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/oschwald/geoip2-golang/README.md b/vendor/github.com/oschwald/geoip2-golang/README.md
new file mode 100644
index 0000000..72378f0
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/README.md
@@ -0,0 +1,93 @@
+# GeoIP2 Reader for Go #
+
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/oschwald/geoip2-golang)](https://pkg.go.dev/github.com/oschwald/geoip2-golang)
+
+This library reads MaxMind [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/)
+and [GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases.
+
+This library is built using
+[the Go maxminddb reader](https://github.com/oschwald/maxminddb-golang).
+All data for the database record is decoded using this library. If you only
+need several fields, you may get superior performance by using maxminddb's
+`Lookup` directly with a result struct that only contains the required fields.
+(See [example_test.go](https://github.com/oschwald/maxminddb-golang/blob/main/example_test.go)
+in the maxminddb repository for an example of this.)
+
+## Installation ##
+
+```
+go get github.com/oschwald/geoip2-golang
+```
+
+## Usage ##
+
+[See GoDoc](http://godoc.org/github.com/oschwald/geoip2-golang) for
+documentation and examples.
+
+## Example ##
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net"
+
+ "github.com/oschwald/geoip2-golang"
+)
+
+func main() {
+ db, err := geoip2.Open("GeoIP2-City.mmdb")
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+ // If you are using strings that may be invalid, check that ip is not nil
+ ip := net.ParseIP("81.2.69.142")
+ record, err := db.City(ip)
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Printf("Portuguese (BR) city name: %v\n", record.City.Names["pt-BR"])
+ if len(record.Subdivisions) > 0 {
+ fmt.Printf("English subdivision name: %v\n", record.Subdivisions[0].Names["en"])
+ }
+ fmt.Printf("Russian country name: %v\n", record.Country.Names["ru"])
+ fmt.Printf("ISO country code: %v\n", record.Country.IsoCode)
+ fmt.Printf("Time zone: %v\n", record.Location.TimeZone)
+ fmt.Printf("Coordinates: %v, %v\n", record.Location.Latitude, record.Location.Longitude)
+ // Output:
+ // Portuguese (BR) city name: Londres
+ // English subdivision name: England
+ // Russian country name: Великобритания
+ // ISO country code: GB
+ // Time zone: Europe/London
+ // Coordinates: 51.5142, -0.0931
+}
+
+```
+
+## Testing ##
+
+Make sure you checked out test data submodule:
+
+```
+git submodule init
+git submodule update
+```
+
+Execute test suite:
+
+```
+go test
+```
+
+## Contributing ##
+
+Contributions welcome! Please fork the repository and open a pull request
+with your changes.
+
+## License ##
+
+This is free software, licensed under the ISC license.
diff --git a/vendor/github.com/oschwald/geoip2-golang/reader.go b/vendor/github.com/oschwald/geoip2-golang/reader.go
new file mode 100644
index 0000000..f8439ec
--- /dev/null
+++ b/vendor/github.com/oschwald/geoip2-golang/reader.go
@@ -0,0 +1,418 @@
+// Package geoip2 provides an easy-to-use API for the MaxMind GeoIP2 and
+// GeoLite2 databases; this package does not support GeoIP Legacy databases.
+//
+// The structs provided by this package match the internal structure of
+// the data in the MaxMind databases.
+//
+// See github.com/oschwald/maxminddb-golang for more advanced used cases.
+package geoip2
+
+import (
+ "fmt"
+ "net"
+
+ "github.com/oschwald/maxminddb-golang"
+)
+
+// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise
+// database.
+type Enterprise struct {
+ City struct {
+ Confidence uint8 `maxminddb:"confidence"`
+ GeoNameID uint `maxminddb:"geoname_id"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"city"`
+ Continent struct {
+ Code string `maxminddb:"code"`
+ GeoNameID uint `maxminddb:"geoname_id"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"continent"`
+ Country struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ Confidence uint8 `maxminddb:"confidence"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ } `maxminddb:"country"`
+ Location struct {
+ AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
+ Latitude float64 `maxminddb:"latitude"`
+ Longitude float64 `maxminddb:"longitude"`
+ MetroCode uint `maxminddb:"metro_code"`
+ TimeZone string `maxminddb:"time_zone"`
+ } `maxminddb:"location"`
+ Postal struct {
+ Code string `maxminddb:"code"`
+ Confidence uint8 `maxminddb:"confidence"`
+ } `maxminddb:"postal"`
+ RegisteredCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ Confidence uint8 `maxminddb:"confidence"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ } `maxminddb:"registered_country"`
+ RepresentedCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ Type string `maxminddb:"type"`
+ } `maxminddb:"represented_country"`
+ Subdivisions []struct {
+ Confidence uint8 `maxminddb:"confidence"`
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"subdivisions"`
+ Traits struct {
+ AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
+ AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
+ ConnectionType string `maxminddb:"connection_type"`
+ Domain string `maxminddb:"domain"`
+ IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
+ IsLegitimateProxy bool `maxminddb:"is_legitimate_proxy"`
+ IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
+ ISP string `maxminddb:"isp"`
+ MobileCountryCode string `maxminddb:"mobile_country_code"`
+ MobileNetworkCode string `maxminddb:"mobile_network_code"`
+ Organization string `maxminddb:"organization"`
+ StaticIPScore float64 `maxminddb:"static_ip_score"`
+ UserType string `maxminddb:"user_type"`
+ } `maxminddb:"traits"`
+}
+
+// The City struct corresponds to the data in the GeoIP2/GeoLite2 City
+// databases.
+type City struct {
+ City struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"city"`
+ Continent struct {
+ Code string `maxminddb:"code"`
+ GeoNameID uint `maxminddb:"geoname_id"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"continent"`
+ Country struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"country"`
+ Location struct {
+ AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
+ Latitude float64 `maxminddb:"latitude"`
+ Longitude float64 `maxminddb:"longitude"`
+ MetroCode uint `maxminddb:"metro_code"`
+ TimeZone string `maxminddb:"time_zone"`
+ } `maxminddb:"location"`
+ Postal struct {
+ Code string `maxminddb:"code"`
+ } `maxminddb:"postal"`
+ RegisteredCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"registered_country"`
+ RepresentedCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ Type string `maxminddb:"type"`
+ } `maxminddb:"represented_country"`
+ Subdivisions []struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"subdivisions"`
+ Traits struct {
+ IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
+ IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
+ } `maxminddb:"traits"`
+}
+
+// The Country struct corresponds to the data in the GeoIP2/GeoLite2
+// Country databases.
+type Country struct {
+ Continent struct {
+ Code string `maxminddb:"code"`
+ GeoNameID uint `maxminddb:"geoname_id"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"continent"`
+ Country struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"country"`
+ RegisteredCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ } `maxminddb:"registered_country"`
+ RepresentedCountry struct {
+ GeoNameID uint `maxminddb:"geoname_id"`
+ IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
+ IsoCode string `maxminddb:"iso_code"`
+ Names map[string]string `maxminddb:"names"`
+ Type string `maxminddb:"type"`
+ } `maxminddb:"represented_country"`
+ Traits struct {
+ IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
+ IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
+ } `maxminddb:"traits"`
+}
+
+// The AnonymousIP struct corresponds to the data in the GeoIP2
+// Anonymous IP database.
+type AnonymousIP struct {
+ IsAnonymous bool `maxminddb:"is_anonymous"`
+ IsAnonymousVPN bool `maxminddb:"is_anonymous_vpn"`
+ IsHostingProvider bool `maxminddb:"is_hosting_provider"`
+ IsPublicProxy bool `maxminddb:"is_public_proxy"`
+ IsResidentialProxy bool `maxminddb:"is_residential_proxy"`
+ IsTorExitNode bool `maxminddb:"is_tor_exit_node"`
+}
+
+// The ASN struct corresponds to the data in the GeoLite2 ASN database.
+type ASN struct {
+ AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
+ AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
+}
+
+// The ConnectionType struct corresponds to the data in the GeoIP2
+// Connection-Type database.
+type ConnectionType struct {
+ ConnectionType string `maxminddb:"connection_type"`
+}
+
+// The Domain struct corresponds to the data in the GeoIP2 Domain database.
+type Domain struct {
+ Domain string `maxminddb:"domain"`
+}
+
+// The ISP struct corresponds to the data in the GeoIP2 ISP database.
+type ISP struct {
+ AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
+ AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
+ ISP string `maxminddb:"isp"`
+ MobileCountryCode string `maxminddb:"mobile_country_code"`
+ MobileNetworkCode string `maxminddb:"mobile_network_code"`
+ Organization string `maxminddb:"organization"`
+}
+
+type databaseType int
+
+const (
+ isAnonymousIP = 1 << iota
+ isASN
+ isCity
+ isConnectionType
+ isCountry
+ isDomain
+ isEnterprise
+ isISP
+)
+
+// Reader holds the maxminddb.Reader struct. It can be created using the
+// Open and FromBytes functions.
+type Reader struct {
+ mmdbReader *maxminddb.Reader
+ databaseType databaseType
+}
+
+// InvalidMethodError is returned when a lookup method is called on a
+// database that it does not support. For instance, calling the ISP method
+// on a City database.
+type InvalidMethodError struct {
+ Method string
+ DatabaseType string
+}
+
+func (e InvalidMethodError) Error() string {
+ return fmt.Sprintf(`geoip2: the %s method does not support the %s database`,
+ e.Method, e.DatabaseType)
+}
+
+// UnknownDatabaseTypeError is returned when an unknown database type is
+// opened.
+type UnknownDatabaseTypeError struct {
+ DatabaseType string
+}
+
+func (e UnknownDatabaseTypeError) Error() string {
+ return fmt.Sprintf(`geoip2: reader does not support the %q database type`,
+ e.DatabaseType)
+}
+
+// Open takes a string path to a file and returns a Reader struct or an error.
+// The database file is opened using a memory map. Use the Close method on the
+// Reader object to return the resources to the system.
+func Open(file string) (*Reader, error) {
+ reader, err := maxminddb.Open(file)
+ if err != nil {
+ return nil, err
+ }
+ dbType, err := getDBType(reader)
+ return &Reader{reader, dbType}, err
+}
+
+// FromBytes takes a byte slice corresponding to a GeoIP2/GeoLite2 database
+// file and returns a Reader struct or an error. Note that the byte slice is
+// used directly; any modification of it after opening the database will result
+// in errors while reading from the database.
+func FromBytes(bytes []byte) (*Reader, error) {
+ reader, err := maxminddb.FromBytes(bytes)
+ if err != nil {
+ return nil, err
+ }
+ dbType, err := getDBType(reader)
+ return &Reader{reader, dbType}, err
+}
+
+func getDBType(reader *maxminddb.Reader) (databaseType, error) {
+ switch reader.Metadata.DatabaseType {
+ case "GeoIP2-Anonymous-IP":
+ return isAnonymousIP, nil
+ case "DBIP-ASN-Lite (compat=GeoLite2-ASN)",
+ "GeoLite2-ASN":
+ return isASN, nil
+ // We allow City lookups on Country for back compat
+ case "DBIP-City-Lite",
+ "DBIP-Country-Lite",
+ "DBIP-Country",
+ "DBIP-Location (compat=City)",
+ "GeoLite2-City",
+ "GeoIP2-City",
+ "GeoIP2-City-Africa",
+ "GeoIP2-City-Asia-Pacific",
+ "GeoIP2-City-Europe",
+ "GeoIP2-City-North-America",
+ "GeoIP2-City-South-America",
+ "GeoIP2-Precision-City",
+ "GeoLite2-Country",
+ "GeoIP2-Country":
+ return isCity | isCountry, nil
+ case "GeoIP2-Connection-Type":
+ return isConnectionType, nil
+ case "GeoIP2-Domain":
+ return isDomain, nil
+ case "DBIP-ISP (compat=Enterprise)",
+ "DBIP-Location-ISP (compat=Enterprise)",
+ "GeoIP2-Enterprise":
+ return isEnterprise | isCity | isCountry, nil
+ case "GeoIP2-ISP",
+ "GeoIP2-Precision-ISP":
+ return isISP | isASN, nil
+ default:
+ return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType}
+ }
+}
+
+// Enterprise takes an IP address as a net.IP struct and returns an Enterprise
+// struct and/or an error. This is intended to be used with the GeoIP2
+// Enterprise database.
+func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) {
+ if isEnterprise&r.databaseType == 0 {
+ return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType}
+ }
+ var enterprise Enterprise
+ err := r.mmdbReader.Lookup(ipAddress, &enterprise)
+ return &enterprise, err
+}
+
+// City takes an IP address as a net.IP struct and returns a City struct
+// and/or an error. Although this can be used with other databases, this
+// method generally should be used with the GeoIP2 or GeoLite2 City databases.
+func (r *Reader) City(ipAddress net.IP) (*City, error) {
+ if isCity&r.databaseType == 0 {
+ return nil, InvalidMethodError{"City", r.Metadata().DatabaseType}
+ }
+ var city City
+ err := r.mmdbReader.Lookup(ipAddress, &city)
+ return &city, err
+}
+
+// Country takes an IP address as a net.IP struct and returns a Country struct
+// and/or an error. Although this can be used with other databases, this
+// method generally should be used with the GeoIP2 or GeoLite2 Country
+// databases.
+func (r *Reader) Country(ipAddress net.IP) (*Country, error) {
+ if isCountry&r.databaseType == 0 {
+ return nil, InvalidMethodError{"Country", r.Metadata().DatabaseType}
+ }
+ var country Country
+ err := r.mmdbReader.Lookup(ipAddress, &country)
+ return &country, err
+}
+
+// AnonymousIP takes an IP address as a net.IP struct and returns an
+// AnonymousIP struct and/or an error.
+func (r *Reader) AnonymousIP(ipAddress net.IP) (*AnonymousIP, error) {
+ if isAnonymousIP&r.databaseType == 0 {
+ return nil, InvalidMethodError{"AnonymousIP", r.Metadata().DatabaseType}
+ }
+ var anonIP AnonymousIP
+ err := r.mmdbReader.Lookup(ipAddress, &anonIP)
+ return &anonIP, err
+}
+
+// ASN takes an IP address as a net.IP struct and returns an ASN struct and/or
+// an error.
+func (r *Reader) ASN(ipAddress net.IP) (*ASN, error) {
+ if isASN&r.databaseType == 0 {
+ return nil, InvalidMethodError{"ASN", r.Metadata().DatabaseType}
+ }
+ var val ASN
+ err := r.mmdbReader.Lookup(ipAddress, &val)
+ return &val, err
+}
+
+// ConnectionType takes an IP address as a net.IP struct and returns a
+// ConnectionType struct and/or an error.
+func (r *Reader) ConnectionType(ipAddress net.IP) (*ConnectionType, error) {
+ if isConnectionType&r.databaseType == 0 {
+ return nil, InvalidMethodError{"ConnectionType", r.Metadata().DatabaseType}
+ }
+ var val ConnectionType
+ err := r.mmdbReader.Lookup(ipAddress, &val)
+ return &val, err
+}
+
+// Domain takes an IP address as a net.IP struct and returns a
+// Domain struct and/or an error.
+func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) {
+ if isDomain&r.databaseType == 0 {
+ return nil, InvalidMethodError{"Domain", r.Metadata().DatabaseType}
+ }
+ var val Domain
+ err := r.mmdbReader.Lookup(ipAddress, &val)
+ return &val, err
+}
+
+// ISP takes an IP address as a net.IP struct and returns an ISP struct and/or
+// an error.
+func (r *Reader) ISP(ipAddress net.IP) (*ISP, error) {
+ if isISP&r.databaseType == 0 {
+ return nil, InvalidMethodError{"ISP", r.Metadata().DatabaseType}
+ }
+ var val ISP
+ err := r.mmdbReader.Lookup(ipAddress, &val)
+ return &val, err
+}
+
+// Metadata takes no arguments and returns a struct containing metadata about
+// the MaxMind database in use by the Reader.
+func (r *Reader) Metadata() maxminddb.Metadata {
+ return r.mmdbReader.Metadata
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system.
+func (r *Reader) Close() error {
+ return r.mmdbReader.Close()
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitignore b/vendor/github.com/oschwald/maxminddb-golang/.gitignore
new file mode 100644
index 0000000..fe3fa4a
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.gitignore
@@ -0,0 +1,4 @@
+.vscode
+*.out
+*.sw?
+*.test
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.gitmodules b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
new file mode 100644
index 0000000..400b2ab
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "test-data"]
+ path = test-data
+ url = https://github.com/maxmind/MaxMind-DB.git
diff --git a/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
new file mode 100644
index 0000000..b4f7e6a
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/.golangci.toml
@@ -0,0 +1,472 @@
+[run]
+ deadline = "10m"
+
+ tests = true
+
+[linters]
+ disable-all = true
+ enable = [
+ "asciicheck",
+ "bidichk",
+ "bodyclose",
+ "containedctx",
+ "contextcheck",
+ "deadcode",
+ "depguard",
+ "durationcheck",
+ "errcheck",
+ "errchkjson",
+ "errname",
+ "errorlint",
+ "exportloopref",
+ "forbidigo",
+ #"forcetypeassert",
+ "goconst",
+ "gocyclo",
+ "gocritic",
+ "godot",
+ "gofumpt",
+ "gomodguard",
+ "gosec",
+ "gosimple",
+ "govet",
+ "grouper",
+ "ineffassign",
+ "lll",
+ "makezero",
+ "maintidx",
+ "misspell",
+ "nakedret",
+ "nilerr",
+ "noctx",
+ "nolintlint",
+ "nosprintfhostport",
+ "predeclared",
+ "revive",
+ "rowserrcheck",
+ "sqlclosecheck",
+ "staticcheck",
+ "structcheck",
+ "stylecheck",
+ "tenv",
+ "tparallel",
+ "typecheck",
+ "unconvert",
+ "unparam",
+ "unused",
+ "varcheck",
+ "vetshadow",
+ "wastedassign",
+ ]
+
+# Please note that we only use depguard for stdlib as gomodguard only
+# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
+[linters-settings.depguard]
+ list-type = "blacklist"
+ include-go-root = true
+ packages = [
+ # ioutil is deprecated. The functions have been moved elsewhere:
+ # https://golang.org/doc/go1.16#ioutil
+ "io/ioutil",
+ ]
+
+[linters-settings.errcheck]
+ # Don't allow setting of error to the blank identifier. If there is a legitimate
+ # reason, there should be a nolint with an explanation.
+ check-blank = true
+
+ exclude-functions = [
+ # If we are rolling back a transaction, we are often already in an error
+ # state.
+ '(*database/sql.Tx).Rollback',
+
+ # It is reasonable to ignore errors if Cleanup fails in most cases.
+ '(*github.com/google/renameio/v2.PendingFile).Cleanup',
+
+ # We often don't care if removing a file failed (e.g., it doesn't exist)
+ 'os.Remove',
+ 'os.RemoveAll',
+ ]
+
+ # Ignoring Close so that we don't have to have a bunch of
+ # `defer func() { _ = r.Close() }()` constructs when we
+ # don't actually care about the error.
+ ignore = "Close,fmt:.*"
+
+[linters-settings.errorlint]
+ errorf = true
+ asserts = true
+ comparison = true
+
+[linters-settings.exhaustive]
+ default-signifies-exhaustive = true
+
+[linters-settings.forbidigo]
+ # Forbid the following identifiers
+ forbid = [
+ "^minFraud*",
+ "^maxMind*",
+ ]
+
+[linters-settings.gocritic]
+ enabled-checks = [
+ "appendAssign",
+ "appendCombine",
+ "argOrder",
+ "assignOp",
+ "badCall",
+ "badCond",
+ "badLock",
+ "badRegexp",
+ "badSorting",
+ "boolExprSimplify",
+ "builtinShadow",
+ "builtinShadowDecl",
+ "captLocal",
+ "caseOrder",
+ "codegenComment",
+ "commentedOutCode",
+ "commentedOutImport",
+ "commentFormatting",
+ "defaultCaseOrder",
+ # Revive's defer rule already captures this. This caught no extra cases.
+ # "deferInLoop",
+ "deferUnlambda",
+ "deprecatedComment",
+ "docStub",
+ "dupArg",
+ "dupBranchBody",
+ "dupCase",
+ "dupImport",
+ "dupSubExpr",
+ "dynamicFmtString",
+ "elseif",
+ "emptyDecl",
+ "emptyFallthrough",
+ "emptyStringTest",
+ "equalFold",
+ "evalOrder",
+ "exitAfterDefer",
+ "exposedSyncMutex",
+ "externalErrorReassign",
+ # Given that all of our code runs on Linux and the / separate should
+ # work fine, this seems less important.
+ # "filepathJoin",
+ "flagDeref",
+ "flagName",
+ "hexLiteral",
+ "ifElseChain",
+ "importShadow",
+ "indexAlloc",
+ "initClause",
+ "ioutilDeprecated",
+ "mapKey",
+ "methodExprCall",
+ "nestingReduce",
+ "newDeref",
+ "nilValReturn",
+ "octalLiteral",
+ "offBy1",
+ "paramTypeCombine",
+ "preferDecodeRune",
+ "preferFilepathJoin",
+ "preferFprint",
+ "preferStringWriter",
+ "preferWriteByte",
+ "ptrToRefParam",
+ "rangeExprCopy",
+ "rangeValCopy",
+ "redundantSprint",
+ "regexpMust",
+ "regexpPattern",
+ # This might be good, but I don't think we want to encourage
+ # significant changes to regexes as we port stuff from Perl.
+ # "regexpSimplify",
+ "ruleguard",
+ "singleCaseSwitch",
+ "sliceClear",
+ "sloppyLen",
+ # This seems like it might also be good, but a lot of existing code
+ # fails.
+ # "sloppyReassign",
+ "returnAfterHttpError",
+ "sloppyTypeAssert",
+ "sortSlice",
+ "sprintfQuotedString",
+ "sqlQuery",
+ "stringsCompare",
+ "stringXbytes",
+ "switchTrue",
+ "syncMapLoadAndDelete",
+ "timeExprSimplify",
+ "todoCommentWithoutDetail",
+ "tooManyResultsChecker",
+ "truncateCmp",
+ "typeAssertChain",
+ "typeDefFirst",
+ "typeSwitchVar",
+ "typeUnparen",
+ "underef",
+ "unlabelStmt",
+ "unlambda",
+ # I am not sure we would want this linter and a lot of existing
+ # code fails.
+ # "unnamedResult",
+ "unnecessaryBlock",
+ "unnecessaryDefer",
+ "unslice",
+ "valSwap",
+ "weakCond",
+ "wrapperFunc",
+ "yodaStyleExpr",
+ # This requires explanations for "nolint" directives. This would be
+ # nice for gosec ones, but I am not sure we want it generally unless
+ # we can get the false positive rate lower.
+ # "whyNoLint"
+ ]
+
+[linters-settings.gofumpt]
+ extra-rules = true
+ lang-version = "1.18"
+
+[linters-settings.govet]
+ "enable-all" = true
+
+[linters-settings.lll]
+ line-length = 120
+ tab-width = 4
+
+[linters-settings.nolintlint]
+ allow-leading-space = false
+ allow-unused = false
+ allow-no-explanation = ["lll", "misspell"]
+ require-explanation = true
+ require-specific = true
+
+[linters-settings.revive]
+ ignore-generated-header = true
+ severity = "warning"
+
+ # This might be nice but it is so common that it is hard
+ # to enable.
+ # [[linters-settings.revive.rules]]
+ # name = "add-constant"
+
+ # [[linters-settings.revive.rules]]
+ # name = "argument-limit"
+
+ [[linters-settings.revive.rules]]
+ name = "atomic"
+
+ [[linters-settings.revive.rules]]
+ name = "bare-return"
+
+ [[linters-settings.revive.rules]]
+ name = "blank-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "bool-literal-in-expr"
+
+ [[linters-settings.revive.rules]]
+ name = "call-to-gc"
+
+ # [[linters-settings.revive.rules]]
+ # name = "cognitive-complexity"
+
+ # Probably a good rule, but we have a lot of names that
+ # only have case differences.
+ # [[linters-settings.revive.rules]]
+ # name = "confusing-naming"
+
+ # [[linters-settings.revive.rules]]
+ # name = "confusing-results"
+
+ [[linters-settings.revive.rules]]
+ name = "constant-logical-expr"
+
+ [[linters-settings.revive.rules]]
+ name = "context-as-argument"
+
+ [[linters-settings.revive.rules]]
+ name = "context-keys-type"
+
+ # [[linters-settings.revive.rules]]
+ # name = "cyclomatic"
+
+ # [[linters-settings.revive.rules]]
+ # name = "deep-exit"
+
+ [[linters-settings.revive.rules]]
+ name = "defer"
+
+ [[linters-settings.revive.rules]]
+ name = "dot-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "duplicated-imports"
+
+ [[linters-settings.revive.rules]]
+ name = "early-return"
+
+ [[linters-settings.revive.rules]]
+ name = "empty-block"
+
+ [[linters-settings.revive.rules]]
+ name = "empty-lines"
+
+ [[linters-settings.revive.rules]]
+ name = "errorf"
+
+ [[linters-settings.revive.rules]]
+ name = "error-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "error-return"
+
+ [[linters-settings.revive.rules]]
+ name = "error-strings"
+
+ [[linters-settings.revive.rules]]
+ name = "exported"
+
+ # [[linters-settings.revive.rules]]
+ # name = "file-header"
+
+ # We have a lot of flag parameters. This linter probably makes
+ # a good point, but we would need some cleanup or a lot of nolints.
+ # [[linters-settings.revive.rules]]
+ # name = "flag-parameter"
+
+ # [[linters-settings.revive.rules]]
+ # name = "function-result-limit"
+
+ [[linters-settings.revive.rules]]
+ name = "get-return"
+
+ [[linters-settings.revive.rules]]
+ name = "identical-branches"
+
+ [[linters-settings.revive.rules]]
+ name = "if-return"
+
+ [[linters-settings.revive.rules]]
+ name = "imports-blacklist"
+
+ [[linters-settings.revive.rules]]
+ name = "import-shadowing"
+
+ [[linters-settings.revive.rules]]
+ name = "increment-decrement"
+
+ [[linters-settings.revive.rules]]
+ name = "indent-error-flow"
+
+ # [[linters-settings.revive.rules]]
+ # name = "line-length-limit"
+
+ # [[linters-settings.revive.rules]]
+ # name = "max-public-structs"
+
+ [[linters-settings.revive.rules]]
+ name = "modifies-parameter"
+
+ [[linters-settings.revive.rules]]
+ name = "modifies-value-receiver"
+
+ # We frequently use nested structs, particularly in tests.
+ # [[linters-settings.revive.rules]]
+ # name = "nested-structs"
+
+ [[linters-settings.revive.rules]]
+ name = "optimize-operands-order"
+
+ [[linters-settings.revive.rules]]
+ name = "package-comments"
+
+ [[linters-settings.revive.rules]]
+ name = "range"
+
+ [[linters-settings.revive.rules]]
+ name = "range-val-address"
+
+ [[linters-settings.revive.rules]]
+ name = "range-val-in-closure"
+
+ [[linters-settings.revive.rules]]
+ name = "receiver-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "redefines-builtin-id"
+
+ [[linters-settings.revive.rules]]
+ name = "string-of-int"
+
+ [[linters-settings.revive.rules]]
+ name = "struct-tag"
+
+ [[linters-settings.revive.rules]]
+ name = "superfluous-else"
+
+ [[linters-settings.revive.rules]]
+ name = "time-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "unconditional-recursion"
+
+ [[linters-settings.revive.rules]]
+ name = "unexported-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "unexported-return"
+
+ # This is covered elsewhere and we want to ignore some
+ # functions such as fmt.Fprintf.
+ # [[linters-settings.revive.rules]]
+ # name = "unhandled-error"
+
+ [[linters-settings.revive.rules]]
+ name = "unnecessary-stmt"
+
+ [[linters-settings.revive.rules]]
+ name = "unreachable-code"
+
+ [[linters-settings.revive.rules]]
+ name = "unused-parameter"
+
+ # We generally have unused receivers in tests for meeting the
+ # requirements of an interface.
+ # [[linters-settings.revive.rules]]
+ # name = "unused-receiver"
+
+ # This probably makes sense after we upgrade to 1.18
+ # [[linters-settings.revive.rules]]
+ # name = "use-any"
+
+ [[linters-settings.revive.rules]]
+ name = "useless-break"
+
+ [[linters-settings.revive.rules]]
+ name = "var-declaration"
+
+ [[linters-settings.revive.rules]]
+ name = "var-naming"
+
+ [[linters-settings.revive.rules]]
+ name = "waitgroup-by-value"
+
+[linters-settings.unparam]
+ check-exported = true
+
+[[issues.exclude-rules]]
+ linters = [
+ "govet"
+ ]
+ # we want to enable almost all govet rules. It is easier to just filter out
+ # the ones we don't want:
+ #
+ # * fieldalignment - way too noisy. Although it is very useful in particular
+ # cases where we are trying to use as little memory as possible, having
+ # it go off on every struct isn't helpful.
+ # * shadow - although often useful, it complains about _many_ err
+ # shadowing assignments and some others where shadowing is clear.
+ text = "^(fieldalignment|shadow)"
diff --git a/vendor/github.com/oschwald/maxminddb-golang/LICENSE b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
new file mode 100644
index 0000000..2969677
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2015, Gregory J. Oschwald
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/README.md b/vendor/github.com/oschwald/maxminddb-golang/README.md
new file mode 100644
index 0000000..9662888
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/README.md
@@ -0,0 +1,36 @@
+# MaxMind DB Reader for Go #
+
+[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.svg)](https://godoc.org/github.com/oschwald/maxminddb-golang)
+
+This is a Go reader for the MaxMind DB format. Although this can be used to
+read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
+[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
+[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
+API for doing so.
+
+This is not an official MaxMind API.
+
+## Installation ##
+
+```
+go get github.com/oschwald/maxminddb-golang
+```
+
+## Usage ##
+
+[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
+documentation and examples.
+
+## Examples ##
+
+See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
+`example_test.go` for examples.
+
+## Contributing ##
+
+Contributions welcome! Please fork the repository and open a pull request
+with your changes.
+
+## License ##
+
+This is free software, licensed under the ISC License.
diff --git a/vendor/github.com/oschwald/maxminddb-golang/decoder.go b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
new file mode 100644
index 0000000..828c57f
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/decoder.go
@@ -0,0 +1,897 @@
+package maxminddb
+
+import (
+ "encoding/binary"
+ "math"
+ "math/big"
+ "reflect"
+ "sync"
+)
+
+type decoder struct {
+ buffer []byte
+}
+
+type dataType int
+
+const (
+ _Extended dataType = iota
+ _Pointer
+ _String
+ _Float64
+ _Bytes
+ _Uint16
+ _Uint32
+ _Map
+ _Int32
+ _Uint64
+ _Uint128
+ _Slice
+ // We don't use the next two. They are placeholders. See the spec
+ // for more details.
+ _Container //nolint: deadcode, varcheck // above
+ _Marker //nolint: deadcode, varcheck // above
+ _Bool
+ _Float32
+)
+
+const (
+ // This is the value used in libmaxminddb.
+ maximumDataStructureDepth = 512
+)
+
+func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
+ if depth > maximumDataStructureDepth {
+ return 0, newInvalidDatabaseError(
+ "exceeded maximum data structure depth; database is likely corrupt",
+ )
+ }
+ typeNum, size, newOffset, err := d.decodeCtrlData(offset)
+ if err != nil {
+ return 0, err
+ }
+
+ if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
+ result.Set(reflect.ValueOf(uintptr(offset)))
+ return d.nextValueOffset(offset, 1)
+ }
+ return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
+}
+
+func (d *decoder) decodeToDeserializer(
+ offset uint,
+ dser deserializer,
+ depth int,
+ getNext bool,
+) (uint, error) {
+ if depth > maximumDataStructureDepth {
+ return 0, newInvalidDatabaseError(
+ "exceeded maximum data structure depth; database is likely corrupt",
+ )
+ }
+ skip, err := dser.ShouldSkip(uintptr(offset))
+ if err != nil {
+ return 0, err
+ }
+ if skip {
+ if getNext {
+ return d.nextValueOffset(offset, 1)
+ }
+ return 0, nil
+ }
+
+ typeNum, size, newOffset, err := d.decodeCtrlData(offset)
+ if err != nil {
+ return 0, err
+ }
+
+ return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1)
+}
+
+func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
+ newOffset := offset + 1
+ if offset >= uint(len(d.buffer)) {
+ return 0, 0, 0, newOffsetError()
+ }
+ ctrlByte := d.buffer[offset]
+
+ typeNum := dataType(ctrlByte >> 5)
+ if typeNum == _Extended {
+ if newOffset >= uint(len(d.buffer)) {
+ return 0, 0, 0, newOffsetError()
+ }
+ typeNum = dataType(d.buffer[newOffset] + 7)
+ newOffset++
+ }
+
+ var size uint
+ size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
+ return typeNum, size, newOffset, err
+}
+
+func (d *decoder) sizeFromCtrlByte(
+ ctrlByte byte,
+ offset uint,
+ typeNum dataType,
+) (uint, uint, error) {
+ size := uint(ctrlByte & 0x1f)
+ if typeNum == _Extended {
+ return size, offset, nil
+ }
+
+ var bytesToRead uint
+ if size < 29 {
+ return size, offset, nil
+ }
+
+ bytesToRead = size - 28
+ newOffset := offset + bytesToRead
+ if newOffset > uint(len(d.buffer)) {
+ return 0, 0, newOffsetError()
+ }
+ if size == 29 {
+ return 29 + uint(d.buffer[offset]), offset + 1, nil
+ }
+
+ sizeBytes := d.buffer[offset:newOffset]
+
+ switch {
+ case size == 30:
+ size = 285 + uintFromBytes(0, sizeBytes)
+ case size > 30:
+ size = uintFromBytes(0, sizeBytes) + 65821
+ }
+ return size, newOffset, nil
+}
+
+func (d *decoder) decodeFromType(
+ dtype dataType,
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ result = d.indirect(result)
+
+ // For these types, size has a special meaning
+ switch dtype {
+ case _Bool:
+ return d.unmarshalBool(size, offset, result)
+ case _Map:
+ return d.unmarshalMap(size, offset, result, depth)
+ case _Pointer:
+ return d.unmarshalPointer(size, offset, result, depth)
+ case _Slice:
+ return d.unmarshalSlice(size, offset, result, depth)
+ }
+
+ // For the remaining types, size is the byte size
+ if offset+size > uint(len(d.buffer)) {
+ return 0, newOffsetError()
+ }
+ switch dtype {
+ case _Bytes:
+ return d.unmarshalBytes(size, offset, result)
+ case _Float32:
+ return d.unmarshalFloat32(size, offset, result)
+ case _Float64:
+ return d.unmarshalFloat64(size, offset, result)
+ case _Int32:
+ return d.unmarshalInt32(size, offset, result)
+ case _String:
+ return d.unmarshalString(size, offset, result)
+ case _Uint16:
+ return d.unmarshalUint(size, offset, result, 16)
+ case _Uint32:
+ return d.unmarshalUint(size, offset, result, 32)
+ case _Uint64:
+ return d.unmarshalUint(size, offset, result, 64)
+ case _Uint128:
+ return d.unmarshalUint128(size, offset, result)
+ default:
+ return 0, newInvalidDatabaseError("unknown type: %d", dtype)
+ }
+}
+
+func (d *decoder) decodeFromTypeToDeserializer(
+ dtype dataType,
+ size uint,
+ offset uint,
+ dser deserializer,
+ depth int,
+) (uint, error) {
+ // For these types, size has a special meaning
+ switch dtype {
+ case _Bool:
+ v, offset := d.decodeBool(size, offset)
+ return offset, dser.Bool(v)
+ case _Map:
+ return d.decodeMapToDeserializer(size, offset, dser, depth)
+ case _Pointer:
+ pointer, newOffset, err := d.decodePointer(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ _, err = d.decodeToDeserializer(pointer, dser, depth, false)
+ return newOffset, err
+ case _Slice:
+ return d.decodeSliceToDeserializer(size, offset, dser, depth)
+ }
+
+ // For the remaining types, size is the byte size
+ if offset+size > uint(len(d.buffer)) {
+ return 0, newOffsetError()
+ }
+ switch dtype {
+ case _Bytes:
+ v, offset := d.decodeBytes(size, offset)
+ return offset, dser.Bytes(v)
+ case _Float32:
+ v, offset := d.decodeFloat32(size, offset)
+ return offset, dser.Float32(v)
+ case _Float64:
+ v, offset := d.decodeFloat64(size, offset)
+ return offset, dser.Float64(v)
+ case _Int32:
+ v, offset := d.decodeInt(size, offset)
+ return offset, dser.Int32(int32(v))
+ case _String:
+ v, offset := d.decodeString(size, offset)
+ return offset, dser.String(v)
+ case _Uint16:
+ v, offset := d.decodeUint(size, offset)
+ return offset, dser.Uint16(uint16(v))
+ case _Uint32:
+ v, offset := d.decodeUint(size, offset)
+ return offset, dser.Uint32(uint32(v))
+ case _Uint64:
+ v, offset := d.decodeUint(size, offset)
+ return offset, dser.Uint64(v)
+ case _Uint128:
+ v, offset := d.decodeUint128(size, offset)
+ return offset, dser.Uint128(v)
+ default:
+ return 0, newInvalidDatabaseError("unknown type: %d", dtype)
+ }
+}
+
+func (d *decoder) unmarshalBool(size, offset uint, result reflect.Value) (uint, error) {
+ if size > 1 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (bool size of %v)",
+ size,
+ )
+ }
+ value, newOffset := d.decodeBool(size, offset)
+
+ switch result.Kind() {
+ case reflect.Bool:
+ result.SetBool(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+// indirect follows pointers and create values as necessary. This is
+// heavily based on encoding/json as my original version had a subtle
+// bug. This method should be considered to be licensed under
+// https://golang.org/LICENSE
+func (d *decoder) indirect(result reflect.Value) reflect.Value {
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if result.Kind() == reflect.Interface && !result.IsNil() {
+ e := result.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() {
+ result = e
+ continue
+ }
+ }
+
+ if result.Kind() != reflect.Ptr {
+ break
+ }
+
+ if result.IsNil() {
+ result.Set(reflect.New(result.Type().Elem()))
+ }
+
+ result = result.Elem()
+ }
+ return result
+}
+
+var sliceType = reflect.TypeOf([]byte{})
+
+func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) {
+ value, newOffset := d.decodeBytes(size, offset)
+
+ switch result.Kind() {
+ case reflect.Slice:
+ if result.Type() == sliceType {
+ result.SetBytes(value)
+ return newOffset, nil
+ }
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) {
+ if size != 4 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (float32 size of %v)",
+ size,
+ )
+ }
+ value, newOffset := d.decodeFloat32(size, offset)
+
+ switch result.Kind() {
+ case reflect.Float32, reflect.Float64:
+ result.SetFloat(float64(value))
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) {
+ if size != 8 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (float 64 size of %v)",
+ size,
+ )
+ }
+ value, newOffset := d.decodeFloat64(size, offset)
+
+ switch result.Kind() {
+ case reflect.Float32, reflect.Float64:
+ if result.OverflowFloat(value) {
+ return 0, newUnmarshalTypeError(value, result.Type())
+ }
+ result.SetFloat(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) {
+ if size > 4 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (int32 size of %v)",
+ size,
+ )
+ }
+ value, newOffset := d.decodeInt(size, offset)
+
+ switch result.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n := int64(value)
+ if !result.OverflowInt(n) {
+ result.SetInt(n)
+ return newOffset, nil
+ }
+ case reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64,
+ reflect.Uintptr:
+ n := uint64(value)
+ if !result.OverflowUint(n) {
+ result.SetUint(n)
+ return newOffset, nil
+ }
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalMap(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ result = d.indirect(result)
+ switch result.Kind() {
+ default:
+ return 0, newUnmarshalTypeError("map", result.Type())
+ case reflect.Struct:
+ return d.decodeStruct(size, offset, result, depth)
+ case reflect.Map:
+ return d.decodeMap(size, offset, result, depth)
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ rv := reflect.ValueOf(make(map[string]interface{}, size))
+ newOffset, err := d.decodeMap(size, offset, rv, depth)
+ result.Set(rv)
+ return newOffset, err
+ }
+ return 0, newUnmarshalTypeError("map", result.Type())
+ }
+}
+
+func (d *decoder) unmarshalPointer(
+ size, offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ pointer, newOffset, err := d.decodePointer(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ _, err = d.decode(pointer, result, depth)
+ return newOffset, err
+}
+
+func (d *decoder) unmarshalSlice(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ switch result.Kind() {
+ case reflect.Slice:
+ return d.decodeSlice(size, offset, result, depth)
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ a := []interface{}{}
+ rv := reflect.ValueOf(&a).Elem()
+ newOffset, err := d.decodeSlice(size, offset, rv, depth)
+ result.Set(rv)
+ return newOffset, err
+ }
+ }
+ return 0, newUnmarshalTypeError("array", result.Type())
+}
+
+func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) {
+ value, newOffset := d.decodeString(size, offset)
+
+ switch result.Kind() {
+ case reflect.String:
+ result.SetString(value)
+ return newOffset, nil
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) unmarshalUint(
+ size, offset uint,
+ result reflect.Value,
+ uintType uint,
+) (uint, error) {
+ if size > uintType/8 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (uint%v size of %v)",
+ uintType,
+ size,
+ )
+ }
+
+ value, newOffset := d.decodeUint(size, offset)
+
+ switch result.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n := int64(value)
+ if !result.OverflowInt(n) {
+ result.SetInt(n)
+ return newOffset, nil
+ }
+ case reflect.Uint,
+ reflect.Uint8,
+ reflect.Uint16,
+ reflect.Uint32,
+ reflect.Uint64,
+ reflect.Uintptr:
+ if !result.OverflowUint(value) {
+ result.SetUint(value)
+ return newOffset, nil
+ }
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+var bigIntType = reflect.TypeOf(big.Int{})
+
+func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) {
+ if size > 16 {
+ return 0, newInvalidDatabaseError(
+ "the MaxMind DB file's data section contains bad data (uint128 size of %v)",
+ size,
+ )
+ }
+ value, newOffset := d.decodeUint128(size, offset)
+
+ switch result.Kind() {
+ case reflect.Struct:
+ if result.Type() == bigIntType {
+ result.Set(reflect.ValueOf(*value))
+ return newOffset, nil
+ }
+ case reflect.Interface:
+ if result.NumMethod() == 0 {
+ result.Set(reflect.ValueOf(value))
+ return newOffset, nil
+ }
+ }
+ return newOffset, newUnmarshalTypeError(value, result.Type())
+}
+
+func (d *decoder) decodeBool(size, offset uint) (bool, uint) {
+ return size != 0, offset
+}
+
+func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) {
+ newOffset := offset + size
+ bytes := make([]byte, size)
+ copy(bytes, d.buffer[offset:newOffset])
+ return bytes, newOffset
+}
+
+func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) {
+ newOffset := offset + size
+ bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
+ return math.Float64frombits(bits), newOffset
+}
+
+func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) {
+ newOffset := offset + size
+ bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
+ return math.Float32frombits(bits), newOffset
+}
+
+func (d *decoder) decodeInt(size, offset uint) (int, uint) {
+ newOffset := offset + size
+ var val int32
+ for _, b := range d.buffer[offset:newOffset] {
+ val = (val << 8) | int32(b)
+ }
+ return int(val), newOffset
+}
+
+func (d *decoder) decodeMap(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ if result.IsNil() {
+ result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
+ }
+
+ mapType := result.Type()
+ keyValue := reflect.New(mapType.Key()).Elem()
+ elemType := mapType.Elem()
+ elemKind := elemType.Kind()
+ var elemValue reflect.Value
+ for i := uint(0); i < size; i++ {
+ var key []byte
+ var err error
+ key, offset, err = d.decodeKey(offset)
+
+ if err != nil {
+ return 0, err
+ }
+
+ if !elemValue.IsValid() || elemKind == reflect.Interface {
+ elemValue = reflect.New(elemType).Elem()
+ }
+
+ offset, err = d.decode(offset, elemValue, depth)
+ if err != nil {
+ return 0, err
+ }
+
+ keyValue.SetString(string(key))
+ result.SetMapIndex(keyValue, elemValue)
+ }
+ return offset, nil
+}
+
+func (d *decoder) decodeMapToDeserializer(
+ size uint,
+ offset uint,
+ dser deserializer,
+ depth int,
+) (uint, error) {
+ err := dser.StartMap(size)
+ if err != nil {
+ return 0, err
+ }
+ for i := uint(0); i < size; i++ {
+ // TODO - implement key/value skipping?
+ offset, err = d.decodeToDeserializer(offset, dser, depth, true)
+ if err != nil {
+ return 0, err
+ }
+
+ offset, err = d.decodeToDeserializer(offset, dser, depth, true)
+ if err != nil {
+ return 0, err
+ }
+ }
+ err = dser.End()
+ if err != nil {
+ return 0, err
+ }
+ return offset, nil
+}
+
+func (d *decoder) decodePointer(
+ size uint,
+ offset uint,
+) (uint, uint, error) {
+ pointerSize := ((size >> 3) & 0x3) + 1
+ newOffset := offset + pointerSize
+ if newOffset > uint(len(d.buffer)) {
+ return 0, 0, newOffsetError()
+ }
+ pointerBytes := d.buffer[offset:newOffset]
+ var prefix uint
+ if pointerSize == 4 {
+ prefix = 0
+ } else {
+ prefix = size & 0x7
+ }
+ unpacked := uintFromBytes(prefix, pointerBytes)
+
+ var pointerValueOffset uint
+ switch pointerSize {
+ case 1:
+ pointerValueOffset = 0
+ case 2:
+ pointerValueOffset = 2048
+ case 3:
+ pointerValueOffset = 526336
+ case 4:
+ pointerValueOffset = 0
+ }
+
+ pointer := unpacked + pointerValueOffset
+
+ return pointer, newOffset, nil
+}
+
+func (d *decoder) decodeSlice(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
+ for i := 0; i < int(size); i++ {
+ var err error
+ offset, err = d.decode(offset, result.Index(i), depth)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return offset, nil
+}
+
+func (d *decoder) decodeSliceToDeserializer(
+ size uint,
+ offset uint,
+ dser deserializer,
+ depth int,
+) (uint, error) {
+ err := dser.StartSlice(size)
+ if err != nil {
+ return 0, err
+ }
+ for i := uint(0); i < size; i++ {
+ offset, err = d.decodeToDeserializer(offset, dser, depth, true)
+ if err != nil {
+ return 0, err
+ }
+ }
+ err = dser.End()
+ if err != nil {
+ return 0, err
+ }
+ return offset, nil
+}
+
+func (d *decoder) decodeString(size, offset uint) (string, uint) {
+ newOffset := offset + size
+ return string(d.buffer[offset:newOffset]), newOffset
+}
+
+func (d *decoder) decodeStruct(
+ size uint,
+ offset uint,
+ result reflect.Value,
+ depth int,
+) (uint, error) {
+ fields := cachedFields(result)
+
+ // This fills in embedded structs
+ for _, i := range fields.anonymousFields {
+ _, err := d.unmarshalMap(size, offset, result.Field(i), depth)
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ // This handles named fields
+ for i := uint(0); i < size; i++ {
+ var (
+ err error
+ key []byte
+ )
+ key, offset, err = d.decodeKey(offset)
+ if err != nil {
+ return 0, err
+ }
+ // The string() does not create a copy due to this compiler
+ // optimization: https://github.com/golang/go/issues/3512
+ j, ok := fields.namedFields[string(key)]
+ if !ok {
+ offset, err = d.nextValueOffset(offset, 1)
+ if err != nil {
+ return 0, err
+ }
+ continue
+ }
+
+ offset, err = d.decode(offset, result.Field(j), depth)
+ if err != nil {
+ return 0, err
+ }
+ }
+ return offset, nil
+}
+
+type fieldsType struct {
+ namedFields map[string]int
+ anonymousFields []int
+}
+
+var fieldsMap sync.Map
+
+func cachedFields(result reflect.Value) *fieldsType {
+ resultType := result.Type()
+
+ if fields, ok := fieldsMap.Load(resultType); ok {
+ return fields.(*fieldsType)
+ }
+ numFields := resultType.NumField()
+ namedFields := make(map[string]int, numFields)
+ var anonymous []int
+ for i := 0; i < numFields; i++ {
+ field := resultType.Field(i)
+
+ fieldName := field.Name
+ if tag := field.Tag.Get("maxminddb"); tag != "" {
+ if tag == "-" {
+ continue
+ }
+ fieldName = tag
+ }
+ if field.Anonymous {
+ anonymous = append(anonymous, i)
+ continue
+ }
+ namedFields[fieldName] = i
+ }
+ fields := &fieldsType{namedFields, anonymous}
+ fieldsMap.Store(resultType, fields)
+
+ return fields
+}
+
+func (d *decoder) decodeUint(size, offset uint) (uint64, uint) {
+ newOffset := offset + size
+ bytes := d.buffer[offset:newOffset]
+
+ var val uint64
+ for _, b := range bytes {
+ val = (val << 8) | uint64(b)
+ }
+ return val, newOffset
+}
+
+func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) {
+ newOffset := offset + size
+ val := new(big.Int)
+ val.SetBytes(d.buffer[offset:newOffset])
+
+ return val, newOffset
+}
+
+func uintFromBytes(prefix uint, uintBytes []byte) uint {
+ val := prefix
+ for _, b := range uintBytes {
+ val = (val << 8) | uint(b)
+ }
+ return val
+}
+
+// decodeKey decodes a map key into []byte slice. We use a []byte so that we
+// can take advantage of https://github.com/golang/go/issues/3512 to avoid
+// copying the bytes when decoding a struct. Previously, we achieved this by
+// using unsafe.
+func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
+ typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
+ if err != nil {
+ return nil, 0, err
+ }
+ if typeNum == _Pointer {
+ pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
+ if err != nil {
+ return nil, 0, err
+ }
+ key, _, err := d.decodeKey(pointer)
+ return key, ptrOffset, err
+ }
+ if typeNum != _String {
+ return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
+ }
+ newOffset := dataOffset + size
+ if newOffset > uint(len(d.buffer)) {
+ return nil, 0, newOffsetError()
+ }
+ return d.buffer[dataOffset:newOffset], newOffset, nil
+}
+
+// This function is used to skip ahead to the next value without decoding
+// the one at the offset passed in. The size bits have different meanings for
+// different data types.
+func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) {
+ if numberToSkip == 0 {
+ return offset, nil
+ }
+ typeNum, size, offset, err := d.decodeCtrlData(offset)
+ if err != nil {
+ return 0, err
+ }
+ switch typeNum {
+ case _Pointer:
+ _, offset, err = d.decodePointer(size, offset)
+ if err != nil {
+ return 0, err
+ }
+ case _Map:
+ numberToSkip += 2 * size
+ case _Slice:
+ numberToSkip += size
+ case _Bool:
+ default:
+ offset += size
+ }
+ return d.nextValueOffset(offset, numberToSkip-1)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/deserializer.go b/vendor/github.com/oschwald/maxminddb-golang/deserializer.go
new file mode 100644
index 0000000..c6dd68d
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/deserializer.go
@@ -0,0 +1,31 @@
+package maxminddb
+
+import "math/big"
+
+// deserializer is an interface for a type that deserializes an MaxMind DB
+// data record to some other type. This exists as an alternative to the
+// standard reflection API.
+//
+// This is fundamentally different than the Unmarshaler interface that
+// several packages provide. A Deserializer will generally create the
+// final struct or value rather than unmarshaling to itself.
+//
+// This interface and the associated unmarshaling code is EXPERIMENTAL!
+// It is not currently covered by any Semantic Versioning guarantees.
+// Use at your own risk.
+type deserializer interface {
+ ShouldSkip(offset uintptr) (bool, error)
+ StartSlice(size uint) error
+ StartMap(size uint) error
+ End() error
+ String(string) error
+ Float64(float64) error
+ Bytes([]byte) error
+ Uint16(uint16) error
+ Uint32(uint32) error
+ Int32(int32) error
+ Uint64(uint64) error
+ Uint128(*big.Int) error
+ Bool(bool) error
+ Float32(float32) error
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/errors.go b/vendor/github.com/oschwald/maxminddb-golang/errors.go
new file mode 100644
index 0000000..1327800
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/errors.go
@@ -0,0 +1,42 @@
+package maxminddb
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// InvalidDatabaseError is returned when the database contains invalid data
+// and cannot be parsed.
+type InvalidDatabaseError struct {
+ message string
+}
+
+func newOffsetError() InvalidDatabaseError {
+ return InvalidDatabaseError{"unexpected end of database"}
+}
+
+func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
+ return InvalidDatabaseError{fmt.Sprintf(format, args...)}
+}
+
+func (e InvalidDatabaseError) Error() string {
+ return e.message
+}
+
+// UnmarshalTypeError is returned when the value in the database cannot be
+// assigned to the specified data type.
+type UnmarshalTypeError struct {
+ Value string // stringified copy of the database value that caused the error
+ Type reflect.Type // type of the value that could not be assign to
+}
+
+func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
+ return UnmarshalTypeError{
+ Value: fmt.Sprintf("%v", value),
+ Type: rType,
+ }
+}
+
+func (e UnmarshalTypeError) Error() string {
+ return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
new file mode 100644
index 0000000..eeb2e05
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_unix.go
@@ -0,0 +1,16 @@
+//go:build !windows && !appengine && !plan9
+// +build !windows,!appengine,!plan9
+
+package maxminddb
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func mmap(fd, length int) (data []byte, err error) {
+ return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
+}
+
+func munmap(b []byte) (err error) {
+ return unix.Munmap(b)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
new file mode 100644
index 0000000..661250e
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/mmap_windows.go
@@ -0,0 +1,85 @@
+// +build windows,!appengine
+
+package maxminddb
+
+// Windows support largely borrowed from mmap-go.
+//
+// Copyright 2011 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+import (
+ "errors"
+ "os"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+type memoryMap []byte
+
+// Windows
+var handleLock sync.Mutex
+var handleMap = map[uintptr]windows.Handle{}
+
+func mmap(fd int, length int) (data []byte, err error) {
+ h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
+ uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
+ if h == 0 {
+ return nil, os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
+ 0, uintptr(length))
+ if addr == 0 {
+ return nil, os.NewSyscallError("MapViewOfFile", errno)
+ }
+ handleLock.Lock()
+ handleMap[addr] = h
+ handleLock.Unlock()
+
+ m := memoryMap{}
+ dh := m.header()
+ dh.Data = addr
+ dh.Len = length
+ dh.Cap = dh.Len
+
+ return m, nil
+}
+
+func (m *memoryMap) header() *reflect.SliceHeader {
+ return (*reflect.SliceHeader)(unsafe.Pointer(m))
+}
+
+func flush(addr, len uintptr) error {
+ errno := windows.FlushViewOfFile(addr, len)
+ return os.NewSyscallError("FlushViewOfFile", errno)
+}
+
+func munmap(b []byte) (err error) {
+ m := memoryMap(b)
+ dh := m.header()
+
+ addr := dh.Data
+ length := uintptr(dh.Len)
+
+ flush(addr, length)
+ err = windows.UnmapViewOfFile(addr)
+ if err != nil {
+ return err
+ }
+
+ handleLock.Lock()
+ defer handleLock.Unlock()
+ handle, ok := handleMap[addr]
+ if !ok {
+ // should be impossible; we would've errored above
+ return errors.New("unknown base address")
+ }
+ delete(handleMap, addr)
+
+ e := windows.CloseHandle(windows.Handle(handle))
+ return os.NewSyscallError("CloseHandle", e)
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/node.go b/vendor/github.com/oschwald/maxminddb-golang/node.go
new file mode 100644
index 0000000..16e8b5f
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/node.go
@@ -0,0 +1,58 @@
+package maxminddb
+
+type nodeReader interface {
+ readLeft(uint) uint
+ readRight(uint) uint
+}
+
+type nodeReader24 struct {
+ buffer []byte
+}
+
+func (n nodeReader24) readLeft(nodeNumber uint) uint {
+ return (uint(n.buffer[nodeNumber]) << 16) |
+ (uint(n.buffer[nodeNumber+1]) << 8) |
+ uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader24) readRight(nodeNumber uint) uint {
+ return (uint(n.buffer[nodeNumber+3]) << 16) |
+ (uint(n.buffer[nodeNumber+4]) << 8) |
+ uint(n.buffer[nodeNumber+5])
+}
+
+type nodeReader28 struct {
+ buffer []byte
+}
+
+func (n nodeReader28) readLeft(nodeNumber uint) uint {
+ return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) |
+ (uint(n.buffer[nodeNumber]) << 16) |
+ (uint(n.buffer[nodeNumber+1]) << 8) |
+ uint(n.buffer[nodeNumber+2])
+}
+
+func (n nodeReader28) readRight(nodeNumber uint) uint {
+ return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) |
+ (uint(n.buffer[nodeNumber+4]) << 16) |
+ (uint(n.buffer[nodeNumber+5]) << 8) |
+ uint(n.buffer[nodeNumber+6])
+}
+
+type nodeReader32 struct {
+ buffer []byte
+}
+
+func (n nodeReader32) readLeft(nodeNumber uint) uint {
+ return (uint(n.buffer[nodeNumber]) << 24) |
+ (uint(n.buffer[nodeNumber+1]) << 16) |
+ (uint(n.buffer[nodeNumber+2]) << 8) |
+ uint(n.buffer[nodeNumber+3])
+}
+
+func (n nodeReader32) readRight(nodeNumber uint) uint {
+ return (uint(n.buffer[nodeNumber+4]) << 24) |
+ (uint(n.buffer[nodeNumber+5]) << 16) |
+ (uint(n.buffer[nodeNumber+6]) << 8) |
+ uint(n.buffer[nodeNumber+7])
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader.go b/vendor/github.com/oschwald/maxminddb-golang/reader.go
new file mode 100644
index 0000000..263cf64
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader.go
@@ -0,0 +1,310 @@
+// Package maxminddb provides a reader for the MaxMind DB file format.
+package maxminddb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "reflect"
+)
+
+const (
+ // NotFound is returned by LookupOffset when a matched root record offset
+ // cannot be found.
+ NotFound = ^uintptr(0)
+
+ dataSectionSeparatorSize = 16
+)
+
+var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
+
+// Reader holds the data corresponding to the MaxMind DB file. Its only public
+// field is Metadata, which contains the metadata from the MaxMind DB file.
+//
+// All of the methods on Reader are thread-safe. The struct may be safely
+// shared across goroutines.
+type Reader struct {
+ hasMappedFile bool
+ buffer []byte
+ nodeReader nodeReader
+ decoder decoder
+ Metadata Metadata
+ ipv4Start uint
+ ipv4StartBitDepth int
+ nodeOffsetMult uint
+}
+
+// Metadata holds the metadata decoded from the MaxMind DB file. In particular
+// it has the format version, the build time as Unix epoch time, the database
+// type and description, the IP version supported, and a slice of the natural
+// languages included.
+type Metadata struct {
+ BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"`
+ BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"`
+ BuildEpoch uint `maxminddb:"build_epoch"`
+ DatabaseType string `maxminddb:"database_type"`
+ Description map[string]string `maxminddb:"description"`
+ IPVersion uint `maxminddb:"ip_version"`
+ Languages []string `maxminddb:"languages"`
+ NodeCount uint `maxminddb:"node_count"`
+ RecordSize uint `maxminddb:"record_size"`
+}
+
+// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
+// a Reader structure or an error.
+func FromBytes(buffer []byte) (*Reader, error) {
+ metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
+
+ if metadataStart == -1 {
+ return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
+ }
+
+ metadataStart += len(metadataStartMarker)
+ metadataDecoder := decoder{buffer[metadataStart:]}
+
+ var metadata Metadata
+
+ rvMetdata := reflect.ValueOf(&metadata)
+ _, err := metadataDecoder.decode(0, rvMetdata, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
+ dataSectionStart := searchTreeSize + dataSectionSeparatorSize
+ dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
+ if dataSectionStart > dataSectionEnd {
+ return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
+ }
+ d := decoder{
+ buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
+ }
+
+ nodeBuffer := buffer[:searchTreeSize]
+ var nodeReader nodeReader
+ switch metadata.RecordSize {
+ case 24:
+ nodeReader = nodeReader24{buffer: nodeBuffer}
+ case 28:
+ nodeReader = nodeReader28{buffer: nodeBuffer}
+ case 32:
+ nodeReader = nodeReader32{buffer: nodeBuffer}
+ default:
+ return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
+ }
+
+ reader := &Reader{
+ buffer: buffer,
+ nodeReader: nodeReader,
+ decoder: d,
+ Metadata: metadata,
+ ipv4Start: 0,
+ nodeOffsetMult: metadata.RecordSize / 4,
+ }
+
+ reader.setIPv4Start()
+
+ return reader, err
+}
+
+func (r *Reader) setIPv4Start() {
+ if r.Metadata.IPVersion != 6 {
+ return
+ }
+
+ nodeCount := r.Metadata.NodeCount
+
+ node := uint(0)
+ i := 0
+ for ; i < 96 && node < nodeCount; i++ {
+ node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
+ }
+ r.ipv4Start = node
+ r.ipv4StartBitDepth = i
+}
+
+// Lookup retrieves the database record for ip and stores it in the value
+// pointed to by result. If result is nil or not a pointer, an error is
+// returned. If the data in the database record cannot be stored in result
+// because of type differences, an UnmarshalTypeError is returned. If the
+// database is invalid or otherwise cannot be read, an InvalidDatabaseError
+// is returned.
+func (r *Reader) Lookup(ip net.IP, result interface{}) error {
+ if r.buffer == nil {
+ return errors.New("cannot call Lookup on a closed database")
+ }
+ pointer, _, _, err := r.lookupPointer(ip)
+ if pointer == 0 || err != nil {
+ return err
+ }
+ return r.retrieveData(pointer, result)
+}
+
+// LookupNetwork retrieves the database record for ip and stores it in the
+// value pointed to by result. The network returned is the network associated
+// with the data record in the database. The ok return value indicates whether
+// the database contained a record for the ip.
+//
+// If result is nil or not a pointer, an error is returned. If the data in the
+// database record cannot be stored in result because of type differences, an
+// UnmarshalTypeError is returned. If the database is invalid or otherwise
+// cannot be read, an InvalidDatabaseError is returned.
+func (r *Reader) LookupNetwork(
+ ip net.IP,
+ result interface{},
+) (network *net.IPNet, ok bool, err error) {
+ if r.buffer == nil {
+ return nil, false, errors.New("cannot call Lookup on a closed database")
+ }
+ pointer, prefixLength, ip, err := r.lookupPointer(ip)
+
+ network = r.cidr(ip, prefixLength)
+ if pointer == 0 || err != nil {
+ return network, false, err
+ }
+
+ return network, true, r.retrieveData(pointer, result)
+}
+
+// LookupOffset maps an argument net.IP to a corresponding record offset in the
+// database. NotFound is returned if no such record is found, and a record may
+// otherwise be extracted by passing the returned offset to Decode. LookupOffset
+// is an advanced API, which exists to provide clients with a means to cache
+// previously-decoded records.
+func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
+ if r.buffer == nil {
+ return 0, errors.New("cannot call LookupOffset on a closed database")
+ }
+ pointer, _, _, err := r.lookupPointer(ip)
+ if pointer == 0 || err != nil {
+ return NotFound, err
+ }
+ return r.resolveDataPointer(pointer)
+}
+
+func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
+ // This is necessary as the node that the IPv4 start is at may
+ // be at a bit depth that is less that 96, i.e., ipv4Start points
+ // to a leaf node. For instance, if a record was inserted at ::/8,
+ // the ipv4Start would point directly at the leaf node for the
+ // record and would have a bit depth of 8. This would not happen
+ // with databases currently distributed by MaxMind as all of them
+ // have an IPv4 subtree that is greater than a single node.
+ if r.Metadata.IPVersion == 6 &&
+ len(ip) == net.IPv4len &&
+ r.ipv4StartBitDepth != 96 {
+ return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
+ }
+
+ mask := net.CIDRMask(prefixLength, len(ip)*8)
+ return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
+}
+
+// Decode the record at |offset| into |result|. The result value pointed to
+// must be a data value that corresponds to a record in the database. This may
+// include a struct representation of the data, a map capable of holding the
+// data or an empty interface{} value.
+//
+// If result is a pointer to a struct, the struct need not include a field
+// for every value that may be in the database. If a field is not present in
+// the structure, the decoder will not decode that field, reducing the time
+// required to decode the record.
+//
+// As a special case, a struct field of type uintptr will be used to capture
+// the offset of the value. Decode may later be used to extract the stored
+// value from the offset. MaxMind DBs are highly normalized: for example in
+// the City database, all records of the same country will reference a
+// single representative record for that country. This uintptr behavior allows
+// clients to leverage this normalization in their own sub-record caching.
+func (r *Reader) Decode(offset uintptr, result interface{}) error {
+ if r.buffer == nil {
+ return errors.New("cannot call Decode on a closed database")
+ }
+ return r.decode(offset, result)
+}
+
+func (r *Reader) decode(offset uintptr, result interface{}) error {
+ rv := reflect.ValueOf(result)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return errors.New("result param must be a pointer")
+ }
+
+ if dser, ok := result.(deserializer); ok {
+ _, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
+ return err
+ }
+
+ _, err := r.decoder.decode(uint(offset), rv, 0)
+ return err
+}
+
+func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
+ if ip == nil {
+ return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
+ }
+
+ ipV4Address := ip.To4()
+ if ipV4Address != nil {
+ ip = ipV4Address
+ }
+ if len(ip) == 16 && r.Metadata.IPVersion == 4 {
+ return 0, 0, ip, fmt.Errorf(
+ "error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
+ ip.String(),
+ )
+ }
+
+ bitCount := uint(len(ip) * 8)
+
+ var node uint
+ if bitCount == 32 {
+ node = r.ipv4Start
+ }
+ node, prefixLength := r.traverseTree(ip, node, bitCount)
+
+ nodeCount := r.Metadata.NodeCount
+ if node == nodeCount {
+ // Record is empty
+ return 0, prefixLength, ip, nil
+ } else if node > nodeCount {
+ return node, prefixLength, ip, nil
+ }
+
+ return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
+}
+
+func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
+ nodeCount := r.Metadata.NodeCount
+
+ i := uint(0)
+ for ; i < bitCount && node < nodeCount; i++ {
+ bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))
+
+ offset := node * r.nodeOffsetMult
+ if bit == 0 {
+ node = r.nodeReader.readLeft(offset)
+ } else {
+ node = r.nodeReader.readRight(offset)
+ }
+ }
+
+ return node, int(i)
+}
+
+func (r *Reader) retrieveData(pointer uint, result interface{}) error {
+ offset, err := r.resolveDataPointer(pointer)
+ if err != nil {
+ return err
+ }
+ return r.decode(offset, result)
+}
+
+func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
+ resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
+
+ if resolved >= uintptr(len(r.buffer)) {
+ return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
+ }
+ return resolved, nil
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
new file mode 100644
index 0000000..c6385d8
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_appengine.go
@@ -0,0 +1,28 @@
+// +build appengine plan9
+
+package maxminddb
+
+import "io/ioutil"
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
+func Open(file string) (*Reader, error) {
+ bytes, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, err
+ }
+
+ return FromBytes(bytes)
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method sets the underlying buffer
+// to nil, returning the resources to the system.
+func (r *Reader) Close() error {
+ r.buffer = nil
+ return nil
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/reader_other.go b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
new file mode 100644
index 0000000..0ed9de1
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/reader_other.go
@@ -0,0 +1,66 @@
+//go:build !appengine && !plan9
+// +build !appengine,!plan9
+
+package maxminddb
+
+import (
+ "os"
+ "runtime"
+)
+
+// Open takes a string path to a MaxMind DB file and returns a Reader
+// structure or an error. The database file is opened using a memory map,
+// except on Google App Engine where mmap is not supported; there the database
+// is loaded into memory. Use the Close method on the Reader object to return
+// the resources to the system.
+func Open(file string) (*Reader, error) {
+ mapFile, err := os.Open(file)
+ if err != nil {
+ _ = mapFile.Close()
+ return nil, err
+ }
+
+ stats, err := mapFile.Stat()
+ if err != nil {
+ _ = mapFile.Close()
+ return nil, err
+ }
+
+ fileSize := int(stats.Size())
+ mmap, err := mmap(int(mapFile.Fd()), fileSize)
+ if err != nil {
+ _ = mapFile.Close()
+ return nil, err
+ }
+
+ if err := mapFile.Close(); err != nil {
+ //nolint:errcheck // we prefer to return the original error
+ munmap(mmap)
+ return nil, err
+ }
+
+ reader, err := FromBytes(mmap)
+ if err != nil {
+ //nolint:errcheck // we prefer to return the original error
+ munmap(mmap)
+ return nil, err
+ }
+
+ reader.hasMappedFile = true
+ runtime.SetFinalizer(reader, (*Reader).Close)
+ return reader, nil
+}
+
+// Close unmaps the database file from virtual memory and returns the
+// resources to the system. If called on a Reader opened using FromBytes
+// or Open on Google App Engine, this method does nothing.
+func (r *Reader) Close() error {
+ var err error
+ if r.hasMappedFile {
+ runtime.SetFinalizer(r, nil)
+ r.hasMappedFile = false
+ err = munmap(r.buffer)
+ }
+ r.buffer = nil
+ return err
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/traverse.go b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
new file mode 100644
index 0000000..7009ec1
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/traverse.go
@@ -0,0 +1,205 @@
+package maxminddb
+
+import (
+ "fmt"
+ "net"
+)
+
+// Internal structure used to keep track of nodes we still need to visit.
+type netNode struct {
+ ip net.IP
+ bit uint
+ pointer uint
+}
+
+// Networks represents a set of subnets that we are iterating over.
+type Networks struct {
+ reader *Reader
+ nodes []netNode // Nodes we still have to visit.
+ lastNode netNode
+ err error
+
+ skipAliasedNetworks bool
+}
+
+var (
+ allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
+ allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}
+)
+
+// NetworksOption are options for Networks and NetworksWithin.
+type NetworksOption func(*Networks)
+
+// SkipAliasedNetworks is an option for Networks and NetworksWithin that
+// makes them not iterate over aliases of the IPv4 subtree in an IPv6
+// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
+//
+// You most likely want to set this. The only reason it isn't the default
+// behavior is to provide backwards compatibility to existing users.
+func SkipAliasedNetworks(networks *Networks) {
+ networks.skipAliasedNetworks = true
+}
+
+// Networks returns an iterator that can be used to traverse all networks in
+// the database.
+//
+// Please note that a MaxMind DB may map IPv4 networks into several locations
+// in an IPv6 database. This iterator will iterate over all of these locations
+// separately. To only iterate over the IPv4 networks once, use the
+// SkipAliasedNetworks option.
+func (r *Reader) Networks(options ...NetworksOption) *Networks {
+ var networks *Networks
+ if r.Metadata.IPVersion == 6 {
+ networks = r.NetworksWithin(allIPv6, options...)
+ } else {
+ networks = r.NetworksWithin(allIPv4, options...)
+ }
+
+ return networks
+}
+
+// NetworksWithin returns an iterator that can be used to traverse all networks
+// in the database which are contained in a given network.
+//
+// Please note that a MaxMind DB may map IPv4 networks into several locations
+// in an IPv6 database. This iterator will iterate over all of these locations
+// separately. To only iterate over the IPv4 networks once, use the
+// SkipAliasedNetworks option.
+//
+// If the provided network is contained within a network in the database, the
+// iterator will iterate over exactly one network, the containing network.
+func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
+ if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
+ return &Networks{
+ err: fmt.Errorf(
+ "error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
+ network.String(),
+ ),
+ }
+ }
+
+ networks := &Networks{reader: r}
+ for _, option := range options {
+ option(networks)
+ }
+
+ ip := network.IP
+ prefixLength, _ := network.Mask.Size()
+
+ if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
+ if networks.skipAliasedNetworks {
+ ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
+ } else {
+ ip = ip.To16()
+ }
+ prefixLength += 96
+ }
+
+ pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
+ networks.nodes = []netNode{
+ {
+ ip: ip,
+ bit: uint(bit),
+ pointer: pointer,
+ },
+ }
+
+ return networks
+}
+
+// Next prepares the next network for reading with the Network method. It
+// returns true if there is another network to be processed and false if there
+// are no more networks or if there is an error.
+func (n *Networks) Next() bool {
+ if n.err != nil {
+ return false
+ }
+ for len(n.nodes) > 0 {
+ node := n.nodes[len(n.nodes)-1]
+ n.nodes = n.nodes[:len(n.nodes)-1]
+
+ for node.pointer != n.reader.Metadata.NodeCount {
+ // This skips IPv4 aliases without hardcoding the networks that the writer
+ // currently aliases.
+ if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
+ node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
+ break
+ }
+
+ if node.pointer > n.reader.Metadata.NodeCount {
+ n.lastNode = node
+ return true
+ }
+ ipRight := make(net.IP, len(node.ip))
+ copy(ipRight, node.ip)
+ if len(ipRight) <= int(node.bit>>3) {
+ n.err = newInvalidDatabaseError(
+ "invalid search tree at %v/%v", ipRight, node.bit)
+ return false
+ }
+ ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
+
+ offset := node.pointer * n.reader.nodeOffsetMult
+ rightPointer := n.reader.nodeReader.readRight(offset)
+
+ node.bit++
+ n.nodes = append(n.nodes, netNode{
+ pointer: rightPointer,
+ ip: ipRight,
+ bit: node.bit,
+ })
+
+ node.pointer = n.reader.nodeReader.readLeft(offset)
+ }
+ }
+
+ return false
+}
+
+// Network returns the current network or an error if there is a problem
+// decoding the data for the network. It takes a pointer to a result value to
+// decode the network's data into.
+func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
+ if n.err != nil {
+ return nil, n.err
+ }
+ if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
+ return nil, err
+ }
+
+ ip := n.lastNode.ip
+ prefixLength := int(n.lastNode.bit)
+
+ // We do this because uses of SkipAliasedNetworks expect the IPv4 networks
+ // to be returned as IPv4 networks. If we are not skipping aliased
+ // networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
+ // network as Go automatically converts those.
+ if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
+ ip = ip[12:]
+ prefixLength -= 96
+ }
+
+ return &net.IPNet{
+ IP: ip,
+ Mask: net.CIDRMask(prefixLength, len(ip)*8),
+ }, nil
+}
+
+// Err returns an error, if any, that was encountered during iteration.
+func (n *Networks) Err() error {
+ return n.err
+}
+
+// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
+// IPv4 subtree.
+func isInIPv4Subtree(ip net.IP) bool {
+ if len(ip) != 16 {
+ return false
+ }
+ for i := 0; i < 12; i++ {
+ if ip[i] != 0 {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/oschwald/maxminddb-golang/verifier.go b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
new file mode 100644
index 0000000..88381d7
--- /dev/null
+++ b/vendor/github.com/oschwald/maxminddb-golang/verifier.go
@@ -0,0 +1,201 @@
+package maxminddb
+
+import (
+ "reflect"
+ "runtime"
+)
+
+type verifier struct {
+ reader *Reader
+}
+
+// Verify checks that the database is valid. It validates the search tree,
+// the data section, and the metadata section. This verifier is stricter than
+// the specification and may return errors on databases that are readable.
+func (r *Reader) Verify() error {
+ v := verifier{r}
+ if err := v.verifyMetadata(); err != nil {
+ return err
+ }
+
+ err := v.verifyDatabase()
+ runtime.KeepAlive(v.reader)
+ return err
+}
+
+func (v *verifier) verifyMetadata() error {
+ metadata := v.reader.Metadata
+
+ if metadata.BinaryFormatMajorVersion != 2 {
+ return testError(
+ "binary_format_major_version",
+ 2,
+ metadata.BinaryFormatMajorVersion,
+ )
+ }
+
+ if metadata.BinaryFormatMinorVersion != 0 {
+ return testError(
+ "binary_format_minor_version",
+ 0,
+ metadata.BinaryFormatMinorVersion,
+ )
+ }
+
+ if metadata.DatabaseType == "" {
+ return testError(
+ "database_type",
+ "non-empty string",
+ metadata.DatabaseType,
+ )
+ }
+
+ if len(metadata.Description) == 0 {
+ return testError(
+ "description",
+ "non-empty slice",
+ metadata.Description,
+ )
+ }
+
+ if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
+ return testError(
+ "ip_version",
+ "4 or 6",
+ metadata.IPVersion,
+ )
+ }
+
+ if metadata.RecordSize != 24 &&
+ metadata.RecordSize != 28 &&
+ metadata.RecordSize != 32 {
+ return testError(
+ "record_size",
+ "24, 28, or 32",
+ metadata.RecordSize,
+ )
+ }
+
+ if metadata.NodeCount == 0 {
+ return testError(
+ "node_count",
+ "positive integer",
+ metadata.NodeCount,
+ )
+ }
+ return nil
+}
+
+func (v *verifier) verifyDatabase() error {
+ offsets, err := v.verifySearchTree()
+ if err != nil {
+ return err
+ }
+
+ if err := v.verifyDataSectionSeparator(); err != nil {
+ return err
+ }
+
+ return v.verifyDataSection(offsets)
+}
+
+func (v *verifier) verifySearchTree() (map[uint]bool, error) {
+ offsets := make(map[uint]bool)
+
+ it := v.reader.Networks()
+ for it.Next() {
+ offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
+ if err != nil {
+ return nil, err
+ }
+ offsets[uint(offset)] = true
+ }
+ if err := it.Err(); err != nil {
+ return nil, err
+ }
+ return offsets, nil
+}
+
+func (v *verifier) verifyDataSectionSeparator() error {
+ separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
+
+ separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
+
+ for _, b := range separator {
+ if b != 0 {
+ return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
+ }
+ }
+ return nil
+}
+
+func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
+ pointerCount := len(offsets)
+
+ decoder := v.reader.decoder
+
+ var offset uint
+ bufferLen := uint(len(decoder.buffer))
+ for offset < bufferLen {
+ var data interface{}
+ rv := reflect.ValueOf(&data)
+ newOffset, err := decoder.decode(offset, rv, 0)
+ if err != nil {
+ return newInvalidDatabaseError(
+ "received decoding error (%v) at offset of %v",
+ err,
+ offset,
+ )
+ }
+ if newOffset <= offset {
+ return newInvalidDatabaseError(
+ "data section offset unexpectedly went from %v to %v",
+ offset,
+ newOffset,
+ )
+ }
+
+ pointer := offset
+
+ if _, ok := offsets[pointer]; !ok {
+ return newInvalidDatabaseError(
+ "found data (%v) at %v that the search tree does not point to",
+ data,
+ pointer,
+ )
+ }
+ delete(offsets, pointer)
+
+ offset = newOffset
+ }
+
+ if offset != bufferLen {
+ return newInvalidDatabaseError(
+ "unexpected data at the end of the data section (last offset: %v, end: %v)",
+ offset,
+ bufferLen,
+ )
+ }
+
+ if len(offsets) != 0 {
+ return newInvalidDatabaseError(
+ "found %v pointers (of %v) in the search tree that we did not see in the data section",
+ len(offsets),
+ pointerCount,
+ )
+ }
+ return nil
+}
+
+func testError(
+ field string,
+ expected interface{},
+ actual interface{},
+) error {
+ return newInvalidDatabaseError(
+ "%v - Expected: %v Actual: %v",
+ field,
+ expected,
+ actual,
+ )
+}
diff --git a/vendor/go.dtapp.net/dorm/const.go b/vendor/go.dtapp.net/dorm/const.go
index af53090..6dd6b81 100644
--- a/vendor/go.dtapp.net/dorm/const.go
+++ b/vendor/go.dtapp.net/dorm/const.go
@@ -1,3 +1,3 @@
package dorm
-const Version = "1.0.33"
+const Version = "1.0.36"
diff --git a/vendor/go.dtapp.net/dorm/gorm_postgresql.go b/vendor/go.dtapp.net/dorm/gorm_postgresql.go
index 2fd10b4..a15149a 100644
--- a/vendor/go.dtapp.net/dorm/gorm_postgresql.go
+++ b/vendor/go.dtapp.net/dorm/gorm_postgresql.go
@@ -160,7 +160,7 @@ func NewGormPostgresqlClient(config *ConfigGormClient) (*GormClient, error) {
// 设置了连接可复用的最大时间
if c.config.ConnSetConnMaxLifetime == 0 {
- sqlDB.SetConnMaxLifetime(time.Second * 600)
+ sqlDB.SetConnMaxLifetime(time.Hour)
} else {
sqlDB.SetConnMaxLifetime(time.Duration(c.config.ConnSetConnMaxLifetime))
}
diff --git a/vendor/go.dtapp.net/dorm/mongo.go b/vendor/go.dtapp.net/dorm/mongo.go
index af8003f..1b33b36 100644
--- a/vendor/go.dtapp.net/dorm/mongo.go
+++ b/vendor/go.dtapp.net/dorm/mongo.go
@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
- "go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
@@ -16,36 +15,31 @@ type ConfigMongoClient struct {
}
type MongoClient struct {
- Db *mongo.Client // 驱动
- config *ConfigMongoClient // 配置
- databaseName string // 库名
- collectionName string // 表名
- //filterArr []queryFilter // 查询条件数组
- filter bson.D // 查询条件
+ Db *mongo.Client // 驱动
+ config *ConfigMongoClient // 配置
}
func NewMongoClient(config *ConfigMongoClient) (*MongoClient, error) {
+ var ctx = context.Background()
var err error
c := &MongoClient{config: config}
- c.databaseName = c.config.DatabaseName
-
// 连接到MongoDB
if c.config.Dns != "" {
- c.Db, err = mongo.Connect(context.Background(), options.Client().ApplyURI(c.config.Dns))
+ c.Db, err = mongo.Connect(ctx, options.Client().ApplyURI(c.config.Dns))
if err != nil {
return nil, errors.New(fmt.Sprintf("连接失败:%v", err))
}
} else {
- c.Db, err = mongo.Connect(context.Background(), c.config.Opts)
+ c.Db, err = mongo.Connect(ctx, c.config.Opts)
if err != nil {
return nil, errors.New(fmt.Sprintf("连接失败:%v", err))
}
}
// 检查连接
- err = c.Db.Ping(context.TODO(), nil)
+ err = c.Db.Ping(ctx, nil)
if err != nil {
return nil, errors.New(fmt.Sprintf("检查连接失败:%v", err))
}
@@ -54,10 +48,6 @@ func NewMongoClient(config *ConfigMongoClient) (*MongoClient, error) {
}
// Close 关闭
-func (c *MongoClient) Close() error {
- err := c.Db.Disconnect(context.TODO())
- if err != nil {
- return errors.New(fmt.Sprintf("关闭失败:%v", err))
- }
- return nil
+func (c *MongoClient) Close(ctx context.Context) error {
+ return c.Db.Disconnect(ctx)
}
diff --git a/vendor/go.dtapp.net/dorm/mongo_bson.go b/vendor/go.dtapp.net/dorm/mongo_bson.go
deleted file mode 100644
index c382773..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_bson.go
+++ /dev/null
@@ -1 +0,0 @@
-package dorm
diff --git a/vendor/go.dtapp.net/dorm/mongo_collection.go b/vendor/go.dtapp.net/dorm/mongo_collection.go
new file mode 100644
index 0000000..a84bfb6
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_collection.go
@@ -0,0 +1,56 @@
+package dorm
+
+import (
+ "context"
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type MongoCollectionOptions struct {
+ dbCollection *mongo.Collection // 集合
+}
+
+// Collection 选择集合
+func (cd *MongoDatabaseOptions) Collection(name string, opts ...*options.CollectionOptions) *MongoCollectionOptions {
+ return &MongoCollectionOptions{
+ dbCollection: cd.dbDatabase.Collection(name, opts...),
+ }
+}
+
+// CreateOneIndexes 创建一个索引
+func (cc *MongoCollectionOptions) CreateOneIndexes(ctx context.Context, key string, value string) (string, error) {
+ return cc.dbCollection.Indexes().CreateOne(ctx, mongo.IndexModel{
+ Keys: bson.D{{
+ Key: key,
+ Value: value,
+ }},
+ })
+}
+
+// CreateOneUniqueIndexes 创建一个唯一索引
+func (cc *MongoCollectionOptions) CreateOneUniqueIndexes(ctx context.Context, key string, value string) (string, error) {
+ return cc.dbCollection.Indexes().CreateOne(ctx, mongo.IndexModel{
+ Keys: bson.D{{
+ Key: key,
+ Value: value,
+ }},
+ Options: options.Index().SetUnique(true),
+ })
+}
+
+// CreateOneUniqueIndexesOpts 创建一个自定义选项的索引
+func (cc *MongoCollectionOptions) CreateOneUniqueIndexesOpts(ctx context.Context, key string, value string, opts *options.IndexOptions) (string, error) {
+ return cc.dbCollection.Indexes().CreateOne(ctx, mongo.IndexModel{
+ Keys: bson.D{{
+ Key: key,
+ Value: value,
+ }},
+ Options: opts,
+ })
+}
+
+// CreateManyIndexes 创建多个索引
+func (cc *MongoCollectionOptions) CreateManyIndexes(ctx context.Context, models []mongo.IndexModel) ([]string, error) {
+ return cc.dbCollection.Indexes().CreateMany(ctx, models)
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_collection_curd.go b/vendor/go.dtapp.net/dorm/mongo_collection_curd.go
new file mode 100644
index 0000000..3309494
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_collection_curd.go
@@ -0,0 +1,47 @@
+package dorm
+
+import (
+ "context"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// InsertOne 插入一个文档
+func (cc *MongoCollectionOptions) InsertOne(ctx context.Context, document interface{}, opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error) {
+ return cc.dbCollection.InsertOne(ctx, document, opts...)
+}
+
+// InsertMany 插入多个文档
+func (cc *MongoCollectionOptions) InsertMany(ctx context.Context, document []interface{}, opts ...*options.InsertManyOptions) (*mongo.InsertManyResult, error) {
+ return cc.dbCollection.InsertMany(ctx, document, opts...)
+}
+
+// DeleteOne 删除一个文档
+func (cc *MongoCollectionOptions) DeleteOne(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*mongo.DeleteResult, error) {
+ return cc.dbCollection.DeleteOne(ctx, filter, opts...)
+}
+
+// DeleteMany 删除多个文档
+func (cc *MongoCollectionOptions) DeleteMany(ctx context.Context, filter interface{}, opts ...*options.DeleteOptions) (*mongo.DeleteResult, error) {
+ return cc.dbCollection.DeleteMany(ctx, filter, opts...)
+}
+
+// UpdateOne 更新一个文档
+func (cc *MongoCollectionOptions) UpdateOne(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*mongo.UpdateResult, error) {
+ return cc.dbCollection.UpdateOne(ctx, filter, update, opts...)
+}
+
+// UpdateMany 更新多个文档
+func (cc *MongoCollectionOptions) UpdateMany(ctx context.Context, filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*mongo.UpdateResult, error) {
+ return cc.dbCollection.UpdateMany(ctx, filter, update, opts...)
+}
+
+// FindOne 查询一个文档
+func (cc *MongoCollectionOptions) FindOne(ctx context.Context, filter interface{}, opts ...*options.FindOneOptions) *mongo.SingleResult {
+ return cc.dbCollection.FindOne(ctx, filter, opts...)
+}
+
+// Find 查询多个文档
+func (cc *MongoCollectionOptions) Find(ctx context.Context, filter interface{}, opts ...*options.FindOptions) (*mongo.Cursor, error) {
+ return cc.dbCollection.Find(ctx, filter, opts...)
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_curd.go b/vendor/go.dtapp.net/dorm/mongo_curd.go
deleted file mode 100644
index 80a4908..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_curd.go
+++ /dev/null
@@ -1,185 +0,0 @@
-package dorm
-
-import (
- "context"
- "go.mongodb.org/mongo-driver/bson"
- "go.mongodb.org/mongo-driver/mongo"
- "go.mongodb.org/mongo-driver/mongo/options"
-)
-
-// InsertOne 插入一个文档
-func (c *MongoClient) InsertOne(document interface{}) (result *mongo.InsertOneResult, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- result, err = collection.InsertOne(context.TODO(), document)
- return
-}
-
-// InsertMany 插入多个文档
-func (c *MongoClient) InsertMany(documents []interface{}) (result *mongo.InsertManyResult, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- result, err = collection.InsertMany(context.TODO(), documents)
- return
-}
-
-// Delete 删除文档
-func (c *MongoClient) Delete(filter interface{}) (err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- _, err = collection.DeleteOne(context.TODO(), filter)
- return
-}
-
-// DeleteId 删除文档
-func (c *MongoClient) DeleteId(id interface{}) (err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- _, err = collection.DeleteOne(context.TODO(), bson.M{"_id": id})
- return
-}
-
-// DeleteMany 删除多个文档
-func (c *MongoClient) DeleteMany(filter interface{}) (result *mongo.DeleteResult, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- result, err = collection.DeleteMany(context.TODO(), filter)
- return
-}
-
-// UpdateOne 更新单个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (c *MongoClient) UpdateOne(filter interface{}, update interface{}) (err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- _, err = collection.UpdateOne(context.TODO(), filter, update)
- return
-}
-
-// UpdateId 更新单个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (c *MongoClient) UpdateId(id interface{}, update interface{}) (err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- _, err = collection.UpdateOne(context.TODO(), bson.M{"_id": id}, update)
- return
-}
-
-// UpdateMany 更新多个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (c *MongoClient) UpdateMany(filter interface{}, update interface{}) (result *mongo.UpdateResult, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- result, err = collection.UpdateMany(context.TODO(), filter, update)
- return
-}
-
-type FindResultI interface {
- Many(result interface{}) error
-}
-
-// Find 查询
-func (c *MongoClient) Find(filter interface{}) (*mongo.Cursor, error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- return collection.Find(context.TODO(), filter)
-}
-
-type FindOneResultI interface {
- One(result interface{}) error
-}
-
-// FindOne 查询单个文档
-func (c *MongoClient) FindOne(filter interface{}) *mongo.SingleResult {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- return collection.FindOne(context.TODO(), filter)
-}
-
-type FindManyResultI interface {
- Many(result interface{}) error
-}
-
-// FindMany 查询多个文档
-func (c *MongoClient) FindMany(filter interface{}) (*mongo.Cursor, error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- return collection.Find(context.TODO(), filter)
-}
-
-// FindManyByFilters 多条件查询
-func (c *MongoClient) FindManyByFilters(filter interface{}) (result *mongo.Cursor, err error) {
- collection, err := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName).Clone()
- result, err = collection.Find(context.TODO(), bson.M{"$and": filter})
- return result, err
-}
-
-// FindManyByFiltersSort 多条件查询支持排序
-func (c *MongoClient) FindManyByFiltersSort(filter interface{}, Sort interface{}) (result *mongo.Cursor, err error) {
- collection, err := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName).Clone()
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- result, err = collection.Find(context.TODO(), filter, findOptions)
- return result, err
-}
-
-// FindCollection 查询集合文档
-func (c *MongoClient) FindCollection(Limit int64) (result *mongo.Cursor, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- findOptions := options.Find()
- findOptions.SetLimit(Limit)
- result, err = collection.Find(context.TODO(), bson.D{{}}, findOptions)
- return result, err
-}
-
-// FindCollectionSort 查询集合文档支持排序
-func (c *MongoClient) FindCollectionSort(Sort interface{}, Limit int64) (result *mongo.Cursor, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- findOptions.SetLimit(Limit)
- result, err = collection.Find(context.TODO(), bson.D{{}}, findOptions)
- return result, err
-}
-
-// FindManyCollectionSort 查询集合文档支持排序支持条件
-func (c *MongoClient) FindManyCollectionSort(filter interface{}, Sort interface{}) (result *mongo.Cursor, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- result, err = collection.Find(context.TODO(), filter, findOptions)
- return result, err
-}
-
-// CollectionCount 查询集合里有多少数据
-func (c *MongoClient) CollectionCount() (name string, size int64) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- name = collection.Name()
- size, _ = collection.EstimatedDocumentCount(context.TODO())
- return name, size
-}
-
-// CollectionDocuments 按选项查询集合
-// Skip 跳过
-// Limit 读取数量
-// sort 1 ,-1 . 1 为升序 , -1 为降序
-func (c *MongoClient) CollectionDocuments(Skip, Limit int64, sort int, key string, value interface{}) (result *mongo.Cursor, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- SORT := bson.D{{"_id", sort}}
- filter := bson.D{{key, value}}
- findOptions := options.Find().SetSort(SORT).SetLimit(Limit).SetSkip(Skip)
- result, err = collection.Find(context.TODO(), filter, findOptions)
- return result, err
-}
-
-// AggregateByFiltersSort 统计分析
-func (c *MongoClient) AggregateByFiltersSort(pipeline interface{}) (result *mongo.Cursor, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- result, err = collection.Aggregate(context.TODO(), pipeline)
- return result, err
-}
-
-// CountDocumentsByFilters 统计数量
-func (c *MongoClient) CountDocumentsByFilters(filter interface{}) (count int64, err error) {
- collection := c.Db.Database(c.getDatabaseName()).Collection(c.collectionName)
- count, err = collection.CountDocuments(context.TODO(), filter)
- return count, err
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_curl_find.go b/vendor/go.dtapp.net/dorm/mongo_curl_find.go
deleted file mode 100644
index 3219492..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_curl_find.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package dorm
-
-import (
- "context"
- "go.mongodb.org/mongo-driver/mongo"
-)
-
-type FindResult struct {
- cursor *mongo.Cursor
- err error
-}
-
-func (f *FindResult) Many(result interface{}) error {
- return f.cursor.All(context.TODO(), result)
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_curl_find_many.go b/vendor/go.dtapp.net/dorm/mongo_curl_find_many.go
deleted file mode 100644
index 192edf6..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_curl_find_many.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package dorm
-
-import (
- "context"
- "go.mongodb.org/mongo-driver/mongo"
-)
-
-type FindManyResult struct {
- cursor *mongo.Cursor
- err error
-}
-
-func (f *FindManyResult) Many(result interface{}) error {
- return f.cursor.All(context.TODO(), result)
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_curl_find_one.go b/vendor/go.dtapp.net/dorm/mongo_curl_find_one.go
deleted file mode 100644
index 4ef3666..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_curl_find_one.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package dorm
-
-import "go.mongodb.org/mongo-driver/mongo"
-
-type FindOneResult struct {
- singleResult *mongo.SingleResult
-}
-
-func (f *FindOneResult) One(result interface{}) error {
- return f.singleResult.Decode(result)
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_database.go b/vendor/go.dtapp.net/dorm/mongo_database.go
new file mode 100644
index 0000000..75749f4
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_database.go
@@ -0,0 +1,28 @@
+package dorm
+
+import (
+ "context"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type MongoDatabaseOptions struct {
+ dbDatabase *mongo.Database // 数据库
+}
+
+// Database 选择数据库
+func (c *MongoClient) Database(name string, opts ...*options.DatabaseOptions) *MongoDatabaseOptions {
+ return &MongoDatabaseOptions{
+ dbDatabase: c.Db.Database(name, opts...),
+ }
+}
+
+// CreateCollection 创建集合
+func (cd *MongoDatabaseOptions) CreateCollection(ctx context.Context, name string, opts ...*options.CreateCollectionOptions) error {
+ return cd.dbDatabase.CreateCollection(ctx, name, opts...)
+}
+
+// CreateTimeSeriesCollection 创建时间序列集合
+func (cd *MongoDatabaseOptions) CreateTimeSeriesCollection(ctx context.Context, name string, timeField string) error {
+ return cd.dbDatabase.CreateCollection(ctx, name, options.CreateCollection().SetTimeSeriesOptions(options.TimeSeries().SetTimeField(timeField)))
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_get.go b/vendor/go.dtapp.net/dorm/mongo_get.go
index 8ccd7f6..eaa58b5 100644
--- a/vendor/go.dtapp.net/dorm/mongo_get.go
+++ b/vendor/go.dtapp.net/dorm/mongo_get.go
@@ -6,13 +6,3 @@ import "go.mongodb.org/mongo-driver/mongo"
func (c *MongoClient) GetDb() *mongo.Client {
return c.Db
}
-
-// 获取库名
-func (c *MongoClient) getDatabaseName() string {
- return c.databaseName
-}
-
-// 获取表名
-func (c *MongoClient) getCollectionName() string {
- return c.collectionName
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_options.go b/vendor/go.dtapp.net/dorm/mongo_options.go
deleted file mode 100644
index c382773..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_options.go
+++ /dev/null
@@ -1 +0,0 @@
-package dorm
diff --git a/vendor/go.dtapp.net/dorm/mongo_session.go b/vendor/go.dtapp.net/dorm/mongo_session.go
new file mode 100644
index 0000000..b66ded2
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_session.go
@@ -0,0 +1,50 @@
+package dorm
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "go.mongodb.org/mongo-driver/mongo"
+)
+
+type MongoSessionOptions struct {
+ Session mongo.SessionContext // 会话
+ Db *mongo.Client // 驱动
+ startSession mongo.Session // 开始会话
+}
+
+// Begin 开始事务,会同时创建开始会话需要在退出时关闭会话
+func (c *MongoClient) Begin() (ms *MongoSessionOptions, err error) {
+
+ var ctx = context.Background()
+
+ ms = &MongoSessionOptions{Db: c.Db}
+
+ // 开始会话
+ ms.startSession, err = ms.Db.StartSession()
+ if err != nil {
+ return nil, errors.New(fmt.Sprintf("开始会话失败:%v", err))
+ }
+
+ // 会话上下文
+ ms.Session = mongo.NewSessionContext(ctx, ms.startSession)
+
+ // 会话开启事务
+ err = ms.startSession.StartTransaction()
+ return ms, err
+}
+
+// Close 关闭会话
+func (cs *MongoSessionOptions) Close(ctx context.Context) {
+ cs.startSession.EndSession(ctx)
+}
+
+// Rollback 回滚事务
+func (cs *MongoSessionOptions) Rollback(ctx context.Context) error {
+ return cs.startSession.AbortTransaction(ctx)
+}
+
+// Commit 提交事务
+func (cs *MongoSessionOptions) Commit(ctx context.Context) error {
+ return cs.startSession.CommitTransaction(ctx)
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_session_collection.go b/vendor/go.dtapp.net/dorm/mongo_session_collection.go
new file mode 100644
index 0000000..31e3905
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_session_collection.go
@@ -0,0 +1,19 @@
+package dorm
+
+import (
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type MongoSessionCollectionOptions struct {
+ session mongo.SessionContext // 会话
+ dbCollection *mongo.Collection // 集合
+}
+
+// Collection 选择集合
+func (csd *MongoSessionDatabaseOptions) Collection(name string, opts ...*options.CollectionOptions) *MongoSessionCollectionOptions {
+ return &MongoSessionCollectionOptions{
+ session: csd.session, // 会话
+ dbCollection: csd.dbDatabase.Collection(name, opts...), // 集合
+ }
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_session_collection_curd.go b/vendor/go.dtapp.net/dorm/mongo_session_collection_curd.go
new file mode 100644
index 0000000..7bca5a7
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_session_collection_curd.go
@@ -0,0 +1,46 @@
+package dorm
+
+import (
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// InsertOne 插入一个文档
+func (csc *MongoSessionCollectionOptions) InsertOne(document interface{}, opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error) {
+ return csc.dbCollection.InsertOne(csc.session, document, opts...)
+}
+
+// InsertMany 插入多个文档
+func (csc *MongoSessionCollectionOptions) InsertMany(document []interface{}, opts ...*options.InsertManyOptions) (*mongo.InsertManyResult, error) {
+ return csc.dbCollection.InsertMany(csc.session, document, opts...)
+}
+
+// DeleteOne 删除一个文档
+func (csc *MongoSessionCollectionOptions) DeleteOne(filter interface{}, opts ...*options.DeleteOptions) (*mongo.DeleteResult, error) {
+ return csc.dbCollection.DeleteOne(csc.session, filter, opts...)
+}
+
+// DeleteMany 删除多个文档
+func (csc *MongoSessionCollectionOptions) DeleteMany(filter interface{}, opts ...*options.DeleteOptions) (*mongo.DeleteResult, error) {
+ return csc.dbCollection.DeleteMany(csc.session, filter, opts...)
+}
+
+// UpdateOne 更新一个文档
+func (csc *MongoSessionCollectionOptions) UpdateOne(filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*mongo.UpdateResult, error) {
+ return csc.dbCollection.UpdateOne(csc.session, filter, update, opts...)
+}
+
+// UpdateMany 更新多个文档
+func (csc *MongoSessionCollectionOptions) UpdateMany(filter interface{}, update interface{}, opts ...*options.UpdateOptions) (*mongo.UpdateResult, error) {
+ return csc.dbCollection.UpdateMany(csc.session, filter, update, opts...)
+}
+
+// FindOne 查询一个文档
+func (csc *MongoSessionCollectionOptions) FindOne(filter interface{}, opts ...*options.FindOneOptions) *mongo.SingleResult {
+ return csc.dbCollection.FindOne(csc.session, filter, opts...)
+}
+
+// Find 查询多个文档
+func (csc *MongoSessionCollectionOptions) Find(filter interface{}, opts ...*options.FindOptions) (*mongo.Cursor, error) {
+ return csc.dbCollection.Find(csc.session, filter, opts...)
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_session_database.go b/vendor/go.dtapp.net/dorm/mongo_session_database.go
new file mode 100644
index 0000000..1d2952c
--- /dev/null
+++ b/vendor/go.dtapp.net/dorm/mongo_session_database.go
@@ -0,0 +1,19 @@
+package dorm
+
+import (
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+type MongoSessionDatabaseOptions struct {
+ session mongo.SessionContext // 会话
+ dbDatabase *mongo.Database // 数据库
+}
+
+// Database 选择数据库
+func (cs *MongoSessionOptions) Database(name string, opts ...*options.DatabaseOptions) *MongoSessionDatabaseOptions {
+ return &MongoSessionDatabaseOptions{
+ session: cs.Session, // 会话
+ dbDatabase: cs.Db.Database(name, opts...), // 数据库
+ }
+}
diff --git a/vendor/go.dtapp.net/dorm/mongo_set.go b/vendor/go.dtapp.net/dorm/mongo_set.go
deleted file mode 100644
index 818b203..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_set.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package dorm
-
-import "reflect"
-
-// Database 设置库名
-func (c *MongoClient) Database(databaseName string) *MongoClient {
- c.databaseName = databaseName
- return c
-}
-
-// Collection 设置表名
-func (c *MongoClient) Collection(collectionName string) *MongoClient {
- c.collectionName = collectionName
- return c
-}
-
-// Model 传入模型自动获取库名和表名
-func (c *MongoClient) Model(value interface{}) *MongoClient {
- // https://studygolang.com/articles/896
- val := reflect.ValueOf(value)
- if methodValue := val.MethodByName("Database"); methodValue.IsValid() {
- c.databaseName = methodValue.Call(nil)[0].String()
- }
- if methodValue := val.MethodByName("TableName"); methodValue.IsValid() {
- c.collectionName = methodValue.Call(nil)[0].String()
- }
- return c
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_time.go b/vendor/go.dtapp.net/dorm/mongo_time.go
index b51646c..6331c35 100644
--- a/vendor/go.dtapp.net/dorm/mongo_time.go
+++ b/vendor/go.dtapp.net/dorm/mongo_time.go
@@ -7,44 +7,79 @@ import (
"time"
)
-// BsonTime 类型
+// BsonTime 时间类型
type BsonTime time.Time
-// Value 时间类型
-func (t BsonTime) Value() string {
- return gotime.SetCurrent(time.Time(t)).Bson()
-}
-
// MarshalJSON 实现json序列化
-func (t BsonTime) MarshalJSON() ([]byte, error) {
- //log.Println("MarshalJSON")
+func (bt BsonTime) MarshalJSON() ([]byte, error) {
+
b := make([]byte, 0)
- b = append(b, gotime.SetCurrent(time.Time(t)).Bson()...)
+
+ b = append(b, gotime.SetCurrent(time.Time(bt)).Bson()...)
+
return b, nil
}
// UnmarshalJSON 实现json反序列化
-func (t *BsonTime) UnmarshalJSON(data []byte) (err error) {
- //log.Println("UnmarshalJSON")
- t1 := gotime.SetCurrentParse(string(data))
- *t = BsonTime(t1.Time)
- return
+func (bt *BsonTime) UnmarshalJSON(data []byte) (err error) {
+
+ if string(data) == "null" {
+ return nil
+ }
+
+ bsonTime := gotime.SetCurrentParse(string(data))
+
+ *bt = BsonTime(bsonTime.Time)
+
+ return nil
+}
+
+func (bt BsonTime) Time() time.Time {
+ return gotime.SetCurrent(time.Time(bt)).Time
+}
+
+func (bt BsonTime) Format() string {
+ return gotime.SetCurrent(time.Time(bt)).Format()
+}
+
+func (bt BsonTime) TimePro() gotime.Pro {
+ return gotime.SetCurrent(time.Time(bt))
+}
+
+// NewBsonTimeCurrent 创建当前时间
+func NewBsonTimeCurrent() BsonTime {
+ return BsonTime(gotime.Current().Time)
+}
+
+// NewBsonTimeFromTime 创建某个时间
+func NewBsonTimeFromTime(t time.Time) BsonTime {
+ return BsonTime(t)
+}
+
+// NewBsonTimeFromString 创建某个时间 字符串
+func NewBsonTimeFromString(t string) BsonTime {
+ return BsonTime(gotime.SetCurrentParse(t).Time)
+}
+
+// Value 时间类型
+func (bt BsonTime) Value() string {
+ return gotime.SetCurrent(time.Time(bt)).Bson()
}
// MarshalBSONValue 实现bson序列化
-func (t BsonTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
+func (bt BsonTime) MarshalBSONValue() (bsontype.Type, []byte, error) {
//log.Println("MarshalBSONValue")
- targetTime := gotime.SetCurrent(time.Time(t)).Bson()
+ targetTime := gotime.SetCurrent(time.Time(bt)).Bson()
return bson.MarshalValue(targetTime)
}
// UnmarshalBSONValue 实现bson反序列化
-func (t *BsonTime) UnmarshalBSONValue(t2 bsontype.Type, data []byte) error {
+func (bt *BsonTime) UnmarshalBSONValue(t2 bsontype.Type, data []byte) error {
//log.Println("UnmarshalBSONValue")
t1 := gotime.SetCurrentParse(string(data))
//if string(data) == "" {
// return errors.New(fmt.Sprintf("%s, %s, %s", "读取数据失败:", t2, data))
//}
- *t = BsonTime(t1.Time)
+ *bt = BsonTime(t1.Time)
return nil
}
diff --git a/vendor/go.dtapp.net/dorm/mongo_transaction.go b/vendor/go.dtapp.net/dorm/mongo_transaction.go
deleted file mode 100644
index bec0430..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_transaction.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package dorm
-
-import (
- "context"
- "go.mongodb.org/mongo-driver/mongo"
-)
-
-type MongoTransaction struct {
- startSession mongo.Session
- Session mongo.SessionContext
- db *mongo.Client // 驱动
- databaseName string // 库名
- collectionName string // 表名
-}
-
-// Begin 开始事务,会同时创建开始会话需要在退出时关闭会话
-func (c *MongoClient) Begin() (ms MongoTransaction, err error) {
-
- ms.db = c.Db
-
- // 开始会话
- ms.startSession, err = ms.db.StartSession()
- if err != nil {
- panic(err)
- }
-
- // 会话上下文
- ms.Session = mongo.NewSessionContext(context.Background(), ms.startSession)
-
- // 会话开启事务
- err = ms.startSession.StartTransaction()
- return ms, err
-}
-
-// Close 关闭会话
-func (ms *MongoTransaction) Close() {
- ms.startSession.EndSession(context.TODO())
-}
-
-// Rollback 回滚事务
-func (ms *MongoTransaction) Rollback() error {
- return ms.startSession.AbortTransaction(context.Background())
-}
-
-// Commit 提交事务
-func (ms *MongoTransaction) Commit() error {
- return ms.startSession.CommitTransaction(context.Background())
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_transaction_curd.go b/vendor/go.dtapp.net/dorm/mongo_transaction_curd.go
deleted file mode 100644
index e7cfd81..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_transaction_curd.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package dorm
-
-import (
- "context"
- "go.mongodb.org/mongo-driver/bson"
- "go.mongodb.org/mongo-driver/mongo"
- "go.mongodb.org/mongo-driver/mongo/options"
-)
-
-// InsertOne 插入一个文档
-func (ms *MongoTransaction) InsertOne(document interface{}) (result *mongo.InsertOneResult, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- result, err = collection.InsertOne(ms.Session, document)
- return
-}
-
-// InsertMany 插入多个文档
-func (ms *MongoTransaction) InsertMany(documents []interface{}) (result *mongo.InsertManyResult, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- result, err = collection.InsertMany(ms.Session, documents)
- return
-}
-
-// Delete 删除文档
-func (ms *MongoTransaction) Delete(filter interface{}) (err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- _, err = collection.DeleteOne(ms.Session, filter)
- return
-}
-
-// DeleteId 删除文档
-func (ms *MongoTransaction) DeleteId(id interface{}) (err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- _, err = collection.DeleteOne(ms.Session, bson.M{"_id": id})
- return
-}
-
-// DeleteMany 删除多个文档
-func (ms *MongoTransaction) DeleteMany(key string, value interface{}) (result *mongo.DeleteResult, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- filter := bson.D{{key, value}}
- result, err = collection.DeleteMany(ms.Session, filter)
- return
-}
-
-// UpdateOne 更新单个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (ms *MongoTransaction) UpdateOne(filter interface{}, update interface{}) (err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- _, err = collection.UpdateOne(ms.Session, filter, update)
- return
-}
-
-// UpdateId 更新单个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (ms *MongoTransaction) UpdateId(id interface{}, update interface{}) (err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- _, err = collection.UpdateOne(context.TODO(), bson.M{"_id": id}, update)
- return
-}
-
-// UpdateMany 更新多个文档
-// 修改字段的值($set)
-// 字段增加值 inc($inc)
-// 从数组中增加一个元素 push($push)
-// 从数组中删除一个元素 pull($pull)
-func (ms *MongoTransaction) UpdateMany(filter interface{}, update interface{}) (result *mongo.UpdateResult, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- result, err = collection.UpdateMany(ms.Session, filter, update)
- return
-}
-
-// Find 查询
-func (ms *MongoTransaction) Find(filter interface{}) (*mongo.Cursor, error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- return collection.Find(ms.Session, filter)
-}
-
-// FindOne 查询单个文档
-func (ms *MongoTransaction) FindOne(filter interface{}) *mongo.SingleResult {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- return collection.FindOne(ms.Session, filter)
-}
-
-// FindMany 查询多个文档
-func (ms *MongoTransaction) FindMany(filter interface{}) (*mongo.Cursor, error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- return collection.Find(ms.Session, filter)
-}
-
-// FindManyByFilters 多条件查询
-func (ms *MongoTransaction) FindManyByFilters(filter interface{}) (result *mongo.Cursor, err error) {
- collection, err := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName).Clone()
- result, err = collection.Find(ms.Session, bson.M{"$and": filter})
- return result, err
-}
-
-// FindManyByFiltersSort 多条件查询支持排序
-func (ms *MongoTransaction) FindManyByFiltersSort(filter interface{}, Sort interface{}) (result *mongo.Cursor, err error) {
- collection, err := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName).Clone()
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- result, err = collection.Find(ms.Session, filter, findOptions)
- return result, err
-}
-
-// FindCollection 查询集合文档
-func (ms *MongoTransaction) FindCollection(Limit int64) (result *mongo.Cursor, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- findOptions := options.Find()
- findOptions.SetLimit(Limit)
- result, err = collection.Find(ms.Session, bson.D{{}}, findOptions)
- return result, err
-}
-
-// FindCollectionSort 查询集合文档支持排序
-func (ms *MongoTransaction) FindCollectionSort(Sort interface{}, Limit int64) (result *mongo.Cursor, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- findOptions.SetLimit(Limit)
- result, err = collection.Find(ms.Session, bson.D{{}}, findOptions)
- return result, err
-}
-
-// FindManyCollectionSort 查询集合文档支持排序支持条件
-func (ms *MongoTransaction) FindManyCollectionSort(filter interface{}, Sort interface{}) (result *mongo.Cursor, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- findOptions := options.Find()
- findOptions.SetSort(Sort)
- result, err = collection.Find(ms.Session, filter, findOptions)
- return result, err
-}
-
-// CollectionCount 查询集合里有多少数据
-func (ms *MongoTransaction) CollectionCount(ctx context.Context) (name string, size int64) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- name = collection.Name()
- size, _ = collection.EstimatedDocumentCount(ctx)
- return name, size
-}
-
-// CollectionDocuments 按选项查询集合
-// Skip 跳过
-// Limit 读取数量
-// sort 1 ,-1 . 1 为升序 , -1 为降序
-func (ms *MongoTransaction) CollectionDocuments(Skip, Limit int64, sort int, key string, value interface{}) (result *mongo.Cursor, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- SORT := bson.D{{"_id", sort}}
- filter := bson.D{{key, value}}
- findOptions := options.Find().SetSort(SORT).SetLimit(Limit).SetSkip(Skip)
- result, err = collection.Find(ms.Session, filter, findOptions)
- return result, err
-}
-
-// AggregateByFiltersSort 统计分析
-func (ms *MongoTransaction) AggregateByFiltersSort(pipeline interface{}) (result *mongo.Cursor, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- result, err = collection.Aggregate(ms.Session, pipeline)
- return result, err
-}
-
-// CountDocumentsByFilters 统计数量
-func (ms *MongoTransaction) CountDocumentsByFilters(filter interface{}) (count int64, err error) {
- collection := ms.db.Database(ms.getDatabaseName()).Collection(ms.collectionName)
- count, err = collection.CountDocuments(ms.Session, filter)
- return count, err
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_transaction_get.go b/vendor/go.dtapp.net/dorm/mongo_transaction_get.go
deleted file mode 100644
index 3926370..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_transaction_get.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package dorm
-
-// 获取库名
-func (ms *MongoTransaction) getDatabaseName() string {
- return ms.databaseName
-}
-
-// 获取表名
-func (ms *MongoTransaction) getCollectionName() string {
- return ms.collectionName
-}
diff --git a/vendor/go.dtapp.net/dorm/mongo_transaction_set.go b/vendor/go.dtapp.net/dorm/mongo_transaction_set.go
deleted file mode 100644
index e36daa6..0000000
--- a/vendor/go.dtapp.net/dorm/mongo_transaction_set.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package dorm
-
-import "reflect"
-
-// Database 设置库名
-func (ms *MongoTransaction) Database(databaseName string) *MongoTransaction {
- ms.databaseName = databaseName
- return ms
-}
-
-// Collection 设置表名
-func (ms *MongoTransaction) Collection(collectionName string) *MongoTransaction {
- ms.collectionName = collectionName
- return ms
-}
-
-// Model 传入模型自动获取库名和表名
-func (ms *MongoTransaction) Model(value interface{}) *MongoTransaction {
- // https://studygolang.com/articles/896
- val := reflect.ValueOf(value)
- if methodValue := val.MethodByName("Database"); methodValue.IsValid() {
- ms.databaseName = methodValue.Call(nil)[0].String()
- }
- if methodValue := val.MethodByName("TableName"); methodValue.IsValid() {
- ms.collectionName = methodValue.Call(nil)[0].String()
- }
- return ms
-}
diff --git a/vendor/go.dtapp.net/goip/.gitignore b/vendor/go.dtapp.net/goip/.gitignore
index 27a3bc6..557a87e 100644
--- a/vendor/go.dtapp.net/goip/.gitignore
+++ b/vendor/go.dtapp.net/goip/.gitignore
@@ -6,3 +6,5 @@
*.log
*_test.go
gomod.sh
+*.zip
+*.tar.gz
\ No newline at end of file
diff --git a/vendor/go.dtapp.net/goip/analyse.go b/vendor/go.dtapp.net/goip/analyse.go
index 10fcab7..d61d496 100644
--- a/vendor/go.dtapp.net/goip/analyse.go
+++ b/vendor/go.dtapp.net/goip/analyse.go
@@ -1,53 +1,47 @@
package goip
import (
+ "go.dtapp.net/goip/geoip"
+ "go.dtapp.net/goip/ip2region"
+ "go.dtapp.net/goip/ip2region_v2"
+ "go.dtapp.net/goip/ipv6wry"
+ "go.dtapp.net/goip/qqwry"
+ "net"
"strconv"
)
-var (
- ipv4 = "IPV4"
- ipv6 = "IPV6"
-)
-
type AnalyseResult struct {
- IP string `json:"ip,omitempty"` // 输入的ip地址
- Country string `json:"country,omitempty"` // 国家或地区
- Province string `json:"province,omitempty"` // 省份
- City string `json:"city,omitempty"` // 城市
- Area string `json:"area,omitempty"` // 区域
- Isp string `json:"isp,omitempty"` // 运营商
+ Ip string `json:"ip,omitempty"`
+ QqwryInfo qqwry.QueryResult `json:"qqwry_info"`
+ Ip2regionInfo ip2region.QueryResult `json:"ip2region_info"`
+ Ip2regionV2info ip2region_v2.QueryResult `json:"ip2regionv2_info"`
+ GeoipInfo geoip.QueryCityResult `json:"geoip_info"`
+ Ipv6wryInfo ipv6wry.QueryResult `json:"ipv6wry_info"`
}
func (c *Client) Analyse(item string) AnalyseResult {
isIp := c.isIpv4OrIpv6(item)
+ ipByte := net.ParseIP(item)
switch isIp {
case ipv4:
- info := c.V4db.Find(item)
- search, err := c.V4Region.MemorySearch(item)
- if err != nil {
- return AnalyseResult{
- IP: info.IP,
- Country: info.Country,
- Area: info.Area,
- }
- } else {
- return AnalyseResult{
- IP: search.IP,
- Country: search.Country,
- Province: search.Province,
- City: search.City,
- Isp: info.Area,
- }
+		qqwryInfo, _ := c.QueryQqWry(ipByte)
+		ip2regionInfo, _ := c.QueryIp2Region(ipByte)
+		ip2regionV2Info, _ := c.QueryIp2RegionV2(ipByte)
+		geoipInfo, _ := c.QueryGeoIp(ipByte)
+		return AnalyseResult{
+			Ip:              ipByte.String(),
+			QqwryInfo:       qqwryInfo,
+ Ip2regionInfo: ip2regionInfo,
+ Ip2regionV2info: ip2regionV2Info,
+ GeoipInfo: geoipInfo,
}
case ipv6:
- info := c.V6db.Find(item)
+ geoipInfo, _ := c.QueryGeoIp(ipByte)
+ ipv6Info, _ := c.QueryIpv6wry(ipByte)
return AnalyseResult{
- IP: info.IP,
- Country: info.Country,
- Province: info.Province,
- City: info.City,
- Area: info.Area,
- Isp: info.Isp,
+ Ip: ipByte.String(),
+ GeoipInfo: geoipInfo,
+ Ipv6wryInfo: ipv6Info,
}
default:
return AnalyseResult{}
diff --git a/vendor/go.dtapp.net/goip/client.go b/vendor/go.dtapp.net/goip/client.go
new file mode 100644
index 0000000..20f3527
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/client.go
@@ -0,0 +1,39 @@
+package goip
+
+import (
+ "go.dtapp.net/goip/geoip"
+ "go.dtapp.net/goip/ip2region"
+ "go.dtapp.net/goip/ip2region_v2"
+ "go.dtapp.net/goip/ipv6wry"
+ "go.dtapp.net/goip/qqwry"
+)
+
+type Client struct {
+ ip2regionV2Client *ip2region_v2.Client
+ ip2regionClient *ip2region.Client
+ qqwryClient *qqwry.Client
+ geoIpClient *geoip.Client
+ ipv6wryClient *ipv6wry.Client
+}
+
+// NewIp 实例化
+func NewIp() *Client {
+
+ c := &Client{}
+
+ c.ip2regionV2Client, _ = ip2region_v2.New()
+
+ c.ip2regionClient = ip2region.New()
+
+ c.qqwryClient = qqwry.New()
+
+ c.geoIpClient, _ = geoip.New()
+
+ c.ipv6wryClient = ipv6wry.New()
+
+ return c
+}
+
+func (c *Client) Close() {
+ c.geoIpClient.Close()
+}
diff --git a/vendor/go.dtapp.net/goip/const.go b/vendor/go.dtapp.net/goip/const.go
index 70adccf..8f0b4f8 100644
--- a/vendor/go.dtapp.net/goip/const.go
+++ b/vendor/go.dtapp.net/goip/const.go
@@ -1,3 +1,3 @@
package goip
-const Version = "1.0.30"
+const Version = "1.0.34"
diff --git a/vendor/go.dtapp.net/goip/geoip/GeoLite2-ASN.mmdb b/vendor/go.dtapp.net/goip/geoip/GeoLite2-ASN.mmdb
new file mode 100644
index 0000000..7b1f24d
Binary files /dev/null and b/vendor/go.dtapp.net/goip/geoip/GeoLite2-ASN.mmdb differ
diff --git a/vendor/go.dtapp.net/goip/geoip/GeoLite2-City.mmdb b/vendor/go.dtapp.net/goip/geoip/GeoLite2-City.mmdb
new file mode 100644
index 0000000..0482b28
Binary files /dev/null and b/vendor/go.dtapp.net/goip/geoip/GeoLite2-City.mmdb differ
diff --git a/vendor/go.dtapp.net/goip/geoip/GeoLite2-Country.mmdb b/vendor/go.dtapp.net/goip/geoip/GeoLite2-Country.mmdb
new file mode 100644
index 0000000..ef8c639
Binary files /dev/null and b/vendor/go.dtapp.net/goip/geoip/GeoLite2-Country.mmdb differ
diff --git a/vendor/go.dtapp.net/goip/geoip/client.go b/vendor/go.dtapp.net/goip/geoip/client.go
new file mode 100644
index 0000000..3bb9d01
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/geoip/client.go
@@ -0,0 +1,52 @@
+package geoip
+
+import (
+ _ "embed"
+ "github.com/oschwald/geoip2-golang"
+)
+
+//go:embed GeoLite2-ASN.mmdb
+var asnBuff []byte
+
+//go:embed GeoLite2-City.mmdb
+var cityBuff []byte
+
+//go:embed GeoLite2-Country.mmdb
+var countryBuff []byte
+
+type Client struct {
+ asnDb *geoip2.Reader
+ cityDb *geoip2.Reader
+ countryDb *geoip2.Reader
+}
+
+func New() (*Client, error) {
+
+ var err error
+ c := &Client{}
+
+ c.asnDb, err = geoip2.FromBytes(asnBuff)
+ if err != nil {
+ return nil, err
+ }
+
+ c.cityDb, err = geoip2.FromBytes(cityBuff)
+ if err != nil {
+ return nil, err
+ }
+
+ c.countryDb, err = geoip2.FromBytes(countryBuff)
+ if err != nil {
+ return nil, err
+ }
+
+ return c, err
+}
+
+func (c *Client) Close() {
+
+ c.asnDb.Close()
+ c.cityDb.Close()
+ c.countryDb.Close()
+
+}
diff --git a/vendor/go.dtapp.net/goip/geoip/download.go b/vendor/go.dtapp.net/goip/geoip/download.go
new file mode 100644
index 0000000..2eefdbd
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/geoip/download.go
@@ -0,0 +1,27 @@
+package geoip
+
+import (
+	"io/ioutil"
+	"log"
+	"net/http"
+)
+
+// OnlineDownload downloads a database file into the current directory.
+func OnlineDownload(downloadUrl string, downloadName string) {
+	resp, err := http.Get(downloadUrl)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+
+	err = ioutil.WriteFile("./"+downloadName, body, 0644)
+	if err != nil {
+		panic(err)
+	}
+	log.Printf("已下载最新 GeoIP 数据库 %s ", "./"+downloadName)
+}
diff --git a/vendor/go.dtapp.net/goip/geoip/download_url.go b/vendor/go.dtapp.net/goip/geoip/download_url.go
new file mode 100644
index 0000000..5fb70aa
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/geoip/download_url.go
@@ -0,0 +1,31 @@
+package geoip
+
+import (
+ "go.dtapp.net/gostring"
+)
+
+var licenseKey = "" // 许可证密钥
+
+func GetGeoLite2AsnDownloadUrl() string {
+ return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
+}
+
+//func GetGeoLite2AsnCsvDownloadUrl() string {
+// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
+//}
+
+func GetGeoLite2CityDownloadUrl() string {
+ return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
+}
+
+//func GetGeoLite2CityCsvDownloadUrl() string {
+// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
+//}
+
+func GetGeoLite2CountryDownloadUrl() string {
+ return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
+}
+
+//func GetGeoLite2CountryCsvDownloadUrl() string {
+// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
+//}
diff --git a/vendor/go.dtapp.net/goip/geoip/query.go b/vendor/go.dtapp.net/goip/geoip/query.go
new file mode 100644
index 0000000..e9b34a8
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/geoip/query.go
@@ -0,0 +1,66 @@
+package geoip
+
+import (
+ _ "embed"
+ "net"
+)
+
+// QueryCityResult 返回
+type QueryCityResult struct {
+ Ip string `json:"ip,omitempty"` // ip
+ Continent struct {
+ Code string `json:"code,omitempty"` // 大陆代码
+ Name string `json:"name,omitempty"` // 大陆名称
+ } `json:"continent,omitempty"`
+ Country struct {
+ Code string `json:"code,omitempty"` // 国家代码
+ Name string `json:"name,omitempty"` // 国家名称
+ } `json:"country,omitempty"`
+ Province struct {
+ Code string `json:"code,omitempty"` // 省份代码
+ Name string `json:"name,omitempty"` // 省份名称
+ } `json:"province,omitempty"`
+ City struct {
+ Name string `json:"name,omitempty"` // 城市名称
+ } `json:"city,omitempty"`
+ Location struct {
+ TimeZone string `json:"time_zone,omitempty"` // 位置时区
+ Latitude float64 `json:"latitude,omitempty"` // 坐标纬度
+ Longitude float64 `json:"longitude,omitempty"` // 坐标经度
+ } `json:"location,omitempty"`
+}
+
+func (c *Client) QueryCity(ipAddress net.IP) (result QueryCityResult, err error) {
+
+ record, err := c.cityDb.City(ipAddress)
+ if err != nil {
+ return QueryCityResult{}, err
+ }
+
+ // ip
+ result.Ip = ipAddress.String()
+
+ // 大陆
+ result.Continent.Code = record.Continent.Code
+ result.Continent.Name = record.Continent.Names["zh-CN"]
+
+ // 国家
+ result.Country.Code = record.Country.IsoCode
+ result.Country.Name = record.Country.Names["zh-CN"]
+
+ // 省份
+ if len(record.Subdivisions) > 0 {
+ result.Province.Code = record.Subdivisions[0].IsoCode
+ result.Province.Name = record.Subdivisions[0].Names["zh-CN"]
+ }
+
+ // 城市
+ result.City.Name = record.City.Names["zh-CN"]
+
+ // 位置
+ result.Location.TimeZone = record.Location.TimeZone
+ result.Location.Latitude = record.Location.Latitude
+ result.Location.Longitude = record.Location.Longitude
+
+ return result, err
+}
diff --git a/vendor/go.dtapp.net/goip/goip.go b/vendor/go.dtapp.net/goip/goip.go
deleted file mode 100644
index 818d629..0000000
--- a/vendor/go.dtapp.net/goip/goip.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package goip
-
-import (
- "go.dtapp.net/goip/ip2region"
- v4 "go.dtapp.net/goip/v4"
- v6 "go.dtapp.net/goip/v6"
- "log"
- "strings"
-)
-
-type Client struct {
- V4Region ip2region.Ip2Region // IPV4
- V4db v4.Pointer // IPV4
- V6db v6.Pointer // IPV6
-}
-
-// NewIp 实例化
-func NewIp() *Client {
- app := &Client{}
- v4Num := app.V4db.InitIPV4Data()
- log.Printf("IPV4 库加载完成 共加载:%d 条 IP 记录\n", v4Num)
- v6Num := app.V6db.InitIPV4Data()
- log.Printf("IPV6 库加载完成 共加载:%d 条 IP 记录\n", v6Num)
- return app
-}
-
-func (c *Client) Ipv4(ip string) (res v4.Result, resInfo ip2region.IpInfo) {
- res = c.V4db.Find(ip)
- resInfo, _ = c.V4Region.MemorySearch(ip)
- return res, resInfo
-}
-
-func (c *Client) Ipv6(ip string) (res v6.Result) {
- res = c.V6db.Find(ip)
- return res
-}
-
-func (c *Client) isIpv4OrIpv6(ip string) string {
- if len(ip) < 7 {
- return ""
- }
- arrIpv4 := strings.Split(ip, ".")
- if len(arrIpv4) == 4 {
- //. 判断IPv4
- for _, val := range arrIpv4 {
- if !c.CheckIpv4(val) {
- return ""
- }
- }
- return ipv4
- }
- arrIpv6 := strings.Split(ip, ":")
- if len(arrIpv6) == 8 {
- // 判断Ipv6
- for _, val := range arrIpv6 {
- if !c.CheckIpv6(val) {
- return "Neither"
- }
- }
- return ipv6
- }
- return ""
-}
diff --git a/vendor/go.dtapp.net/goip/ip.go b/vendor/go.dtapp.net/goip/ip.go
index 28613e9..158c4bb 100644
--- a/vendor/go.dtapp.net/goip/ip.go
+++ b/vendor/go.dtapp.net/goip/ip.go
@@ -9,6 +9,7 @@ import (
// GetInsideIp 内网ip
func GetInsideIp(ctx context.Context) string {
+
conn, err := net.Dial("udp", "8.8.8.8:80")
if err != nil {
panic(err)
@@ -49,27 +50,25 @@ func Ips(ctx context.Context) (map[string]string, error) {
var respGetOutsideIp struct {
Data struct {
- Ip string `json:"ip"`
+ Ip string `json:"ip,omitempty"`
} `json:"data"`
}
// GetOutsideIp 外网ip
-func GetOutsideIp(ctx context.Context) (ip string) {
- ip = "0.0.0.0"
- get := gorequest.NewHttp()
- get.SetUri("https://api.dtapp.net/ip")
- response, err := get.Get(ctx)
+func GetOutsideIp(ctx context.Context) string {
+ // 请求
+ getHttp := gorequest.NewHttp()
+ getHttp.SetUri("https://api.dtapp.net/ip")
+ response, err := getHttp.Get(ctx)
if err != nil {
- return
+ return "0.0.0.0"
}
+ // 解析
err = json.Unmarshal(response.ResponseBody, &respGetOutsideIp)
if err != nil {
- return
- }
- if respGetOutsideIp.Data.Ip == "" {
- return
+ return "0.0.0.0"
}
- ip = respGetOutsideIp.Data.Ip
+	if respGetOutsideIp.Data.Ip == "" { return "0.0.0.0" } // fallback when the API returned no ip (original patch wrongly overwrote the fetched ip)
return respGetOutsideIp.Data.Ip
}
diff --git a/vendor/go.dtapp.net/goip/ip2region/client.go b/vendor/go.dtapp.net/goip/ip2region/client.go
new file mode 100644
index 0000000..19c13c6
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region/client.go
@@ -0,0 +1,103 @@
+package ip2region
+
+import (
+ _ "embed"
+ "errors"
+ "go.dtapp.net/gostring"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ IndexBlockLength = 12
+)
+
+//go:embed ip2region.db
+var dbBuff []byte
+
+type Client struct {
+ // db file handler
+ dbFileHandler *os.File
+
+ //header block info
+
+ headerSip []int64
+ headerPtr []int64
+ headerLen int64
+
+ // super block index info
+ firstIndexPtr int64
+ lastIndexPtr int64
+ totalBlocks int64
+
+ // for memory mode only
+ // the original db binary string
+
+ dbFile string
+}
+
+func New() *Client {
+
+ c := &Client{}
+
+ return c
+}
+
+// 获取Ip信息
+func getIpInfo(ipStr string, cityId int64, line []byte) (result QueryResult) {
+
+ lineSlice := strings.Split(string(line), "|")
+ length := len(lineSlice)
+ result.CityId = cityId
+ if length < 5 {
+ for i := 0; i <= 5-length; i++ {
+ lineSlice = append(lineSlice, "")
+ }
+ }
+
+ if lineSlice[0] != "0" {
+ result.Country = gostring.SpaceAndLineBreak(lineSlice[0])
+ }
+ if lineSlice[1] != "0" {
+ result.Region = gostring.SpaceAndLineBreak(lineSlice[1])
+ }
+ if lineSlice[2] != "0" {
+ result.Province = gostring.SpaceAndLineBreak(lineSlice[2])
+ }
+ if lineSlice[3] != "0" {
+ result.City = gostring.SpaceAndLineBreak(lineSlice[3])
+ }
+ if lineSlice[4] != "0" {
+ result.Isp = gostring.SpaceAndLineBreak(lineSlice[4])
+ }
+
+ result.Ip = ipStr
+ return result
+}
+
+func getLong(b []byte, offset int64) int64 {
+
+ val := int64(b[offset]) |
+ int64(b[offset+1])<<8 |
+ int64(b[offset+2])<<16 |
+ int64(b[offset+3])<<24
+
+ return val
+
+}
+
+func ip2long(IpStr string) (int64, error) {
+ bits := strings.Split(IpStr, ".")
+ if len(bits) != 4 {
+ return 0, errors.New("ip format error")
+ }
+
+ var sum int64
+ for i, n := range bits {
+ bit, _ := strconv.ParseInt(n, 10, 64)
+ sum += bit << uint(24-8*i)
+ }
+
+ return sum, nil
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region/download.go b/vendor/go.dtapp.net/goip/ip2region/download.go
index 2e95e61..e45bb73 100644
--- a/vendor/go.dtapp.net/goip/ip2region/download.go
+++ b/vendor/go.dtapp.net/goip/ip2region/download.go
@@ -2,16 +2,22 @@ package ip2region
import (
"io/ioutil"
+ "log"
"net/http"
)
-func getOnline() ([]byte, error) {
- resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.db?raw=true")
+func OnlineDownload() {
+ resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/v1.0/data/ip2region.db?raw=true")
if err != nil {
- return nil, err
+ panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
- return body, err
+
+ err = ioutil.WriteFile("./ip2region.db", body, 0644)
+ if err != nil {
+ panic(err)
+ }
+ log.Printf("已下载最新 ip2region 数据库 %s ", "./ip2region.db")
}
diff --git a/vendor/go.dtapp.net/goip/ip2region/ip.db b/vendor/go.dtapp.net/goip/ip2region/ip2region.db
similarity index 100%
rename from vendor/go.dtapp.net/goip/ip2region/ip.db
rename to vendor/go.dtapp.net/goip/ip2region/ip2region.db
diff --git a/vendor/go.dtapp.net/goip/ip2region/ip2region.go b/vendor/go.dtapp.net/goip/ip2region/ip2region.go
deleted file mode 100644
index cef1492..0000000
--- a/vendor/go.dtapp.net/goip/ip2region/ip2region.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package ip2region
-
-import (
- _ "embed"
- "errors"
- "go.dtapp.net/gostring"
- "io/ioutil"
- "log"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- IndexBlockLength = 12
-)
-
-type Ip2Region struct {
- // db file handler
- dbFileHandler *os.File
-
- //header block info
-
- headerSip []int64
- headerPtr []int64
- headerLen int64
-
- // super block index info
- firstIndexPtr int64
- lastIndexPtr int64
- totalBlocks int64
-
- // for memory mode only
- // the original db binary string
-
- dbFile string
-}
-
-//go:embed ip.db
-var dbBinStr []byte
-
-type IpInfo struct {
- IP string `json:"ip,omitempty"` // 输入的ip地址
- CityID int64 `json:"city_id,omitempty"` // 城市ID
- Country string `json:"country,omitempty"` // 国家
- Region string `json:"region,omitempty"` // 区域
- Province string `json:"province,omitempty"` // 省份
- City string `json:"city,omitempty"` // 城市
- ISP string `json:"isp,omitempty"` // 运营商
-}
-
-func (ip IpInfo) String() string {
- return ip.IP + "|" + strconv.FormatInt(ip.CityID, 10) + "|" + ip.Country + "|" + ip.Region + "|" + ip.Province + "|" + ip.City + "|" + ip.ISP
-}
-
-// 获取Ip信息
-func getIpInfo(ipStr string, cityId int64, line []byte) (ipInfo IpInfo) {
-
- lineSlice := strings.Split(string(line), "|")
- length := len(lineSlice)
- ipInfo.CityID = cityId
- if length < 5 {
- for i := 0; i <= 5-length; i++ {
- lineSlice = append(lineSlice, "")
- }
- }
-
- if lineSlice[0] != "0" {
- ipInfo.Country = gostring.SpaceAndLineBreak(lineSlice[0])
- }
- if lineSlice[1] != "0" {
- ipInfo.Region = gostring.SpaceAndLineBreak(lineSlice[1])
- }
- if lineSlice[2] != "0" {
- ipInfo.Province = gostring.SpaceAndLineBreak(lineSlice[2])
- }
- if lineSlice[3] != "0" {
- ipInfo.City = gostring.SpaceAndLineBreak(lineSlice[3])
- }
- if lineSlice[4] != "0" {
- ipInfo.ISP = gostring.SpaceAndLineBreak(lineSlice[4])
- }
-
- ipInfo.IP = ipStr
- return ipInfo
-}
-
-// MemorySearch memory算法:整个数据库全部载入内存,单次查询都在0.1x毫秒内
-func (r *Ip2Region) MemorySearch(ipStr string) (ipInfo IpInfo, err error) {
-
- ipInfo.IP = ipStr
- if net.ParseIP(ipStr).To4() == nil {
- if net.ParseIP(ipStr).To16() == nil {
- return ipInfo, err
- }
- }
-
- if r.totalBlocks == 0 {
-
- if err != nil {
-
- return ipInfo, err
- }
-
- r.firstIndexPtr = getLong(dbBinStr, 0)
- r.lastIndexPtr = getLong(dbBinStr, 4)
- r.totalBlocks = (r.lastIndexPtr-r.firstIndexPtr)/IndexBlockLength + 1
- }
-
- ip, err := ip2long(ipStr)
- if err != nil {
- return ipInfo, err
- }
-
- h := r.totalBlocks
- var dataPtr, l int64
- for l <= h {
-
- m := (l + h) >> 1
- p := r.firstIndexPtr + m*IndexBlockLength
- sip := getLong(dbBinStr, p)
- if ip < sip {
- h = m - 1
- } else {
- eip := getLong(dbBinStr, p+4)
- if ip > eip {
- l = m + 1
- } else {
- dataPtr = getLong(dbBinStr, p+8)
- break
- }
- }
- }
- if dataPtr == 0 {
- return ipInfo, errors.New("not found")
- }
-
- dataLen := (dataPtr >> 24) & 0xFF
- dataPtr = dataPtr & 0x00FFFFFF
- ipInfo = getIpInfo(ipStr, getLong(dbBinStr, dataPtr), dbBinStr[(dataPtr)+4:dataPtr+dataLen])
- return ipInfo, nil
-
-}
-
-func getLong(b []byte, offset int64) int64 {
-
- val := int64(b[offset]) |
- int64(b[offset+1])<<8 |
- int64(b[offset+2])<<16 |
- int64(b[offset+3])<<24
-
- return val
-
-}
-
-func ip2long(IpStr string) (int64, error) {
- bits := strings.Split(IpStr, ".")
- if len(bits) != 4 {
- return 0, errors.New("ip format error")
- }
-
- var sum int64
- for i, n := range bits {
- bit, _ := strconv.ParseInt(n, 10, 64)
- sum += bit << uint(24-8*i)
- }
-
- return sum, nil
-}
-
-func (r *Ip2Region) OnlineDownload() (err error) {
- tmpData, err := getOnline()
- if err != nil {
- return errors.New("下载失败 %s" + err.Error())
- }
- if err := ioutil.WriteFile("./ip2region.db", tmpData, 0644); err == nil {
- log.Printf("已下载最新 ip2region 数据库 %s ", "./ip2region.db")
- } else {
- return errors.New("保存失败")
- }
- return nil
-}
diff --git a/vendor/go.dtapp.net/goip/ip2region/qqery.go b/vendor/go.dtapp.net/goip/ip2region/qqery.go
new file mode 100644
index 0000000..330dedc
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region/qqery.go
@@ -0,0 +1,73 @@
+package ip2region
+
+import (
+ "errors"
+ "net"
+ "strconv"
+)
+
+type QueryResult struct {
+ Ip string `json:"ip,omitempty"` // ip
+ CityId int64 `json:"city_id,omitempty"` // 城市代码
+ Country string `json:"country,omitempty"` // 国家
+ Region string `json:"region,omitempty"` // 区域
+ Province string `json:"province,omitempty"` // 省份
+ City string `json:"city,omitempty"` // 城市
+ Isp string `json:"isp,omitempty"` // 运营商
+}
+
+func (ip QueryResult) String() string {
+ return ip.Ip + "|" + strconv.FormatInt(ip.CityId, 10) + "|" + ip.Country + "|" + ip.Region + "|" + ip.Province + "|" + ip.City + "|" + ip.Isp
+}
+
+// Query memory算法:整个数据库全部载入内存,单次查询都在0.1x毫秒内
+func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
+
+ result.Ip = ipAddress.String()
+
+ if c.totalBlocks == 0 {
+
+ if err != nil {
+
+ return QueryResult{}, err
+ }
+
+ c.firstIndexPtr = getLong(dbBuff, 0)
+ c.lastIndexPtr = getLong(dbBuff, 4)
+ c.totalBlocks = (c.lastIndexPtr-c.firstIndexPtr)/IndexBlockLength + 1
+ }
+
+ ip, err := ip2long(result.Ip)
+ if err != nil {
+ return QueryResult{}, err
+ }
+
+ h := c.totalBlocks
+ var dataPtr, l int64
+ for l <= h {
+
+ m := (l + h) >> 1
+ p := c.firstIndexPtr + m*IndexBlockLength
+ sip := getLong(dbBuff, p)
+ if ip < sip {
+ h = m - 1
+ } else {
+ eip := getLong(dbBuff, p+4)
+ if ip > eip {
+ l = m + 1
+ } else {
+ dataPtr = getLong(dbBuff, p+8)
+ break
+ }
+ }
+ }
+ if dataPtr == 0 {
+ return QueryResult{}, errors.New("not found")
+ }
+
+ dataLen := (dataPtr >> 24) & 0xFF
+ dataPtr = dataPtr & 0x00FFFFFF
+ result = getIpInfo(result.Ip, getLong(dbBuff, dataPtr), dbBuff[(dataPtr)+4:dataPtr+dataLen])
+
+ return result, nil
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/client.go b/vendor/go.dtapp.net/goip/ip2region_v2/client.go
new file mode 100644
index 0000000..9213643
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region_v2/client.go
@@ -0,0 +1,26 @@
+package ip2region_v2
+
+import _ "embed"
+
+//go:embed ip2region.xdb
+var cBuff []byte
+
+type Client struct {
+ db *Searcher
+}
+
+func New() (*Client, error) {
+
+ var err error
+ c := &Client{}
+
+ // 1、从 dbPath 加载整个 xdb 到内存
+
+ // 2、用全局的 cBuff 创建完全基于内存的查询对象。
+ c.db, err = NewWithBuffer(cBuff)
+ if err != nil {
+ return nil, err
+ }
+
+ return c, err
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/download.go b/vendor/go.dtapp.net/goip/ip2region_v2/download.go
new file mode 100644
index 0000000..0f84054
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region_v2/download.go
@@ -0,0 +1,23 @@
+package ip2region_v2
+
+import (
+ "io/ioutil"
+ "log"
+ "net/http"
+)
+
+func OnlineDownload() {
+ resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.xdb?raw=true")
+ if err != nil {
+ panic(err)
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+
+ err = ioutil.WriteFile("./ip2region.xdb", body, 0644)
+ if err != nil {
+ panic(err)
+ }
+ log.Printf("已下载最新 ip2region.xdb 数据库 %s ", "./ip2region.xdb")
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/ip2region.xdb b/vendor/go.dtapp.net/goip/ip2region_v2/ip2region.xdb
new file mode 100644
index 0000000..31f96a1
Binary files /dev/null and b/vendor/go.dtapp.net/goip/ip2region_v2/ip2region.xdb differ
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/query.go b/vendor/go.dtapp.net/goip/ip2region_v2/query.go
new file mode 100644
index 0000000..de90582
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region_v2/query.go
@@ -0,0 +1,52 @@
+package ip2region_v2
+
+import (
+ _ "embed"
+ "go.dtapp.net/gostring"
+ "net"
+)
+
+// QueryResult 返回
+type QueryResult struct {
+ Ip string `json:"ip,omitempty"` // ip
+ Country string `json:"country,omitempty"` // 国家
+ Province string `json:"province,omitempty"` // 省份
+ City string `json:"city,omitempty"` // 城市
+ Operator string `json:"operator,omitempty"` // 运营商
+}
+
+func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
+
+ // 备注:并发使用,用整个 xdb 缓存创建的 searcher 对象可以安全用于并发。
+
+ str, err := c.db.SearchByStr(ipAddress.String())
+ if err != nil {
+ return QueryResult{}, err
+ }
+
+ split := gostring.Split(str, "|")
+ if len(split) <= 0 {
+ return QueryResult{}, err
+ }
+
+ result.Ip = ipAddress.String()
+
+ result.Country = split[0]
+ if result.Country == "0" {
+ result.Country = ""
+ }
+ result.Province = split[2]
+ if result.Province == "0" {
+ result.Province = ""
+ }
+ result.City = split[3]
+ if result.City == "0" {
+ result.City = ""
+ }
+ result.Operator = split[4]
+ if result.Operator == "0" {
+ result.Operator = ""
+ }
+
+ return result, err
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/searcher.go b/vendor/go.dtapp.net/goip/ip2region_v2/searcher.go
new file mode 100644
index 0000000..7d5e9a3
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region_v2/searcher.go
@@ -0,0 +1,240 @@
+package ip2region_v2
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+const (
+ HeaderInfoLength = 256
+ VectorIndexRows = 256
+ VectorIndexCols = 256
+ VectorIndexSize = 8
+ SegmentIndexBlockSize = 14
+)
+
+// --- Index policy define
+
+type IndexPolicy int
+
+const (
+ VectorIndexPolicy IndexPolicy = 1
+ BTreeIndexPolicy IndexPolicy = 2
+)
+
+func (i IndexPolicy) String() string {
+ switch i {
+ case VectorIndexPolicy:
+ return "VectorIndex"
+ case BTreeIndexPolicy:
+ return "BtreeIndex"
+ default:
+ return "unknown"
+ }
+}
+
+// --- Header define
+
+type Header struct {
+ // data []byte
+ Version uint16
+ IndexPolicy IndexPolicy
+ CreatedAt uint32
+ StartIndexPtr uint32
+ EndIndexPtr uint32
+}
+
+func NewHeader(input []byte) (*Header, error) {
+ if len(input) < 16 {
+ return nil, fmt.Errorf("invalid input buffer")
+ }
+
+ return &Header{
+ Version: binary.LittleEndian.Uint16(input),
+ IndexPolicy: IndexPolicy(binary.LittleEndian.Uint16(input[2:])),
+ CreatedAt: binary.LittleEndian.Uint32(input[4:]),
+ StartIndexPtr: binary.LittleEndian.Uint32(input[8:]),
+ EndIndexPtr: binary.LittleEndian.Uint32(input[12:]),
+ }, nil
+}
+
+// --- searcher implementation
+
+type Searcher struct {
+ handle *os.File
+
+ // header info
+ header *Header
+ ioCount int
+
+ // use it only when this feature enabled.
+ // Preload the vector index will reduce the number of IO operations
+ // thus speedup the search process
+ vectorIndex []byte
+
+ // content buffer.
+ // running with the whole xdb file cached
+ contentBuff []byte
+}
+
+func baseNew(dbFile string, vIndex []byte, cBuff []byte) (*Searcher, error) {
+ var err error
+
+ // content buff first
+ if cBuff != nil {
+ return &Searcher{
+ vectorIndex: nil,
+ contentBuff: cBuff,
+ }, nil
+ }
+
+ // open the xdb binary file
+ handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Searcher{
+ handle: handle,
+ vectorIndex: vIndex,
+ }, nil
+}
+
+func NewWithFileOnly(dbFile string) (*Searcher, error) {
+ return baseNew(dbFile, nil, nil)
+}
+
+func NewWithVectorIndex(dbFile string, vIndex []byte) (*Searcher, error) {
+ return baseNew(dbFile, vIndex, nil)
+}
+
+func NewWithBuffer(cBuff []byte) (*Searcher, error) {
+ return baseNew("", nil, cBuff)
+}
+
+func (s *Searcher) Close() {
+ if s.handle != nil {
+ err := s.handle.Close()
+ if err != nil {
+ return
+ }
+ }
+}
+
+// GetIOCount return the global io count for the last search
+func (s *Searcher) GetIOCount() int {
+ return s.ioCount
+}
+
+// SearchByStr find the region for the specified ip string
+func (s *Searcher) SearchByStr(str string) (string, error) {
+ ip, err := CheckIP(str)
+ if err != nil {
+ return "", err
+ }
+
+ return s.Search(ip)
+}
+
+// Search find the region for the specified long ip
+func (s *Searcher) Search(ip uint32) (string, error) {
+ // reset the global ioCount
+ s.ioCount = 0
+
+ // locate the segment index block based on the vector index
+ var il0 = (ip >> 24) & 0xFF
+ var il1 = (ip >> 16) & 0xFF
+ var idx = il0*VectorIndexCols*VectorIndexSize + il1*VectorIndexSize
+ var sPtr, ePtr = uint32(0), uint32(0)
+ if s.vectorIndex != nil {
+ sPtr = binary.LittleEndian.Uint32(s.vectorIndex[idx:])
+ ePtr = binary.LittleEndian.Uint32(s.vectorIndex[idx+4:])
+ } else if s.contentBuff != nil {
+ sPtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx:])
+ ePtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx+4:])
+ } else {
+ // read the vector index block
+ var buff = make([]byte, VectorIndexSize)
+ err := s.read(int64(HeaderInfoLength+idx), buff)
+ if err != nil {
+ return "", fmt.Errorf("read vector index block at %d: %w", HeaderInfoLength+idx, err)
+ }
+
+ sPtr = binary.LittleEndian.Uint32(buff)
+ ePtr = binary.LittleEndian.Uint32(buff[4:])
+ }
+
+ // fmt.Printf("sPtr=%d, ePtr=%d", sPtr, ePtr)
+
+ // binary search the segment index to get the region
+ var dataLen, dataPtr = 0, uint32(0)
+ var buff = make([]byte, SegmentIndexBlockSize)
+ var l, h = 0, int((ePtr - sPtr) / SegmentIndexBlockSize)
+ for l <= h {
+ m := (l + h) >> 1
+ p := sPtr + uint32(m*SegmentIndexBlockSize)
+ err := s.read(int64(p), buff)
+ if err != nil {
+ return "", fmt.Errorf("read segment index at %d: %w", p, err)
+ }
+
+ // decode the data step by step to reduce the unnecessary operations
+ sip := binary.LittleEndian.Uint32(buff)
+ if ip < sip {
+ h = m - 1
+ } else {
+ eip := binary.LittleEndian.Uint32(buff[4:])
+ if ip > eip {
+ l = m + 1
+ } else {
+ dataLen = int(binary.LittleEndian.Uint16(buff[8:]))
+ dataPtr = binary.LittleEndian.Uint32(buff[10:])
+ break
+ }
+ }
+ }
+
+ //fmt.Printf("dataLen: %d, dataPtr: %d", dataLen, dataPtr)
+ if dataLen == 0 {
+ return "", nil
+ }
+
+ // load and return the region data
+ var regionBuff = make([]byte, dataLen)
+ err := s.read(int64(dataPtr), regionBuff)
+ if err != nil {
+ return "", fmt.Errorf("read region at %d: %w", dataPtr, err)
+ }
+
+ return string(regionBuff), nil
+}
+
+// do the data read operation based on the setting.
+// content buffer first or will read from the file.
+// this operation will invoke the Seek for file based read.
+func (s *Searcher) read(offset int64, buff []byte) error {
+ if s.contentBuff != nil {
+ cLen := copy(buff, s.contentBuff[offset:])
+ if cLen != len(buff) {
+ return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
+ }
+ } else {
+ _, err := s.handle.Seek(offset, 0)
+ if err != nil {
+ return fmt.Errorf("seek to %d: %w", offset, err)
+ }
+
+ s.ioCount++
+ rLen, err := s.handle.Read(buff)
+ if err != nil {
+ return fmt.Errorf("handle read: %w", err)
+ }
+
+ if rLen != len(buff) {
+ return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/go.dtapp.net/goip/ip2region_v2/util.go b/vendor/go.dtapp.net/goip/ip2region_v2/util.go
new file mode 100644
index 0000000..b981585
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ip2region_v2/util.go
@@ -0,0 +1,165 @@
+package ip2region_v2
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+var shiftIndex = []int{24, 16, 8, 0}
+
+func CheckIP(ip string) (uint32, error) {
+ var ps = strings.Split(ip, ".")
+ if len(ps) != 4 {
+ return 0, fmt.Errorf("invalid ip address `%s`", ip)
+ }
+
+ var val = uint32(0)
+ for i, s := range ps {
+ d, err := strconv.Atoi(s)
+ if err != nil {
+ return 0, fmt.Errorf("the %dth part `%s` is not an integer", i, s)
+ }
+
+ if d < 0 || d > 255 {
+ return 0, fmt.Errorf("the %dth part `%s` should be an integer bettween 0 and 255", i, s)
+ }
+
+ val |= uint32(d) << shiftIndex[i]
+ }
+
+ // convert the ip to integer
+ return val, nil
+}
+
+func Long2IP(ip uint32) string {
+ return fmt.Sprintf("%d.%d.%d.%d", (ip>>24)&0xFF, (ip>>16)&0xFF, (ip>>8)&0xFF, ip&0xFF)
+}
+
+func MidIP(sip uint32, eip uint32) uint32 {
+ return uint32((uint64(sip) + uint64(eip)) >> 1)
+}
+
+// LoadHeader load the header info from the specified handle
+func LoadHeader(handle *os.File) (*Header, error) {
+ _, err := handle.Seek(0, 0)
+ if err != nil {
+ return nil, fmt.Errorf("seek to the header: %w", err)
+ }
+
+ var buff = make([]byte, HeaderInfoLength)
+ rLen, err := handle.Read(buff)
+ if err != nil {
+ return nil, err
+ }
+
+ if rLen != len(buff) {
+ return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
+ }
+
+ return NewHeader(buff)
+}
+
+// LoadHeaderFromFile load header info from the specified db file path
+func LoadHeaderFromFile(dbFile string) (*Header, error) {
+ handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
+ }
+
+ header, err := LoadHeader(handle)
+ if err != nil {
+ return nil, err
+ }
+
+ _ = handle.Close()
+ return header, nil
+}
+
+// LoadHeaderFromBuff wrap the header info from the content buffer
+func LoadHeaderFromBuff(cBuff []byte) (*Header, error) {
+ return NewHeader(cBuff[0:256])
+}
+
+// LoadVectorIndex util function to load the vector index from the specified file handle
+func LoadVectorIndex(handle *os.File) ([]byte, error) {
+ // load all the vector index block
+ _, err := handle.Seek(HeaderInfoLength, 0)
+ if err != nil {
+ return nil, fmt.Errorf("seek to vector index: %w", err)
+ }
+
+ var buff = make([]byte, VectorIndexRows*VectorIndexCols*VectorIndexSize)
+ rLen, err := handle.Read(buff)
+ if err != nil {
+ return nil, err
+ }
+
+ if rLen != len(buff) {
+ return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
+ }
+
+ return buff, nil
+}
+
+// LoadVectorIndexFromFile load vector index from a specified file path
+func LoadVectorIndexFromFile(dbFile string) ([]byte, error) {
+ handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
+ }
+
+ vIndex, err := LoadVectorIndex(handle)
+ if err != nil {
+ return nil, err
+ }
+
+ _ = handle.Close()
+ return vIndex, nil
+}
+
+// LoadContent load the whole xdb content from the specified file handle
+func LoadContent(handle *os.File) ([]byte, error) {
+ // get file size
+ fi, err := handle.Stat()
+ if err != nil {
+ return nil, fmt.Errorf("stat: %w", err)
+ }
+
+ size := fi.Size()
+
+ // seek to the head of the file
+ _, err = handle.Seek(0, 0)
+ if err != nil {
+ return nil, fmt.Errorf("seek to get xdb file length: %w", err)
+ }
+
+ var buff = make([]byte, size)
+ rLen, err := handle.Read(buff)
+ if err != nil {
+ return nil, err
+ }
+
+ if rLen != len(buff) {
+ return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
+ }
+
+ return buff, nil
+}
+
+// LoadContentFromFile load the whole xdb content from the specified db file path
+func LoadContentFromFile(dbFile string) ([]byte, error) {
+ handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
+ if err != nil {
+ return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
+ }
+
+ cBuff, err := LoadContent(handle)
+ if err != nil {
+ return nil, err
+ }
+
+ _ = handle.Close()
+ return cBuff, nil
+}
diff --git a/vendor/go.dtapp.net/goip/ipv6wry/client.go b/vendor/go.dtapp.net/goip/ipv6wry/client.go
new file mode 100644
index 0000000..f07ca91
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ipv6wry/client.go
@@ -0,0 +1,138 @@
+package ipv6wry
+
+import (
+ _ "embed"
+ "encoding/binary"
+ "log"
+)
+
+var (
+ header []byte
+ country []byte
+ area []byte
+ v6ip uint64
+ offset uint32
+ start uint32
+ end uint32
+)
+
+//go:embed ipv6wry.db
+var datBuff []byte
+
+type Client struct {
+ Offset uint32
+ ItemLen uint32
+ IndexLen uint32
+}
+
+func New() *Client {
+
+ c := &Client{}
+
+ buf := datBuff[0:8]
+ start := binary.LittleEndian.Uint32(buf[:4])
+ end := binary.LittleEndian.Uint32(buf[4:])
+
+ num := int64((end-start)/7 + 1)
+ log.Printf("ipv6wry.db 共加载:%d 条ip记录\n", num)
+
+ return c
+}
+
+// ReadData 从文件中读取数据
+func (c *Client) readData(length uint32) (rs []byte) {
+ end := c.Offset + length
+ dataNum := uint32(len(datBuff))
+ if c.Offset > dataNum {
+ return nil
+ }
+
+ if end > dataNum {
+ end = dataNum
+ }
+ rs = datBuff[c.Offset:end]
+ c.Offset = end
+ return rs
+}
+
+func (c *Client) getAddr() ([]byte, []byte) {
+ mode := c.readData(1)[0]
+ if mode == 0x01 {
+ // [IP][0x01][国家和地区信息的绝对偏移地址]
+ c.Offset = byteToUInt32(c.readData(3))
+ return c.getAddr()
+ }
+ // [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
+ _offset := c.Offset - 1
+ c1 := c.readArea(_offset)
+ if mode == 0x02 {
+ c.Offset = 4 + _offset
+ } else {
+ c.Offset = _offset + uint32(1+len(c1))
+ }
+ c2 := c.readArea(c.Offset)
+ return c1, c2
+}
+
+func (c *Client) readArea(offset uint32) []byte {
+ c.Offset = offset
+ mode := c.readData(1)[0]
+ if mode == 0x01 || mode == 0x02 {
+ return c.readArea(byteToUInt32(c.readData(3)))
+ }
+ c.Offset = offset
+ return c.readString()
+}
+
+func (c *Client) readString() []byte {
+ data := make([]byte, 0)
+ for {
+ buf := c.readData(1)
+ if buf[0] == 0 {
+ break
+ }
+ data = append(data, buf[0])
+ }
+ return data
+}
+
+func (c *Client) searchIndex(ip uint64) uint32 {
+
+ c.ItemLen = 8
+ c.IndexLen = 11
+
+ header = datBuff[8:24]
+ start = binary.LittleEndian.Uint32(header[8:])
+ counts := binary.LittleEndian.Uint32(header[:8])
+ end = start + counts*c.IndexLen
+
+ buf := make([]byte, c.IndexLen)
+
+ for {
+ mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
+ buf = datBuff[mid : mid+c.IndexLen]
+ _ip := binary.LittleEndian.Uint64(buf[:c.ItemLen])
+
+ if end-start == c.IndexLen {
+ if ip >= binary.LittleEndian.Uint64(datBuff[end:end+c.ItemLen]) {
+ buf = datBuff[end : end+c.IndexLen]
+ }
+ return byteToUInt32(buf[c.ItemLen:])
+ }
+
+ if _ip > ip {
+ end = mid
+ } else if _ip < ip {
+ start = mid
+ } else if _ip == ip {
+ return byteToUInt32(buf[c.ItemLen:])
+ }
+ }
+}
+
+func byteToUInt32(data []byte) uint32 {
+ i := uint32(data[0]) & 0xff
+ i |= (uint32(data[1]) << 8) & 0xff00
+ i |= (uint32(data[2]) << 16) & 0xff0000
+ return i
+}
diff --git a/vendor/go.dtapp.net/goip/v6/download.go b/vendor/go.dtapp.net/goip/ipv6wry/download.go
similarity index 76%
rename from vendor/go.dtapp.net/goip/v6/download.go
rename to vendor/go.dtapp.net/goip/ipv6wry/download.go
index 84dc073..9a3339d 100644
--- a/vendor/go.dtapp.net/goip/v6/download.go
+++ b/vendor/go.dtapp.net/goip/ipv6wry/download.go
@@ -1,4 +1,4 @@
-package v6
+package ipv6wry
import (
"github.com/saracen/go7z"
@@ -9,28 +9,39 @@ import (
"os"
)
-func getOnline() (data []byte, err error) {
+func OnlineDownload() {
resp, err := http.Get("https://ip.zxinc.org/ip.7z")
if err != nil {
- return nil, err
+ panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return nil, err
+ panic(err)
}
file7z, err := ioutil.TempFile("", "*")
if err != nil {
- return nil, err
+ panic(err)
}
defer os.Remove(file7z.Name())
- if err := ioutil.WriteFile(file7z.Name(), body, 0644); err == nil {
- return Un7z(file7z.Name())
+ err = ioutil.WriteFile(file7z.Name(), body, 0644)
+ if err != nil {
+ panic(err)
+ }
+
+ tmpData, err := Un7z(file7z.Name())
+ if err != nil {
+ panic(err)
+ }
+
+ err = ioutil.WriteFile("./ipv6wry.db", tmpData, 0644)
+ if err != nil {
+ panic(err)
}
- return
+ log.Printf("已下载最新 ZX IPv6数据库 %s ", "./ipv6wry.db")
}
func Un7z(filePath string) (data []byte, err error) {
diff --git a/vendor/go.dtapp.net/goip/v6/ip.db b/vendor/go.dtapp.net/goip/ipv6wry/ipv6wry.db
similarity index 100%
rename from vendor/go.dtapp.net/goip/v6/ip.db
rename to vendor/go.dtapp.net/goip/ipv6wry/ipv6wry.db
diff --git a/vendor/go.dtapp.net/goip/ipv6wry/query.go b/vendor/go.dtapp.net/goip/ipv6wry/query.go
new file mode 100644
index 0000000..332629b
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/ipv6wry/query.go
@@ -0,0 +1,82 @@
+package ipv6wry
+
+import (
+ "go.dtapp.net/gostring"
+ "math/big"
+ "net"
+ "strings"
+)
+
+// QueryResult 返回
+type QueryResult struct {
+ Ip string `json:"ip,omitempty"` // ip
+ Country string `json:"country,omitempty"` // 国家
+ Province string `json:"province,omitempty"` // 省份
+ City string `json:"city,omitempty"` // 城市
+ Area string `json:"area,omitempty"` // 区域
+ Isp string `json:"isp,omitempty"` // 运营商
+}
+
+// Query ip地址查询对应归属地信息
+func (c *Client) Query(ipAddress net.IP) (result QueryResult) {
+
+ result.Ip = ipAddress.String()
+
+ c.Offset = 0
+
+ tp := big.NewInt(0)
+ op := big.NewInt(0)
+ tp.SetBytes(ipAddress.To16())
+ op.SetString("18446744073709551616", 10)
+ op.Div(tp, op)
+ tp.SetString("FFFFFFFFFFFFFFFF", 16)
+ op.And(op, tp)
+
+ v6ip = op.Uint64()
+ offset = c.searchIndex(v6ip)
+ c.Offset = offset
+
+ country, area = c.getAddr()
+
+ // 解析地区数据
+ info := strings.Split(string(country), "\t")
+ if len(info) > 0 {
+ i := 1
+ for {
+ if i > len(info) {
+ break
+ }
+ switch i {
+ case 1:
+ result.Country = info[i-1]
+ result.Country = gostring.SpaceAndLineBreak(result.Country)
+ case 2:
+ result.Province = info[i-1]
+ result.Province = gostring.SpaceAndLineBreak(result.Province)
+ case 3:
+ result.City = info[i-1]
+ result.City = gostring.SpaceAndLineBreak(result.City)
+ case 4:
+ result.Area = info[i-1]
+ result.Area = gostring.SpaceAndLineBreak(result.Area)
+ }
+ i++ // 自增
+ }
+ } else {
+ result.Country = string(country)
+ result.Country = gostring.SpaceAndLineBreak(result.Country)
+ }
+ // 运营商
+ result.Isp = string(area)
+
+ // Delete ZX (防止不相关的信息产生干扰)
+ if result.Isp == "ZX" || result.Isp == "" {
+ result.Isp = ""
+ } else {
+ result.Isp = " " + result.Isp
+ }
+
+ result.Isp = gostring.SpaceAndLineBreak(result.Isp)
+
+ return result
+}
diff --git a/vendor/go.dtapp.net/goip/is.go b/vendor/go.dtapp.net/goip/is.go
new file mode 100644
index 0000000..da49c9c
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/is.go
@@ -0,0 +1,35 @@
+package goip
+
+import "strings"
+
+var (
+ ipv4 = "IPV4"
+ ipv6 = "IPV6"
+)
+
+func (c *Client) isIpv4OrIpv6(ip string) string {
+ if len(ip) < 7 {
+ return ""
+ }
+ arrIpv4 := strings.Split(ip, ".")
+ if len(arrIpv4) == 4 {
+ //. 判断IPv4
+ for _, val := range arrIpv4 {
+ if !c.CheckIpv4(val) {
+ return ""
+ }
+ }
+ return ipv4
+ }
+ arrIpv6 := strings.Split(ip, ":")
+ if len(arrIpv6) == 8 {
+ // 判断Ipv6
+ for _, val := range arrIpv6 {
+ if !c.CheckIpv6(val) {
+ return "Neither"
+ }
+ }
+ return ipv6
+ }
+ return ""
+}
diff --git a/vendor/go.dtapp.net/goip/qqwry/client.go b/vendor/go.dtapp.net/goip/qqwry/client.go
new file mode 100644
index 0000000..3b51445
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/qqwry/client.go
@@ -0,0 +1,139 @@
+package qqwry
+
+import (
+ _ "embed"
+ "encoding/binary"
+ "log"
+)
+
+var (
+ header []byte
+ country []byte
+ area []byte
+ offset uint32
+ start uint32
+ end uint32
+)
+
+//go:embed qqwry.dat
+var datBuff []byte
+
+type Client struct {
+ Offset uint32
+ ItemLen uint32
+ IndexLen uint32
+}
+
+func New() *Client {
+
+ c := &Client{}
+
+ buf := datBuff[0:8]
+ start := binary.LittleEndian.Uint32(buf[:4])
+ end := binary.LittleEndian.Uint32(buf[4:])
+
+ num := int64((end-start)/7 + 1)
+ log.Printf("qqwry.dat 共加载:%d 条ip记录\n", num)
+
+ return c
+}
+
+// ReadData 从文件中读取数据
+func (c *Client) readData(length uint32) (rs []byte) {
+ end := c.Offset + length
+ dataNum := uint32(len(datBuff))
+ if c.Offset > dataNum {
+ return nil
+ }
+
+ if end > dataNum {
+ end = dataNum
+ }
+ rs = datBuff[c.Offset:end]
+ c.Offset = end
+ return rs
+}
+
+// 获取地址信息
+func (c *Client) getAddr() ([]byte, []byte) {
+ mode := c.readData(1)[0]
+ if mode == 0x01 {
+ // [IP][0x01][国家和地区信息的绝对偏移地址]
+ c.Offset = byteToUInt32(c.readData(3))
+ return c.getAddr()
+ }
+ // [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
+ _offset := c.Offset - 1
+ c1 := c.readArea(_offset)
+ if mode == 0x02 {
+ c.Offset = 4 + _offset
+ } else {
+ c.Offset = _offset + uint32(1+len(c1))
+ }
+ c2 := c.readArea(c.Offset)
+ return c1, c2
+}
+
+// 读取区
+func (c *Client) readArea(offset uint32) []byte {
+ c.Offset = offset
+ mode := c.readData(1)[0]
+ if mode == 0x01 || mode == 0x02 {
+ return c.readArea(byteToUInt32(c.readData(3)))
+ }
+ c.Offset = offset
+ return c.readString()
+}
+
+// 读取字符串
+func (c *Client) readString() []byte {
+ data := make([]byte, 0)
+ for {
+ buf := c.readData(1)
+ if buf[0] == 0 {
+ break
+ }
+ data = append(data, buf[0])
+ }
+ return data
+}
+
+// 搜索索引
+func (c *Client) searchIndex(ip uint32) uint32 {
+ c.ItemLen = 4
+ c.IndexLen = 7
+ header = datBuff[0:8]
+ start = binary.LittleEndian.Uint32(header[:4])
+ end = binary.LittleEndian.Uint32(header[4:])
+
+ buf := make([]byte, c.IndexLen)
+
+ for {
+ mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
+ buf = datBuff[mid : mid+c.IndexLen]
+ _ip := binary.LittleEndian.Uint32(buf[:c.ItemLen])
+
+ if end-start == c.IndexLen {
+ if ip >= binary.LittleEndian.Uint32(datBuff[end:end+c.ItemLen]) {
+ buf = datBuff[end : end+c.IndexLen]
+ }
+ return byteToUInt32(buf[c.ItemLen:])
+ }
+
+ if _ip > ip {
+ end = mid
+ } else if _ip < ip {
+ start = mid
+ } else if _ip == ip {
+ return byteToUInt32(buf[c.ItemLen:])
+ }
+ }
+}
+
+// 字节转UInt32
+func byteToUInt32(data []byte) uint32 {
+ i := uint32(data[0]) & 0xff
+ i |= (uint32(data[1]) << 8) & 0xff00
+ i |= (uint32(data[2]) << 16) & 0xff0000
+ return i
+}
diff --git a/vendor/go.dtapp.net/goip/v4/download.go b/vendor/go.dtapp.net/goip/qqwry/download.go
similarity index 51%
rename from vendor/go.dtapp.net/goip/v4/download.go
rename to vendor/go.dtapp.net/goip/qqwry/download.go
index f6e3d7b..6126330 100644
--- a/vendor/go.dtapp.net/goip/v4/download.go
+++ b/vendor/go.dtapp.net/goip/qqwry/download.go
@@ -1,10 +1,11 @@
-package v4
+package qqwry
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io/ioutil"
+ "log"
"net/http"
)
@@ -25,34 +26,45 @@ func getKey() (uint32, error) {
}
}
-// 在线获取内容
-func getOnline() ([]byte, error) {
+func OnlineDownload() {
resp, err := http.Get("https://update.cz88.net/ip/qqwry.rar")
if err != nil {
- return nil, err
+ panic(err)
}
defer resp.Body.Close()
- if body, err := ioutil.ReadAll(resp.Body); err != nil {
- return nil, err
- } else {
- if key, err := getKey(); err != nil {
- return nil, err
- } else {
- for i := 0; i < 0x200; i++ {
- key = key * 0x805
- key++
- key = key & 0xff
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ panic(err)
+ }
+
+ key, err := getKey()
+ if err != nil {
+ panic(err)
+ }
+
+ for i := 0; i < 0x200; i++ {
+ key = key * 0x805
+ key++
+ key = key & 0xff
- body[i] = byte(uint32(body[i]) ^ key)
- }
+ body[i] = byte(uint32(body[i]) ^ key)
+ }
+
+ reader, err := zlib.NewReader(bytes.NewReader(body))
+ if err != nil {
+ panic(err)
+ }
- reader, err := zlib.NewReader(bytes.NewReader(body))
- if err != nil {
- return nil, err
- }
+ tmpData, err := ioutil.ReadAll(reader)
+ if err != nil {
+ panic(err)
+ }
- return ioutil.ReadAll(reader)
- }
+ err = ioutil.WriteFile("./qqwry.dat", tmpData, 0644)
+ if err != nil {
+ panic(err)
}
+
+ log.Printf("已下载最新 纯真 IPv4数据库 %s ", "./qqwry.dat")
}
diff --git a/vendor/go.dtapp.net/goip/v4/ip.dat b/vendor/go.dtapp.net/goip/qqwry/qqwry.dat
similarity index 100%
rename from vendor/go.dtapp.net/goip/v4/ip.dat
rename to vendor/go.dtapp.net/goip/qqwry/qqwry.dat
diff --git a/vendor/go.dtapp.net/goip/qqwry/query.go b/vendor/go.dtapp.net/goip/qqwry/query.go
new file mode 100644
index 0000000..2e1d7bb
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/qqwry/query.go
@@ -0,0 +1,53 @@
+package qqwry
+
+import (
+ "encoding/binary"
+ "errors"
+ "go.dtapp.net/gostring"
+ "golang.org/x/text/encoding/simplifiedchinese"
+ "net"
+)
+
+// QueryResult 返回
+type QueryResult struct {
+ Ip string `json:"ip,omitempty"` // ip
+ Country string `json:"country,omitempty"` // 国家或地区
+ Area string `json:"area,omitempty"` // 区域
+}
+
+// Query ip地址查询对应归属地信息
+func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
+
+ c.Offset = 0
+
+ // 偏移
+ offset = c.searchIndex(binary.BigEndian.Uint32(ipAddress.To4()))
+ if offset <= 0 {
+ return QueryResult{}, errors.New("搜索失败")
+ }
+
+ result.Ip = ipAddress.String()
+
+ c.Offset = offset + c.ItemLen
+
+ country, area = c.getAddr()
+
+ enc := simplifiedchinese.GBK.NewDecoder()
+
+ result.Country, _ = enc.String(string(country))
+
+ result.Country = gostring.SpaceAndLineBreak(result.Country)
+
+ result.Area, _ = enc.String(string(area))
+
+ // Delete CZ88.NET (防止不相关的信息产生干扰)
+ if result.Area == " CZ88.NET" || result.Area == "" {
+ result.Area = ""
+ } else {
+ result.Area = " " + result.Area
+ }
+
+ result.Area = gostring.SpaceAndLineBreak(result.Area)
+
+ return result, nil
+}
diff --git a/vendor/go.dtapp.net/goip/query.go b/vendor/go.dtapp.net/goip/query.go
new file mode 100644
index 0000000..d6f6763
--- /dev/null
+++ b/vendor/go.dtapp.net/goip/query.go
@@ -0,0 +1,87 @@
+package goip
+
+import (
+ "errors"
+ "go.dtapp.net/goip/geoip"
+ "go.dtapp.net/goip/ip2region"
+ "go.dtapp.net/goip/ip2region_v2"
+ "go.dtapp.net/goip/ipv6wry"
+ "go.dtapp.net/goip/qqwry"
+ "net"
+)
+
+var (
+ QueryIncorrect = errors.New("ip地址不正确")
+)
+
+// QueryQqWry 纯真IP库
+// https://www.cz88.net/
+func (c *Client) QueryQqWry(ipAddress net.IP) (result qqwry.QueryResult, err error) {
+ if ipAddress.To4() == nil {
+ return result, QueryIncorrect
+ }
+
+ query, err := c.qqwryClient.Query(ipAddress)
+ if err != nil {
+ return qqwry.QueryResult{}, err
+ }
+
+ return query, err
+}
+
+// QueryIp2Region ip2region
+// https://github.com/lionsoul2014/ip2region
+func (c *Client) QueryIp2Region(ipAddress net.IP) (result ip2region.QueryResult, err error) {
+ if ipAddress.To4() == nil {
+ return result, QueryIncorrect
+ }
+
+ query, err := c.ip2regionClient.Query(ipAddress)
+ if err != nil {
+ return ip2region.QueryResult{}, err
+ }
+
+ return query, err
+}
+
+// QueryIp2RegionV2 ip2region
+// https://github.com/lionsoul2014/ip2region
+func (c *Client) QueryIp2RegionV2(ipAddress net.IP) (result ip2region_v2.QueryResult, err error) {
+ if ipAddress.To4() == nil {
+ return result, QueryIncorrect
+ }
+
+ query, err := c.ip2regionV2Client.Query(ipAddress)
+ if err != nil {
+ return ip2region_v2.QueryResult{}, err
+ }
+
+ return query, nil
+}
+
+// QueryGeoIp ip2region
+// https://www.maxmind.com/
+func (c *Client) QueryGeoIp(ipAddress net.IP) (result geoip.QueryCityResult, err error) {
+ if ipAddress.String() == "" {
+ return result, QueryIncorrect
+ }
+
+ query, err := c.geoIpClient.QueryCity(ipAddress)
+ if err != nil {
+ return geoip.QueryCityResult{}, err
+ }
+
+ return query, nil
+}
+
+// QueryIpv6wry ip2region
+// https://ip.zxinc.org
+func (c *Client) QueryIpv6wry(ipAddress net.IP) (result ipv6wry.QueryResult, err error) {
+ if ipAddress.To16() == nil {
+ return result, QueryIncorrect
+ }
+
+ query := c.ipv6wryClient.Query(ipAddress)
+
+ return query, nil
+}
diff --git a/vendor/go.dtapp.net/goip/v4/ipv4.go b/vendor/go.dtapp.net/goip/v4/ipv4.go
deleted file mode 100644
index 1ca739e..0000000
--- a/vendor/go.dtapp.net/goip/v4/ipv4.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package v4
-
-import (
- _ "embed"
- "encoding/binary"
- "errors"
- "go.dtapp.net/gostring"
- "golang.org/x/text/encoding/simplifiedchinese"
- "io/ioutil"
- "log"
- "net"
-)
-
-var (
- header []byte
- country []byte
- area []byte
- offset uint32
- start uint32
- end uint32
-)
-
-//go:embed ip.dat
-var dat []byte
-
-type Pointer struct {
- Offset uint32
- ItemLen uint32
- IndexLen uint32
-}
-
-// Result 返回
-type Result struct {
- IP string `json:"ip,omitempty"` // 输入的ip地址
- Country string `json:"country,omitempty"` // 国家或地区
- Area string `json:"area,omitempty"` // 区域
-}
-
-// InitIPV4Data 加载
-func (q *Pointer) InitIPV4Data() int64 {
- buf := dat[0:8]
- start := binary.LittleEndian.Uint32(buf[:4])
- end := binary.LittleEndian.Uint32(buf[4:])
-
- return int64((end-start)/7 + 1)
-}
-
-// ReadData 从文件中读取数据
-func (q *Pointer) readData(length uint32) (rs []byte) {
- end := q.Offset + length
- dataNum := uint32(len(dat))
- if q.Offset > dataNum {
- return nil
- }
-
- if end > dataNum {
- end = dataNum
- }
- rs = dat[q.Offset:end]
- q.Offset = end
- return rs
-}
-
-// Find ip地址查询对应归属地信息
-func (q *Pointer) Find(ipStr string) (res Result) {
-
- // 赋值
- res.IP = ipStr
- if net.ParseIP(ipStr).To4() == nil {
- // 不是ip地址
- return res
- }
-
- q.Offset = 0
-
- // 偏移
- offset = q.searchIndex(binary.BigEndian.Uint32(net.ParseIP(ipStr).To4()))
- if offset <= 0 {
- return
- }
-
- q.Offset = offset + q.ItemLen
-
- country, area = q.getAddr()
-
- enc := simplifiedchinese.GBK.NewDecoder()
-
- res.Country, _ = enc.String(string(country))
- res.Country = gostring.SpaceAndLineBreak(res.Country)
-
- res.Area, _ = enc.String(string(area))
-
- // Delete CZ88.NET (防止不相关的信息产生干扰)
- if res.Area == " CZ88.NET" || res.Area == "" {
- res.Area = ""
- } else {
- res.Area = " " + res.Area
- }
-
- res.Area = gostring.SpaceAndLineBreak(res.Area)
-
- return
-}
-
-// 获取地址信息
-func (q *Pointer) getAddr() ([]byte, []byte) {
- mode := q.readData(1)[0]
- if mode == 0x01 {
- // [IP][0x01][国家和地区信息的绝对偏移地址]
- q.Offset = byteToUInt32(q.readData(3))
- return q.getAddr()
- }
- // [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
- _offset := q.Offset - 1
- c1 := q.readArea(_offset)
- if mode == 0x02 {
- q.Offset = 4 + _offset
- } else {
- q.Offset = _offset + uint32(1+len(c1))
- }
- c2 := q.readArea(q.Offset)
- return c1, c2
-}
-
-// 读取区
-func (q *Pointer) readArea(offset uint32) []byte {
- q.Offset = offset
- mode := q.readData(1)[0]
- if mode == 0x01 || mode == 0x02 {
- return q.readArea(byteToUInt32(q.readData(3)))
- }
- q.Offset = offset
- return q.readString()
-}
-
-// 读取字符串
-func (q *Pointer) readString() []byte {
- data := make([]byte, 0)
- for {
- buf := q.readData(1)
- if buf[0] == 0 {
- break
- }
- data = append(data, buf[0])
- }
- return data
-}
-
-// 搜索索引
-func (q *Pointer) searchIndex(ip uint32) uint32 {
- q.ItemLen = 4
- q.IndexLen = 7
- header = dat[0:8]
- start = binary.LittleEndian.Uint32(header[:4])
- end = binary.LittleEndian.Uint32(header[4:])
-
- buf := make([]byte, q.IndexLen)
-
- for {
- mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
- buf = dat[mid : mid+q.IndexLen]
- _ip := binary.LittleEndian.Uint32(buf[:q.ItemLen])
-
- if end-start == q.IndexLen {
- if ip >= binary.LittleEndian.Uint32(dat[end:end+q.ItemLen]) {
- buf = dat[end : end+q.IndexLen]
- }
- return byteToUInt32(buf[q.ItemLen:])
- }
-
- if _ip > ip {
- end = mid
- } else if _ip < ip {
- start = mid
- } else if _ip == ip {
- return byteToUInt32(buf[q.ItemLen:])
- }
- }
-}
-
-// 字节转UInt32
-func byteToUInt32(data []byte) uint32 {
- i := uint32(data[0]) & 0xff
- i |= (uint32(data[1]) << 8) & 0xff00
- i |= (uint32(data[2]) << 16) & 0xff0000
- return i
-}
-
-// OnlineDownload 在线下载
-func (q *Pointer) OnlineDownload() (err error) {
- tmpData, err := getOnline()
- if err != nil {
- return errors.New("下载失败")
- }
- if err := ioutil.WriteFile("./qqwry.dat", tmpData, 0644); err == nil {
- log.Printf("已下载最新 纯真 IPv4数据库 %s ", "./qqwry.dat")
- } else {
- return errors.New("保存失败")
- }
- return nil
-}
diff --git a/vendor/go.dtapp.net/goip/v6/ipv6.go b/vendor/go.dtapp.net/goip/v6/ipv6.go
deleted file mode 100644
index 024d1ff..0000000
--- a/vendor/go.dtapp.net/goip/v6/ipv6.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package v6
-
-import (
- _ "embed"
- "encoding/binary"
- "errors"
- "go.dtapp.net/gostring"
- "io/ioutil"
- "log"
- "math/big"
- "net"
- "strings"
-)
-
-var (
- header []byte
- country []byte
- area []byte
- v6ip uint64
- offset uint32
- start uint32
- end uint32
-)
-
-type Result struct {
- IP string `json:"ip,omitempty"` // 输入的ip地址
- Country string `json:"country,omitempty"` // 国家
- Province string `json:"province,omitempty"` // 省份
- City string `json:"city,omitempty"` // 城市
- Area string `json:"area,omitempty"` // 区域
- Isp string `json:"isp,omitempty"` // 运营商
-}
-
-//go:embed ip.db
-var dat []byte
-
-type Pointer struct {
- Offset uint32
- ItemLen uint32
- IndexLen uint32
-}
-
-// InitIPV4Data 加载
-func (q *Pointer) InitIPV4Data() int64 {
- buf := dat[0:8]
- start := binary.LittleEndian.Uint32(buf[:4])
- end := binary.LittleEndian.Uint32(buf[4:])
-
- return int64((end-start)/7 + 1)
-}
-
-// ReadData 从文件中读取数据
-func (q *Pointer) readData(length uint32) (rs []byte) {
- end := q.Offset + length
- dataNum := uint32(len(dat))
- if q.Offset > dataNum {
- return nil
- }
-
- if end > dataNum {
- end = dataNum
- }
- rs = dat[q.Offset:end]
- q.Offset = end
- return rs
-}
-
-// Find ip地址查询对应归属地信息
-func (q *Pointer) Find(ipStr string) (res Result) {
-
- res = Result{}
- res.IP = ipStr
- if net.ParseIP(ipStr).To16() == nil {
- return Result{}
- }
-
- q.Offset = 0
-
- tp := big.NewInt(0)
- op := big.NewInt(0)
- tp.SetBytes(net.ParseIP(ipStr).To16())
- op.SetString("18446744073709551616", 10)
- op.Div(tp, op)
- tp.SetString("FFFFFFFFFFFFFFFF", 16)
- op.And(op, tp)
-
- v6ip = op.Uint64()
- offset = q.searchIndex(v6ip)
- q.Offset = offset
-
- country, area = q.getAddr()
-
- // 解析地区数据
- info := strings.Split(string(country), "\t")
- if len(info) > 0 {
- i := 1
- for {
- if i > len(info) {
- break
- }
- switch i {
- case 1:
- res.Country = info[i-1]
- res.Country = gostring.SpaceAndLineBreak(res.Country)
- case 2:
- res.Province = info[i-1]
- res.Province = gostring.SpaceAndLineBreak(res.Province)
- case 3:
- res.City = info[i-1]
- res.City = gostring.SpaceAndLineBreak(res.City)
- case 4:
- res.Area = info[i-1]
- res.Area = gostring.SpaceAndLineBreak(res.Area)
- }
- i++ // 自增
- }
- } else {
- res.Country = string(country)
- res.Country = gostring.SpaceAndLineBreak(res.Country)
- }
- // 运营商
- res.Isp = string(area)
-
- // Delete ZX (防止不相关的信息产生干扰)
- if res.Isp == "ZX" || res.Isp == "" {
- res.Isp = ""
- } else {
- res.Isp = " " + res.Isp
- }
-
- res.Isp = gostring.SpaceAndLineBreak(res.Isp)
-
- return
-}
-
-func (q *Pointer) getAddr() ([]byte, []byte) {
- mode := q.readData(1)[0]
- if mode == 0x01 {
- // [IP][0x01][国家和地区信息的绝对偏移地址]
- q.Offset = byteToUInt32(q.readData(3))
- return q.getAddr()
- }
- // [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
- _offset := q.Offset - 1
- c1 := q.readArea(_offset)
- if mode == 0x02 {
- q.Offset = 4 + _offset
- } else {
- q.Offset = _offset + uint32(1+len(c1))
- }
- c2 := q.readArea(q.Offset)
- return c1, c2
-}
-
-func (q *Pointer) readArea(offset uint32) []byte {
- q.Offset = offset
- mode := q.readData(1)[0]
- if mode == 0x01 || mode == 0x02 {
- return q.readArea(byteToUInt32(q.readData(3)))
- }
- q.Offset = offset
- return q.readString()
-}
-
-func (q *Pointer) readString() []byte {
- data := make([]byte, 0)
- for {
- buf := q.readData(1)
- if buf[0] == 0 {
- break
- }
- data = append(data, buf[0])
- }
- return data
-}
-
-func (q *Pointer) searchIndex(ip uint64) uint32 {
-
- q.ItemLen = 8
- q.IndexLen = 11
-
- header = dat[8:24]
- start = binary.LittleEndian.Uint32(header[8:])
- counts := binary.LittleEndian.Uint32(header[:8])
- end = start + counts*q.IndexLen
-
- buf := make([]byte, q.IndexLen)
-
- for {
- mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
- buf = dat[mid : mid+q.IndexLen]
- _ip := binary.LittleEndian.Uint64(buf[:q.ItemLen])
-
- if end-start == q.IndexLen {
- if ip >= binary.LittleEndian.Uint64(dat[end:end+q.ItemLen]) {
- buf = dat[end : end+q.IndexLen]
- }
- return byteToUInt32(buf[q.ItemLen:])
- }
-
- if _ip > ip {
- end = mid
- } else if _ip < ip {
- start = mid
- } else if _ip == ip {
- return byteToUInt32(buf[q.ItemLen:])
- }
- }
-}
-
-func byteToUInt32(data []byte) uint32 {
- i := uint32(data[0]) & 0xff
- i |= (uint32(data[1]) << 8) & 0xff00
- i |= (uint32(data[2]) << 16) & 0xff0000
- return i
-}
-
-// OnlineDownload 在线下载
-func (q *Pointer) OnlineDownload() (err error) {
- tmpData, err := getOnline()
- if err != nil {
- return errors.New("下载失败")
- }
- if err := ioutil.WriteFile("./ipv6wry.db", tmpData, 0644); err == nil {
- log.Printf("已下载最新 ZX IPv6数据库 %s ", "./ipv6wry.db")
- } else {
- return errors.New("保存失败")
- }
- return nil
-}
diff --git a/vendor/go.dtapp.net/golog/api_gorm.go b/vendor/go.dtapp.net/golog/api_gorm.go
index 344c177..a0fbbc8 100644
--- a/vendor/go.dtapp.net/golog/api_gorm.go
+++ b/vendor/go.dtapp.net/golog/api_gorm.go
@@ -16,6 +16,32 @@ import (
"unicode/utf8"
)
+// 模型结构体
+type apiPostgresqlLog struct {
+ LogId uint `gorm:"primaryKey;comment:【记录】编号" json:"log_id,omitempty"` //【记录】编号
+ TraceId string `gorm:"index;comment:【系统】跟踪编号" json:"trace_id,omitempty"` //【系统】跟踪编号
+ RequestTime time.Time `gorm:"index;comment:【请求】时间" json:"request_time,omitempty"` //【请求】时间
+ RequestUri string `gorm:"comment:【请求】链接" json:"request_uri,omitempty"` //【请求】链接
+ RequestUrl string `gorm:"comment:【请求】链接" json:"request_url,omitempty"` //【请求】链接
+ RequestApi string `gorm:"index;comment:【请求】接口" json:"request_api,omitempty"` //【请求】接口
+ RequestMethod string `gorm:"index;comment:【请求】方式" json:"request_method,omitempty"` //【请求】方式
+ RequestParams string `gorm:"comment:【请求】参数" json:"request_params,omitempty"` //【请求】参数
+ RequestHeader string `gorm:"comment:【请求】头部" json:"request_header,omitempty"` //【请求】头部
+ RequestIp string `gorm:"index;comment:【请求】请求Ip" json:"request_ip,omitempty"` //【请求】请求Ip
+ ResponseHeader string `gorm:"comment:【返回】头部" json:"response_header,omitempty"` //【返回】头部
+ ResponseStatusCode int `gorm:"index;comment:【返回】状态码" json:"response_status_code,omitempty"` //【返回】状态码
+ ResponseBody string `gorm:"comment:【返回】数据" json:"response_content,omitempty"` //【返回】数据
+ ResponseContentLength int64 `gorm:"comment:【返回】大小" json:"response_content_length,omitempty"` //【返回】大小
+ ResponseTime time.Time `gorm:"index;comment:【返回】时间" json:"response_time,omitempty"` //【返回】时间
+ SystemHostName string `gorm:"index;comment:【系统】主机名" json:"system_host_name,omitempty"` //【系统】主机名
+ SystemInsideIp string `gorm:"index;comment:【系统】内网ip" json:"system_inside_ip,omitempty"` //【系统】内网ip
+ SystemOs string `gorm:"index;comment:【系统】系统类型" json:"system_os,omitempty"` //【系统】系统类型
+ SystemArch string `gorm:"index;comment:【系统】系统架构" json:"system_arch,omitempty"` //【系统】系统架构
+ SystemCpuQuantity int `gorm:"index;comment:【系统】CPU核数" json:"system_cpu_quantity,omitempty"` //【系统】CPU核数
+ GoVersion string `gorm:"index;comment:【程序】Go版本" json:"go_version,omitempty"` //【程序】Go版本
+ SdkVersion string `gorm:"index;comment:【程序】Sdk版本" json:"sdk_version,omitempty"` //【程序】Sdk版本
+}
+
// ApiGormClientConfig 接口实例配置
type ApiGormClientConfig struct {
GormClientFun apiGormClientFun // 日志配置
@@ -82,32 +108,6 @@ func (c *ApiClient) gormAutoMigrate() (err error) {
return
}
-// 模型结构体
-type apiPostgresqlLog struct {
- LogId uint `gorm:"primaryKey;comment:【记录】编号" json:"log_id,omitempty"` //【记录】编号
- TraceId string `gorm:"index;comment:【系统】跟踪编号" json:"trace_id,omitempty"` //【系统】跟踪编号
- RequestTime time.Time `gorm:"index;comment:【请求】时间" json:"request_time,omitempty"` //【请求】时间
- RequestUri string `gorm:"comment:【请求】链接" json:"request_uri,omitempty"` //【请求】链接
- RequestUrl string `gorm:"comment:【请求】链接" json:"request_url,omitempty"` //【请求】链接
- RequestApi string `gorm:"index;comment:【请求】接口" json:"request_api,omitempty"` //【请求】接口
- RequestMethod string `gorm:"index;comment:【请求】方式" json:"request_method,omitempty"` //【请求】方式
- RequestParams string `gorm:"comment:【请求】参数" json:"request_params,omitempty"` //【请求】参数
- RequestHeader string `gorm:"comment:【请求】头部" json:"request_header,omitempty"` //【请求】头部
- RequestIp string `gorm:"index;comment:【请求】请求Ip" json:"request_ip,omitempty"` //【请求】请求Ip
- ResponseHeader string `gorm:"comment:【返回】头部" json:"response_header,omitempty"` //【返回】头部
- ResponseStatusCode int `gorm:"index;comment:【返回】状态码" json:"response_status_code,omitempty"` //【返回】状态码
- ResponseBody string `gorm:"comment:【返回】数据" json:"response_content,omitempty"` //【返回】数据
- ResponseContentLength int64 `gorm:"comment:【返回】大小" json:"response_content_length,omitempty"` //【返回】大小
- ResponseTime time.Time `gorm:"index;comment:【返回】时间" json:"response_time,omitempty"` //【返回】时间
- SystemHostName string `gorm:"index;comment:【系统】主机名" json:"system_host_name,omitempty"` //【系统】主机名
- SystemInsideIp string `gorm:"index;comment:【系统】内网ip" json:"system_inside_ip,omitempty"` //【系统】内网ip
- SystemOs string `gorm:"index;comment:【系统】系统类型" json:"system_os,omitempty"` //【系统】系统类型
- SystemArch string `gorm:"index;comment:【系统】系统架构" json:"system_arch,omitempty"` //【系统】系统架构
- SystemCpuQuantity int `gorm:"index;comment:【系统】CPU核数" json:"system_cpu_quantity,omitempty"` //【系统】CPU核数
- GoVersion string `gorm:"index;comment:【程序】Go版本" json:"go_version,omitempty"` //【程序】Go版本
- SdkVersion string `gorm:"index;comment:【程序】Sdk版本" json:"sdk_version,omitempty"` //【程序】Sdk版本
-}
-
// 记录日志
func (c *ApiClient) gormRecord(ctx context.Context, postgresqlLog apiPostgresqlLog) (err error) {
diff --git a/vendor/go.dtapp.net/golog/api_mongo.go b/vendor/go.dtapp.net/golog/api_mongo.go
index 27684cf..25704e1 100644
--- a/vendor/go.dtapp.net/golog/api_mongo.go
+++ b/vendor/go.dtapp.net/golog/api_mongo.go
@@ -17,6 +17,33 @@ import (
"runtime"
)
+// 模型结构体
+type apiMongolLog struct {
+ LogId primitive.ObjectID `json:"log_id,omitempty" bson:"_id,omitempty"` //【记录】编号
+ LogTime primitive.DateTime `json:"log_time,omitempty" bson:"log_time,omitempty"` //【记录】时间
+ TraceId string `json:"trace_id,omitempty" bson:"trace_id,omitempty"` //【记录】跟踪编号
+ RequestTime dorm.BsonTime `json:"request_time,omitempty" bson:"request_time,omitempty"` //【请求】时间
+ RequestUri string `json:"request_uri,omitempty" bson:"request_uri,omitempty"` //【请求】链接
+ RequestUrl string `json:"request_url,omitempty" bson:"request_url,omitempty"` //【请求】链接
+ RequestApi string `json:"request_api,omitempty" bson:"request_api,omitempty"` //【请求】接口
+ RequestMethod string `json:"request_method,omitempty" bson:"request_method,omitempty"` //【请求】方式
+ RequestParams interface{} `json:"request_params,omitempty" bson:"request_params,omitempty"` //【请求】参数
+ RequestHeader interface{} `json:"request_header,omitempty" bson:"request_header,omitempty"` //【请求】头部
+ RequestIp string `json:"request_ip,omitempty" bson:"request_ip,omitempty"` //【请求】请求Ip
+ ResponseHeader interface{} `json:"response_header,omitempty" bson:"response_header,omitempty"` //【返回】头部
+ ResponseStatusCode int `json:"response_status_code,omitempty" bson:"response_status_code,omitempty"` //【返回】状态码
+ ResponseBody interface{} `json:"response_body,omitempty" bson:"response_body,omitempty"` //【返回】内容
+ ResponseContentLength int64 `json:"response_content_length,omitempty" bson:"response_content_length,omitempty"` //【返回】大小
+ ResponseTime dorm.BsonTime `json:"response_time,omitempty" bson:"response_time,omitempty"` //【返回】时间
+ SystemHostName string `json:"system_host_name,omitempty" bson:"system_host_name,omitempty"` //【系统】主机名
+ SystemInsideIp string `json:"system_inside_ip,omitempty" bson:"system_inside_ip,omitempty"` //【系统】内网ip
+ SystemOs string `json:"system_os,omitempty" bson:"system_os,omitempty"` //【系统】系统类型
+ SystemArch string `json:"system_arch,omitempty" bson:"system_arch,omitempty"` //【系统】系统架构
+ SystemCpuQuantity int `json:"system_cpu_quantity,omitempty" bson:"system_cpu_quantity,omitempty"` //【系统】CPU核数
+ GoVersion string `json:"go_version,omitempty" bson:"go_version,omitempty"` //【程序】Go版本
+ SdkVersion string `json:"sdk_version,omitempty" bson:"sdk_version,omitempty"` //【程序】Sdk版本
+}
+
// ApiMongoClientConfig 接口实例配置
type ApiMongoClientConfig struct {
MongoClientFun apiMongoClientFun // 日志配置
@@ -88,83 +115,84 @@ func (c *ApiClient) mongoCreateCollection(ctx context.Context) {
"listCollections", 1,
}}).Decode(&commandResult)
if commandErr != nil {
- c.zapLog.WithLogger().Sugar().Errorf("检查时间序列集合:%s", commandErr)
+ c.zapLog.WithTraceId(ctx).Sugar().Errorf("检查时间序列集合:%s", commandErr)
} else {
err := c.mongoClient.Db.Database(c.mongoConfig.databaseName).CreateCollection(ctx, c.mongoConfig.collectionName, options.CreateCollection().SetTimeSeriesOptions(options.TimeSeries().SetTimeField("log_time")))
if err != nil {
- c.zapLog.WithLogger().Sugar().Errorf("创建时间序列集合:%s", err)
+ c.zapLog.WithTraceId(ctx).Sugar().Errorf("创建时间序列集合:%s", err)
}
}
}
// 创建索引
func (c *ApiClient) mongoCreateIndexes(ctx context.Context) {
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"trace_id", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"log_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"request_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"request_method", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"response_status_code", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"response_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"system_host_name", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"system_inside_ip", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"system_os", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"system_arch", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"system_cpu_quantity", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"go_version", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{Keys: bson.D{
- {"sdk_version", -1},
- }}))
-}
-
-// 模型结构体
-type apiMongolLog struct {
- LogId primitive.ObjectID `json:"log_id,omitempty" bson:"_id,omitempty"` //【记录】编号
- LogTime primitive.DateTime `json:"log_time,omitempty" bson:"log_time,omitempty"` //【记录】时间
- TraceId string `json:"trace_id,omitempty" bson:"trace_id,omitempty"` //【记录】跟踪编号
- RequestTime dorm.BsonTime `json:"request_time,omitempty" bson:"request_time,omitempty"` //【请求】时间
- RequestUri string `json:"request_uri,omitempty" bson:"request_uri,omitempty"` //【请求】链接
- RequestUrl string `json:"request_url,omitempty" bson:"request_url,omitempty"` //【请求】链接
- RequestApi string `json:"request_api,omitempty" bson:"request_api,omitempty"` //【请求】接口
- RequestMethod string `json:"request_method,omitempty" bson:"request_method,omitempty"` //【请求】方式
- RequestParams interface{} `json:"request_params,omitempty" bson:"request_params,omitempty"` //【请求】参数
- RequestHeader interface{} `json:"request_header,omitempty" bson:"request_header,omitempty"` //【请求】头部
- RequestIp string `json:"request_ip,omitempty" bson:"request_ip,omitempty"` //【请求】请求Ip
- ResponseHeader interface{} `json:"response_header,omitempty" bson:"response_header,omitempty"` //【返回】头部
- ResponseStatusCode int `json:"response_status_code,omitempty" bson:"response_status_code,omitempty"` //【返回】状态码
- ResponseBody interface{} `json:"response_body,omitempty" bson:"response_body,omitempty"` //【返回】内容
- ResponseContentLength int64 `json:"response_content_length,omitempty" bson:"response_content_length,omitempty"` //【返回】大小
- ResponseTime dorm.BsonTime `json:"response_time,omitempty" bson:"response_time,omitempty"` //【返回】时间
- SystemHostName string `json:"system_host_name,omitempty" bson:"system_host_name,omitempty"` //【系统】主机名
- SystemInsideIp string `json:"system_inside_ip,omitempty" bson:"system_inside_ip,omitempty"` //【系统】内网ip
- SystemOs string `json:"system_os,omitempty" bson:"system_os,omitempty"` //【系统】系统类型
- SystemArch string `json:"system_arch,omitempty" bson:"system_arch,omitempty"` //【系统】系统架构
- SystemCpuQuantity int `json:"system_cpu_quantity,omitempty" bson:"system_cpu_quantity,omitempty"` //【系统】CPU核数
- GoVersion string `json:"go_version,omitempty" bson:"go_version,omitempty"` //【程序】Go版本
- SdkVersion string `json:"sdk_version,omitempty" bson:"sdk_version,omitempty"` //【程序】Sdk版本
+ indexes, err := c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).CreateManyIndexes(ctx, []mongo.IndexModel{
+ {
+ Keys: bson.D{{
+ Key: "trace_id",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_time",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_method",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "response_status_code",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "response_time",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_host_name",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_inside_ip",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_os",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_arch",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_cpu_quantity",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "go_version",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "sdk_version",
+ Value: -1,
+ }},
+ },
+ })
+ if err != nil {
+ c.zapLog.WithTraceId(ctx).Sugar().Errorf("创建索引:%s", err)
+ }
+ c.zapLog.WithTraceId(ctx).Sugar().Infof("创建索引:%s", indexes)
}
// 记录日志
@@ -180,7 +208,7 @@ func (c *ApiClient) mongoRecord(ctx context.Context, mongoLog apiMongolLog) (err
mongoLog.SystemCpuQuantity = c.config.maxProCs //【系统】CPU核数
mongoLog.LogId = primitive.NewObjectID() //【记录】编号
- _, err = c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).InsertOne(mongoLog)
+ _, err = c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).InsertOne(ctx, mongoLog)
if err != nil {
c.zapLog.WithTraceId(ctx).Sugar().Errorf("[golog.api.mongoRecord]:%s", err)
}
@@ -203,7 +231,7 @@ func (c *ApiClient) MongoDelete(ctx context.Context, hour int64) (*mongo.DeleteR
func (c *ApiClient) MongoMiddleware(ctx context.Context, request gorequest.Response, sdkVersion string) {
data := apiMongolLog{
LogTime: primitive.NewDateTimeFromTime(request.RequestTime), //【记录】时间
- RequestTime: dorm.BsonTime(request.RequestTime), //【请求】时间
+ RequestTime: dorm.NewBsonTimeFromTime(request.RequestTime), //【请求】时间
RequestUri: request.RequestUri, //【请求】链接
RequestUrl: gourl.UriParse(request.RequestUri).Url, //【请求】链接
RequestApi: gourl.UriParse(request.RequestUri).Path, //【请求】接口
@@ -213,7 +241,7 @@ func (c *ApiClient) MongoMiddleware(ctx context.Context, request gorequest.Respo
ResponseHeader: request.ResponseHeader, //【返回】头部
ResponseStatusCode: request.ResponseStatusCode, //【返回】状态码
ResponseContentLength: request.ResponseContentLength, //【返回】大小
- ResponseTime: dorm.BsonTime(request.ResponseTime), //【返回】时间
+ ResponseTime: dorm.NewBsonTimeFromTime(request.ResponseTime), //【返回】时间
SdkVersion: sdkVersion, //【程序】Sdk版本
}
if request.ResponseHeader.Get("Content-Type") == "image/jpeg" || request.ResponseHeader.Get("Content-Type") == "image/png" || request.ResponseHeader.Get("Content-Type") == "image/jpg" {
@@ -242,7 +270,7 @@ func (c *ApiClient) MongoMiddleware(ctx context.Context, request gorequest.Respo
func (c *ApiClient) MongoMiddlewareXml(ctx context.Context, request gorequest.Response, sdkVersion string) {
data := apiMongolLog{
LogTime: primitive.NewDateTimeFromTime(request.RequestTime), //【记录】时间
- RequestTime: dorm.BsonTime(request.RequestTime), //【请求】时间
+ RequestTime: dorm.NewBsonTimeFromTime(request.RequestTime), //【请求】时间
RequestUri: request.RequestUri, //【请求】链接
RequestUrl: gourl.UriParse(request.RequestUri).Url, //【请求】链接
RequestApi: gourl.UriParse(request.RequestUri).Path, //【请求】接口
@@ -252,7 +280,7 @@ func (c *ApiClient) MongoMiddlewareXml(ctx context.Context, request gorequest.Re
ResponseHeader: request.ResponseHeader, //【返回】头部
ResponseStatusCode: request.ResponseStatusCode, //【返回】状态码
ResponseContentLength: request.ResponseContentLength, //【返回】大小
- ResponseTime: dorm.BsonTime(request.ResponseTime), //【返回】时间
+ ResponseTime: dorm.NewBsonTimeFromTime(request.ResponseTime), //【返回】时间
SdkVersion: sdkVersion, //【程序】Sdk版本
}
if request.ResponseHeader.Get("Content-Type") == "image/jpeg" || request.ResponseHeader.Get("Content-Type") == "image/png" || request.ResponseHeader.Get("Content-Type") == "image/jpg" {
@@ -281,7 +309,7 @@ func (c *ApiClient) MongoMiddlewareXml(ctx context.Context, request gorequest.Re
func (c *ApiClient) MongoMiddlewareCustom(ctx context.Context, api string, request gorequest.Response, sdkVersion string) {
data := apiMongolLog{
LogTime: primitive.NewDateTimeFromTime(request.RequestTime), //【记录】时间
- RequestTime: dorm.BsonTime(request.RequestTime), //【请求】时间
+ RequestTime: dorm.NewBsonTimeFromTime(request.RequestTime), //【请求】时间
RequestUri: request.RequestUri, //【请求】链接
RequestUrl: gourl.UriParse(request.RequestUri).Url, //【请求】链接
RequestApi: api, //【请求】接口
@@ -291,7 +319,7 @@ func (c *ApiClient) MongoMiddlewareCustom(ctx context.Context, api string, reque
ResponseHeader: request.ResponseHeader, //【返回】头部
ResponseStatusCode: request.ResponseStatusCode, //【返回】状态码
ResponseContentLength: request.ResponseContentLength, //【返回】大小
- ResponseTime: dorm.BsonTime(request.ResponseTime), //【返回】时间
+ ResponseTime: dorm.NewBsonTimeFromTime(request.ResponseTime), //【返回】时间
SdkVersion: sdkVersion, //【程序】Sdk版本
}
if request.ResponseHeader.Get("Content-Type") == "image/jpeg" || request.ResponseHeader.Get("Content-Type") == "image/png" || request.ResponseHeader.Get("Content-Type") == "image/jpg" {
diff --git a/vendor/go.dtapp.net/golog/const.go b/vendor/go.dtapp.net/golog/const.go
index f7144db..3de75f4 100644
--- a/vendor/go.dtapp.net/golog/const.go
+++ b/vendor/go.dtapp.net/golog/const.go
@@ -1,5 +1,5 @@
package golog
const (
- Version = "1.0.73"
+ Version = "1.0.77"
)
diff --git a/vendor/go.dtapp.net/golog/gin.go b/vendor/go.dtapp.net/golog/gin.go
index 4236597..db98b4a 100644
--- a/vendor/go.dtapp.net/golog/gin.go
+++ b/vendor/go.dtapp.net/golog/gin.go
@@ -214,22 +214,30 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
- requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
+ var requestClientIpCountry string
+ var requestClientIpProvince string
+ var requestClientIpCity string
+ var requestClientIpIsp string
+ var requestClientIpLocationLatitude float64
+ var requestClientIpLocationLongitude float64
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
- _, info := c.ipService.Ipv4(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpRegion = info.Region
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
- requestClientIpIsp = info.ISP
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ip2regionV2info.Country
+ requestClientIpProvince = info.Ip2regionV2info.Province
+ requestClientIpCity = info.Ip2regionV2info.City
+ requestClientIpIsp = info.Ip2regionV2info.Operator
+ requestClientIpLocationLatitude = info.GeoipInfo.Location.Latitude
+ requestClientIpLocationLongitude = info.GeoipInfo.Location.Longitude
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
- info := c.ipService.Ipv6(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ipv6wryInfo.Country
+ requestClientIpProvince = info.Ipv6wryInfo.Province
+ requestClientIpCity = info.Ipv6wryInfo.City
+ requestClientIpLocationLatitude = info.GeoipInfo.Location.Latitude
+ requestClientIpLocationLongitude = info.GeoipInfo.Location.Longitude
}
}
@@ -241,12 +249,12 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{gormRecordJson}保存数据:%s", data)
}
- c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{gormRecordXml}保存数据:%s", data)
}
- c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
// 记录
@@ -255,12 +263,12 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{mongoRecordJson}保存数据:%s", data)
}
- c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp, requestClientIpLocationLatitude, requestClientIpLocationLongitude)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{mongoRecordXml}保存数据:%s", data)
}
- c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp, requestClientIpLocationLatitude, requestClientIpLocationLongitude)
}
}
}()
diff --git a/vendor/go.dtapp.net/golog/gin_gorm.go b/vendor/go.dtapp.net/golog/gin_gorm.go
index 1edfdff..dccb125 100644
--- a/vendor/go.dtapp.net/golog/gin_gorm.go
+++ b/vendor/go.dtapp.net/golog/gin_gorm.go
@@ -20,6 +20,40 @@ import (
"time"
)
+// 模型结构体
+type ginPostgresqlLog struct {
+ LogId uint `gorm:"primaryKey;comment:【记录】编号" json:"log_id,omitempty"` //【记录】编号
+ TraceId string `gorm:"index;comment:【系统】跟踪编号" json:"trace_id,omitempty"` //【系统】跟踪编号
+ RequestTime time.Time `gorm:"index;comment:【请求】时间" json:"request_time,omitempty"` //【请求】时间
+ RequestUri string `gorm:"comment:【请求】请求链接 域名+路径+参数" json:"request_uri,omitempty"` //【请求】请求链接 域名+路径+参数
+ RequestUrl string `gorm:"comment:【请求】请求链接 域名+路径" json:"request_url,omitempty"` //【请求】请求链接 域名+路径
+ RequestApi string `gorm:"index;comment:【请求】请求接口 路径" json:"request_api,omitempty"` //【请求】请求接口 路径
+ RequestMethod string `gorm:"index;comment:【请求】请求方式" json:"request_method,omitempty"` //【请求】请求方式
+ RequestProto string `gorm:"comment:【请求】请求协议" json:"request_proto,omitempty"` //【请求】请求协议
+ RequestUa string `gorm:"comment:【请求】请求UA" json:"request_ua,omitempty"` //【请求】请求UA
+ RequestReferer string `gorm:"comment:【请求】请求referer" json:"request_referer,omitempty"` //【请求】请求referer
+ RequestBody string `gorm:"comment:【请求】请求主体" json:"request_body,omitempty"` //【请求】请求主体
+ RequestUrlQuery string `gorm:"comment:【请求】请求URL参数" json:"request_url_query,omitempty"` //【请求】请求URL参数
+ RequestIp string `gorm:"index;comment:【请求】请求客户端Ip" json:"request_ip,omitempty"` //【请求】请求客户端Ip
+ RequestIpCountry string `gorm:"index;comment:【请求】请求客户端国家" json:"request_ip_country,omitempty"` //【请求】请求客户端国家
+ RequestIpProvince string `gorm:"index;comment:【请求】请求客户端省份" json:"request_ip_province,omitempty"` //【请求】请求客户端省份
+ RequestIpCity string `gorm:"index;comment:【请求】请求客户端城市" json:"request_ip_city,omitempty"` //【请求】请求客户端城市
+ RequestIpIsp string `gorm:"index;comment:【请求】请求客户端运营商" json:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
+ RequestHeader string `gorm:"comment:【请求】请求头" json:"request_header,omitempty"` //【请求】请求头
+ ResponseTime time.Time `gorm:"index;comment:【返回】时间" json:"response_time,omitempty"` //【返回】时间
+ ResponseCode int `gorm:"index;comment:【返回】状态码" json:"response_code,omitempty"` //【返回】状态码
+ ResponseMsg string `gorm:"comment:【返回】描述" json:"response_msg,omitempty"` //【返回】描述
+ ResponseData string `gorm:"comment:【返回】数据" json:"response_data,omitempty"` //【返回】数据
+ CostTime int64 `gorm:"comment:【系统】花费时间" json:"cost_time,omitempty"` //【系统】花费时间
+ SystemHostName string `gorm:"index;comment:【系统】主机名" json:"system_host_name,omitempty"` //【系统】主机名
+ SystemInsideIp string `gorm:"index;comment:【系统】内网ip" json:"system_inside_ip,omitempty"` //【系统】内网ip
+ SystemOs string `gorm:"index;comment:【系统】系统类型" json:"system_os,omitempty"` //【系统】系统类型
+ SystemArch string `gorm:"index;comment:【系统】系统架构" json:"system_arch,omitempty"` //【系统】系统架构
+ SystemCpuQuantity int `gorm:"index;comment:【系统】CPU核数" json:"system_cpu_quantity,omitempty"` //【系统】CPU核数
+ GoVersion string `gorm:"index;comment:【程序】Go版本" json:"go_version,omitempty"` //【程序】Go版本
+ SdkVersion string `gorm:"index;comment:【程序】Sdk版本" json:"sdk_version,omitempty"` //【程序】Sdk版本
+}
+
// GinGormClientConfig 框架实例配置
type GinGormClientConfig struct {
IpService *goip.Client // ip服务
@@ -82,41 +116,6 @@ func (c *GinClient) gormAutoMigrate() (err error) {
return err
}
-// 模型结构体
-type ginPostgresqlLog struct {
- LogId uint `gorm:"primaryKey;comment:【记录】编号" json:"log_id,omitempty"` //【记录】编号
- TraceId string `gorm:"index;comment:【系统】跟踪编号" json:"trace_id,omitempty"` //【系统】跟踪编号
- RequestTime time.Time `gorm:"index;comment:【请求】时间" json:"request_time,omitempty"` //【请求】时间
- RequestUri string `gorm:"comment:【请求】请求链接 域名+路径+参数" json:"request_uri,omitempty"` //【请求】请求链接 域名+路径+参数
- RequestUrl string `gorm:"comment:【请求】请求链接 域名+路径" json:"request_url,omitempty"` //【请求】请求链接 域名+路径
- RequestApi string `gorm:"index;comment:【请求】请求接口 路径" json:"request_api,omitempty"` //【请求】请求接口 路径
- RequestMethod string `gorm:"index;comment:【请求】请求方式" json:"request_method,omitempty"` //【请求】请求方式
- RequestProto string `gorm:"comment:【请求】请求协议" json:"request_proto,omitempty"` //【请求】请求协议
- RequestUa string `gorm:"comment:【请求】请求UA" json:"request_ua,omitempty"` //【请求】请求UA
- RequestReferer string `gorm:"comment:【请求】请求referer" json:"request_referer,omitempty"` //【请求】请求referer
- RequestBody string `gorm:"comment:【请求】请求主体" json:"request_body,omitempty"` //【请求】请求主体
- RequestUrlQuery string `gorm:"comment:【请求】请求URL参数" json:"request_url_query,omitempty"` //【请求】请求URL参数
- RequestIp string `gorm:"index;comment:【请求】请求客户端Ip" json:"request_ip,omitempty"` //【请求】请求客户端Ip
- RequestIpCountry string `gorm:"index;comment:【请求】请求客户端城市" json:"request_ip_country,omitempty"` //【请求】请求客户端城市
- RequestIpRegion string `gorm:"index;comment:【请求】请求客户端区域" json:"request_ip_region,omitempty"` //【请求】请求客户端区域
- RequestIpProvince string `gorm:"index;comment:【请求】请求客户端省份" json:"request_ip_province,omitempty"` //【请求】请求客户端省份
- RequestIpCity string `gorm:"index;comment:【请求】请求客户端城市" json:"request_ip_city,omitempty"` //【请求】请求客户端城市
- RequestIpIsp string `gorm:"index;comment:【请求】请求客户端运营商" json:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
- RequestHeader string `gorm:"comment:【请求】请求头" json:"request_header,omitempty"` //【请求】请求头
- ResponseTime time.Time `gorm:"index;comment:【返回】时间" json:"response_time,omitempty"` //【返回】时间
- ResponseCode int `gorm:"index;comment:【返回】状态码" json:"response_code,omitempty"` //【返回】状态码
- ResponseMsg string `gorm:"comment:【返回】描述" json:"response_msg,omitempty"` //【返回】描述
- ResponseData string `gorm:"comment:【返回】数据" json:"response_data,omitempty"` //【返回】数据
- CostTime int64 `gorm:"comment:【系统】花费时间" json:"cost_time,omitempty"` //【系统】花费时间
- SystemHostName string `gorm:"index;comment:【系统】主机名" json:"system_host_name,omitempty"` //【系统】主机名
- SystemInsideIp string `gorm:"index;comment:【系统】内网ip" json:"system_inside_ip,omitempty"` //【系统】内网ip
- SystemOs string `gorm:"index;comment:【系统】系统类型" json:"system_os,omitempty"` //【系统】系统类型
- SystemArch string `gorm:"index;comment:【系统】系统架构" json:"system_arch,omitempty"` //【系统】系统架构
- SystemCpuQuantity int `gorm:"index;comment:【系统】CPU核数" json:"system_cpu_quantity,omitempty"` //【系统】CPU核数
- GoVersion string `gorm:"index;comment:【程序】Go版本" json:"go_version,omitempty"` //【程序】Go版本
- SdkVersion string `gorm:"index;comment:【程序】Sdk版本" json:"sdk_version,omitempty"` //【程序】Sdk版本
-}
-
// gormRecord 记录日志
func (c *GinClient) gormRecord(postgresqlLog ginPostgresqlLog) (err error) {
@@ -138,7 +137,7 @@ func (c *GinClient) gormRecord(postgresqlLog ginPostgresqlLog) (err error) {
return
}
-func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
+func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.gormRecordJson]收到保存数据要求:%s", c.gormConfig.tableName)
@@ -156,7 +155,6 @@ func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestT
RequestUrlQuery: dorm.JsonEncodeNoError(ginCtx.Request.URL.Query()), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端城市
- RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -192,7 +190,7 @@ func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestT
}
}
-func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
+func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.gormRecordXml]收到保存数据要求:%s", c.gormConfig.tableName)
@@ -210,7 +208,6 @@ func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTi
RequestUrlQuery: dorm.JsonEncodeNoError(ginCtx.Request.URL.Query()), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端城市
- RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -305,22 +302,24 @@ func (c *GinClient) GormMiddleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
- requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
+ var requestClientIpCountry string
+ var requestClientIpProvince string
+ var requestClientIpCity string
+ var requestClientIpIsp string
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
- _, info := c.ipService.Ipv4(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpRegion = info.Region
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
- requestClientIpIsp = info.ISP
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ip2regionV2info.Country
+ requestClientIpProvince = info.Ip2regionV2info.Province
+ requestClientIpCity = info.Ip2regionV2info.City
+ requestClientIpIsp = info.Ip2regionV2info.Operator
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
- info := c.ipService.Ipv6(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ipv6wryInfo.Country
+ requestClientIpProvince = info.Ipv6wryInfo.Province
+ requestClientIpCity = info.Ipv6wryInfo.City
}
}
@@ -333,12 +332,12 @@ func (c *GinClient) GormMiddleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.GormMiddleware]准备使用{gormRecordJson}保存数据:%s", data)
}
- c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.GormMiddleware]准备使用{gormRecordXml}保存数据:%s", data)
}
- c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
}()
diff --git a/vendor/go.dtapp.net/golog/gin_mongo.go b/vendor/go.dtapp.net/golog/gin_mongo.go
index 77ed3e7..76f33b0 100644
--- a/vendor/go.dtapp.net/golog/gin_mongo.go
+++ b/vendor/go.dtapp.net/golog/gin_mongo.go
@@ -23,6 +23,47 @@ import (
"time"
)
+type ginMongoLogRequestIpLocationLocation struct {
+ Type string `json:"type,omitempty" bson:"type,omitempty"` // GeoJSON类型
+ Coordinates []float64 `json:"coordinates,omitempty" bson:"coordinates,omitempty"` // 经度,纬度
+}
+
+// 模型结构体
+type ginMongoLog struct {
+ LogId primitive.ObjectID `json:"log_id,omitempty" bson:"_id,omitempty"` //【记录】编号
+ LogTime primitive.DateTime `json:"log_time,omitempty" bson:"log_time,omitempty"` //【记录】时间
+ TraceId string `json:"trace_id,omitempty" bson:"trace_id,omitempty"` //【记录】跟踪编号
+ RequestTime dorm.BsonTime `json:"request_time,omitempty" bson:"request_time,omitempty"` //【请求】时间
+ RequestUri string `json:"request_uri,omitempty" bson:"request_uri,omitempty"` //【请求】请求链接 域名+路径+参数
+ RequestUrl string `json:"request_url,omitempty" bson:"request_url,omitempty"` //【请求】请求链接 域名+路径
+ RequestApi string `json:"request_api,omitempty" bson:"request_api,omitempty"` //【请求】请求接口 路径
+ RequestMethod string `json:"request_method,omitempty" bson:"request_method,omitempty"` //【请求】请求方式
+ RequestProto string `json:"request_proto,omitempty" bson:"request_proto,omitempty"` //【请求】请求协议
+ RequestUa string `json:"request_ua,omitempty" bson:"request_ua,omitempty"` //【请求】请求UA
+ RequestReferer string `json:"request_referer,omitempty" bson:"request_referer,omitempty"` //【请求】请求referer
+ RequestBody interface{} `json:"request_body,omitempty" bson:"request_body,omitempty"` //【请求】请求主体
+ RequestUrlQuery interface{} `json:"request_url_query,omitempty" bson:"request_url_query,omitempty"` //【请求】请求URL参数
+ RequestIp string `json:"request_ip,omitempty" bson:"request_ip,omitempty"` //【请求】请求客户端Ip
+ RequestIpCountry string `json:"request_ip_country,omitempty" bson:"request_ip_country,omitempty"` //【请求】请求客户端国家
+ RequestIpProvince string `json:"request_ip_province,omitempty" bson:"request_ip_province,omitempty"` //【请求】请求客户端省份
+ RequestIpCity string `json:"request_ip_city,omitempty" bson:"request_ip_city,omitempty"` //【请求】请求客户端城市
+ RequestIpIsp string `json:"request_ip_isp,omitempty" bson:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
+ RequestIpLocation ginMongoLogRequestIpLocationLocation `json:"request_ip_location,omitempty" bson:"request_ip_location,omitempty"` //【请求】请求客户端位置
+ RequestHeader interface{} `json:"request_header,omitempty" bson:"request_header,omitempty"` //【请求】请求头
+ ResponseTime dorm.BsonTime `json:"response_time,omitempty" bson:"response_time,omitempty"` //【返回】时间
+ ResponseCode int `json:"response_code,omitempty" bson:"response_code,omitempty"` //【返回】状态码
+ ResponseMsg string `json:"response_msg,omitempty" bson:"response_msg,omitempty"` //【返回】描述
+ ResponseData interface{} `json:"response_data,omitempty" bson:"response_data,omitempty"` //【返回】数据
+ CostTime int64 `json:"cost_time,omitempty" bson:"cost_time,omitempty"` //【系统】花费时间
+ SystemHostName string `json:"system_host_name,omitempty" bson:"system_host_name,omitempty"` //【系统】主机名
+ SystemInsideIp string `json:"system_inside_ip,omitempty" bson:"system_inside_ip,omitempty"` //【系统】内网ip
+ SystemOs string `json:"system_os,omitempty" bson:"system_os,omitempty"` //【系统】系统类型
+ SystemArch string `json:"system_arch,omitempty" bson:"system_arch,omitempty"` //【系统】系统架构
+ SystemCpuQuantity int `json:"system_cpu_quantity,omitempty" bson:"system_cpu_quantity,omitempty"` //【系统】CPU核数
+ GoVersion string `json:"go_version,omitempty" bson:"go_version,omitempty"` //【程序】Go版本
+ SdkVersion string `json:"sdk_version,omitempty" bson:"sdk_version,omitempty"` //【程序】Sdk版本
+}
+
// GinMongoClientConfig 框架实例配置
type GinMongoClientConfig struct {
IpService *goip.Client // ip服务
@@ -90,137 +131,123 @@ func (c *GinClient) mongoCreateCollection(ctx context.Context) {
"listCollections", 1,
}}).Decode(&commandResult)
if commandErr != nil {
- c.zapLog.WithLogger().Sugar().Error("检查时间序列集合:", commandErr)
+ c.zapLog.WithTraceId(ctx).Sugar().Error("检查时间序列集合:", commandErr)
} else {
err := c.mongoClient.Db.Database(c.mongoConfig.databaseName).CreateCollection(ctx, c.mongoConfig.collectionName, options.CreateCollection().SetTimeSeriesOptions(options.TimeSeries().SetTimeField("log_time")))
if err != nil {
- c.zapLog.WithLogger().Sugar().Error("创建时间序列集合:", err)
+ c.zapLog.WithTraceId(ctx).Sugar().Error("创建时间序列集合:", err)
}
}
}
// 创建索引
func (c *GinClient) mongoCreateIndexes(ctx context.Context) {
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"trace_id", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"log_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_method", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_proto", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip_country", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip_region", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip_province", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip_city", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"request_ip_isp", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"response_time", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"response_code", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"system_host_name", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"system_inside_ip", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"system_os", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"system_arch", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"system_cpu_quantity", 1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"go_version", -1},
- }}))
- c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
- Keys: bson.D{
- {"sdk_version", -1},
- }}))
-}
-
-// 模型结构体
-type ginMongoLog struct {
- LogId primitive.ObjectID `json:"log_id,omitempty" bson:"_id,omitempty"` //【记录】编号
- LogTime primitive.DateTime `json:"log_time,omitempty" bson:"log_time,omitempty"` //【记录】时间
- TraceId string `json:"trace_id,omitempty" bson:"trace_id,omitempty"` //【记录】跟踪编号
- RequestTime dorm.BsonTime `json:"request_time,omitempty" bson:"request_time,omitempty"` //【请求】时间
- RequestUri string `json:"request_uri,omitempty" bson:"request_uri,omitempty"` //【请求】请求链接 域名+路径+参数
- RequestUrl string `json:"request_url,omitempty" bson:"request_url,omitempty"` //【请求】请求链接 域名+路径
- RequestApi string `json:"request_api,omitempty" bson:"request_api,omitempty"` //【请求】请求接口 路径
- RequestMethod string `json:"request_method,omitempty" bson:"request_method,omitempty"` //【请求】请求方式
- RequestProto string `json:"request_proto,omitempty" bson:"request_proto,omitempty"` //【请求】请求协议
- RequestUa string `json:"request_ua,omitempty" bson:"request_ua,omitempty"` //【请求】请求UA
- RequestReferer string `json:"request_referer,omitempty" bson:"request_referer,omitempty"` //【请求】请求referer
- RequestBody interface{} `json:"request_body,omitempty" bson:"request_body,omitempty"` //【请求】请求主体
- RequestUrlQuery interface{} `json:"request_url_query,omitempty" bson:"request_url_query,omitempty"` //【请求】请求URL参数
- RequestIp string `json:"request_ip,omitempty" bson:"request_ip,omitempty"` //【请求】请求客户端Ip
- RequestIpCountry string `json:"request_ip_country,omitempty" bson:"request_ip_country,omitempty"` //【请求】请求客户端城市
- RequestIpRegion string `json:"request_ip_region,omitempty" bson:"request_ip_region,omitempty"` //【请求】请求客户端区域
- RequestIpProvince string `json:"request_ip_province,omitempty" bson:"request_ip_province,omitempty"` //【请求】请求客户端省份
- RequestIpCity string `json:"request_ip_city,omitempty" bson:"request_ip_city,omitempty"` //【请求】请求客户端城市
- RequestIpIsp string `json:"request_ip_isp,omitempty" bson:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
- RequestHeader interface{} `json:"request_header,omitempty" bson:"request_header,omitempty"` //【请求】请求头
- ResponseTime dorm.BsonTime `json:"response_time,omitempty" bson:"response_time,omitempty"` //【返回】时间
- ResponseCode int `json:"response_code,omitempty" bson:"response_code,omitempty"` //【返回】状态码
- ResponseMsg string `json:"response_msg,omitempty" bson:"response_msg,omitempty"` //【返回】描述
- ResponseData interface{} `json:"response_data,omitempty" bson:"response_data,omitempty"` //【返回】数据
- CostTime int64 `json:"cost_time,omitempty" bson:"cost_time,omitempty"` //【系统】花费时间
- SystemHostName string `json:"system_host_name,omitempty" bson:"system_host_name,omitempty"` //【系统】主机名
- SystemInsideIp string `json:"system_inside_ip,omitempty" bson:"system_inside_ip,omitempty"` //【系统】内网ip
- SystemOs string `json:"system_os,omitempty" bson:"system_os,omitempty"` //【系统】系统类型
- SystemArch string `json:"system_arch,omitempty" bson:"system_arch,omitempty"` //【系统】系统架构
- SystemCpuQuantity int `json:"system_cpu_quantity,omitempty" bson:"system_cpu_quantity,omitempty"` //【系统】CPU核数
- GoVersion string `json:"go_version,omitempty" bson:"go_version,omitempty"` //【程序】Go版本
- SdkVersion string `json:"sdk_version,omitempty" bson:"sdk_version,omitempty"` //【程序】Sdk版本
+ indexes, err := c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).CreateManyIndexes(ctx, []mongo.IndexModel{
+ {
+ Keys: bson.D{{
+ Key: "trace_id",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_time",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_method",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_proto",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip_country",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip_province",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip_city",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip_isp",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "response_time",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "response_code",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_host_name",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_inside_ip",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_os",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_arch",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "system_cpu_quantity",
+ Value: 1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "go_version",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "sdk_version",
+ Value: -1,
+ }},
+ }, {
+ Keys: bson.D{{
+ Key: "request_ip_location",
+ Value: "2dsphere",
+ }},
+ },
+ })
+ if err != nil {
+ c.zapLog.WithTraceId(ctx).Sugar().Errorf("创建索引:%s", err)
+ }
+ c.zapLog.WithTraceId(ctx).Sugar().Infof("创建索引:%s", indexes)
}
// 记录日志
-func (c *GinClient) mongoRecord(mongoLog ginMongoLog) (err error) {
+func (c *GinClient) mongoRecord(ctx context.Context, mongoLog ginMongoLog) (err error) {
mongoLog.SystemHostName = c.mongoConfig.hostname //【系统】主机名
mongoLog.SystemInsideIp = c.mongoConfig.insideIp //【系统】内网ip
@@ -231,7 +258,7 @@ func (c *GinClient) mongoRecord(mongoLog ginMongoLog) (err error) {
mongoLog.SystemCpuQuantity = c.config.maxProCs //【系统】CPU核数
mongoLog.LogId = primitive.NewObjectID() //【记录】编号
- _, err = c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).InsertOne(mongoLog)
+ _, err = c.mongoClient.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).InsertOne(ctx, mongoLog)
if err != nil {
c.zapLog.WithTraceIdStr(mongoLog.TraceId).Sugar().Errorf("[golog.gin.mongoRecord]:%s", err)
}
@@ -239,7 +266,9 @@ func (c *GinClient) mongoRecord(mongoLog ginMongoLog) (err error) {
return err
}
-func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
+func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string, requestClientIpLocationLatitude, requestClientIpLocationLongitude float64) {
+
+ var ctx = gotrace_id.SetGinTraceIdContext(context.Background(), ginCtx)
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.mongoRecordJson]收到保存数据要求:%s,%s", c.mongoConfig.databaseName, c.mongoConfig.collectionName)
@@ -248,7 +277,7 @@ func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, request
data := ginMongoLog{
TraceId: traceId, //【记录】跟踪编号
LogTime: primitive.NewDateTimeFromTime(requestTime), //【记录】时间
- RequestTime: dorm.BsonTime(requestTime), //【请求】时间
+ RequestTime: dorm.NewBsonTimeFromTime(requestTime), //【请求】时间
RequestUrl: ginCtx.Request.RequestURI, //【请求】请求链接
RequestApi: gourl.UriFilterExcludeQueryString(ginCtx.Request.RequestURI), //【请求】请求接口
RequestMethod: ginCtx.Request.Method, //【请求】请求方式
@@ -258,12 +287,11 @@ func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, request
RequestUrlQuery: ginCtx.Request.URL.Query(), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端城市
- RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
RequestHeader: ginCtx.Request.Header, //【请求】请求头
- ResponseTime: dorm.BsonTime(gotime.Current().Time), //【返回】时间
+ ResponseTime: dorm.NewBsonTimeCurrent(), //【返回】时间
ResponseCode: responseCode, //【返回】状态码
ResponseData: c.jsonUnmarshal(responseBody), //【返回】数据
CostTime: endTime - startTime, //【系统】花费时间
@@ -282,17 +310,26 @@ func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, request
}
}
+ if requestClientIpLocationLatitude != 0 && requestClientIpLocationLongitude != 0 {
+ data.RequestIpLocation = ginMongoLogRequestIpLocationLocation{
+ Type: "Point",
+ Coordinates: []float64{requestClientIpLocationLongitude, requestClientIpLocationLatitude},
+ }
+ }
+
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.mongoRecordJson.data]:%+v", data)
}
- err := c.mongoRecord(data)
+ err := c.mongoRecord(ctx, data)
if err != nil {
c.zapLog.WithTraceIdStr(traceId).Sugar().Errorf("[golog.gin.mongoRecordJson]:%s", err)
}
}
-func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
+func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string, requestClientIpLocationLatitude, requestClientIpLocationLongitude float64) {
+
+ var ctx = gotrace_id.SetGinTraceIdContext(context.Background(), ginCtx)
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.mongoRecordXml]收到保存数据要求:%s,%s", c.mongoConfig.databaseName, c.mongoConfig.collectionName)
@@ -301,7 +338,7 @@ func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestT
data := ginMongoLog{
TraceId: traceId, //【记录】跟踪编号
LogTime: primitive.NewDateTimeFromTime(requestTime), //【记录】时间
- RequestTime: dorm.BsonTime(requestTime), //【请求】时间
+ RequestTime: dorm.NewBsonTimeFromTime(requestTime), //【请求】时间
RequestUrl: ginCtx.Request.RequestURI, //【请求】请求链接
RequestApi: gourl.UriFilterExcludeQueryString(ginCtx.Request.RequestURI), //【请求】请求接口
RequestMethod: ginCtx.Request.Method, //【请求】请求方式
@@ -311,12 +348,11 @@ func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestT
RequestUrlQuery: ginCtx.Request.URL.Query(), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端城市
- RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
RequestHeader: ginCtx.Request.Header, //【请求】请求头
- ResponseTime: dorm.BsonTime(gotime.Current().Time), //【返回】时间
+ ResponseTime: dorm.NewBsonTimeCurrent(), //【返回】时间
ResponseCode: responseCode, //【返回】状态码
ResponseData: c.jsonUnmarshal(responseBody), //【返回】数据
CostTime: endTime - startTime, //【系统】花费时间
@@ -335,11 +371,18 @@ func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestT
}
}
+ if requestClientIpLocationLatitude != 0 && requestClientIpLocationLongitude != 0 {
+ data.RequestIpLocation = ginMongoLogRequestIpLocationLocation{
+ Type: "Point",
+ Coordinates: []float64{requestClientIpLocationLongitude, requestClientIpLocationLatitude},
+ }
+ }
+
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.mongoRecordXml.data]:%+v", data)
}
- err := c.mongoRecord(data)
+ err := c.mongoRecord(ctx, data)
if err != nil {
c.zapLog.WithTraceIdStr(traceId).Sugar().Errorf("[golog.gin.mongoRecordXml]:%s", err)
}
@@ -404,22 +447,30 @@ func (c *GinClient) MongoMiddleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
- requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
+ var requestClientIpCountry string
+ var requestClientIpProvince string
+ var requestClientIpCity string
+ var requestClientIpIsp string
+ var requestClientIpLocationLatitude float64
+ var requestClientIpLocationLongitude float64
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
- _, info := c.ipService.Ipv4(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpRegion = info.Region
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
- requestClientIpIsp = info.ISP
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ip2regionV2info.Country
+ requestClientIpProvince = info.Ip2regionV2info.Province
+ requestClientIpCity = info.Ip2regionV2info.City
+ requestClientIpIsp = info.Ip2regionV2info.Operator
+ requestClientIpLocationLatitude = info.GeoipInfo.Location.Latitude
+ requestClientIpLocationLongitude = info.GeoipInfo.Location.Longitude
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
- info := c.ipService.Ipv6(clientIp)
- requestClientIpCountry = info.Country
- requestClientIpProvince = info.Province
- requestClientIpCity = info.City
+ info := c.ipService.Analyse(clientIp)
+ requestClientIpCountry = info.Ipv6wryInfo.Country
+ requestClientIpProvince = info.Ipv6wryInfo.Province
+ requestClientIpCity = info.Ipv6wryInfo.City
+ requestClientIpLocationLatitude = info.GeoipInfo.Location.Latitude
+ requestClientIpLocationLongitude = info.GeoipInfo.Location.Longitude
}
}
@@ -432,12 +483,12 @@ func (c *GinClient) MongoMiddleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.MongoMiddleware]准备使用{mongoRecordJson}保存数据:%s", data)
}
- c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp, requestClientIpLocationLatitude, requestClientIpLocationLongitude)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.MongoMiddleware]准备使用{mongoRecordXml}保存数据:%s", data)
}
- c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
+ c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp, requestClientIpLocationLatitude, requestClientIpLocationLongitude)
}
}
}()
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index 3b2335d..1b2b424 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -214,11 +214,6 @@ esac
if [ "$GOOSARCH" == "aix_ppc64" ]; then
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
- elif [ "$GOOS" == "darwin" ]; then
- # 1.12 and later, syscalls via libSystem
- echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
- # 1.13 and later, syscalls via libSystem (including syscallPtr)
- echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
elif [ "$GOOS" == "illumos" ]; then
# illumos code generation requires a --illumos switch
echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
deleted file mode 100644
index b009860..0000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin && go1.12 && !go1.13
-// +build darwin,go1.12,!go1.13
-
-package unix
-
-import (
- "unsafe"
-)
-
-const _SYS_GETDIRENTRIES64 = 344
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- // To implement this using libSystem we'd need syscall_syscallPtr for
- // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall
- // back to raw syscalls for this func on Go 1.12.
- var p unsafe.Pointer
- if len(buf) > 0 {
- p = unsafe.Pointer(&buf[0])
- } else {
- p = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- return n, errnoErr(e1)
- }
- return n, nil
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
deleted file mode 100644
index 1259f6d..0000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin && go1.13
-// +build darwin,go1.13
-
-package unix
-
-import "unsafe"
-
-//sys closedir(dir uintptr) (err error)
-//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
-
-func fdopendir(fd int) (dir uintptr, err error) {
- r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
- dir = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_fdopendir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- // Simulate Getdirentries using fdopendir/readdir_r/closedir.
- // We store the number of entries to skip in the seek
- // offset of fd. See issue #31368.
- // It's not the full required semantics, but should handle the case
- // of calling Getdirentries or ReadDirent repeatedly.
- // It won't handle assigning the results of lseek to *basep, or handle
- // the directory being edited underfoot.
- skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
- if err != nil {
- return 0, err
- }
-
- // We need to duplicate the incoming file descriptor
- // because the caller expects to retain control of it, but
- // fdopendir expects to take control of its argument.
- // Just Dup'ing the file descriptor is not enough, as the
- // result shares underlying state. Use Openat to make a really
- // new file descriptor referring to the same directory.
- fd2, err := Openat(fd, ".", O_RDONLY, 0)
- if err != nil {
- return 0, err
- }
- d, err := fdopendir(fd2)
- if err != nil {
- Close(fd2)
- return 0, err
- }
- defer closedir(d)
-
- var cnt int64
- for {
- var entry Dirent
- var entryp *Dirent
- e := readdir_r(d, &entry, &entryp)
- if e != 0 {
- return n, errnoErr(e)
- }
- if entryp == nil {
- break
- }
- if skip > 0 {
- skip--
- cnt++
- continue
- }
-
- reclen := int(entry.Reclen)
- if reclen > len(buf) {
- // Not enough room. Return for now.
- // The counter will let us know where we should start up again.
- // Note: this strategy for suspending in the middle and
- // restarting is O(n^2) in the length of the directory. Oh well.
- break
- }
-
- // Copy entry into return buffer.
- s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
- copy(buf, s)
-
- buf = buf[reclen:]
- n += reclen
- cnt++
- }
- // Set the seek offset of the input fd to record
- // how many files we've already returned.
- _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 4f87f16..1f63382 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -19,6 +19,96 @@ import (
"unsafe"
)
+//sys closedir(dir uintptr) (err error)
+//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
+
+func fdopendir(fd int) (dir uintptr, err error) {
+ r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
+ dir = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_fdopendir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
+
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ // Simulate Getdirentries using fdopendir/readdir_r/closedir.
+ // We store the number of entries to skip in the seek
+ // offset of fd. See issue #31368.
+ // It's not the full required semantics, but should handle the case
+ // of calling Getdirentries or ReadDirent repeatedly.
+ // It won't handle assigning the results of lseek to *basep, or handle
+ // the directory being edited underfoot.
+ skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
+ if err != nil {
+ return 0, err
+ }
+
+ // We need to duplicate the incoming file descriptor
+ // because the caller expects to retain control of it, but
+ // fdopendir expects to take control of its argument.
+ // Just Dup'ing the file descriptor is not enough, as the
+ // result shares underlying state. Use Openat to make a really
+ // new file descriptor referring to the same directory.
+ fd2, err := Openat(fd, ".", O_RDONLY, 0)
+ if err != nil {
+ return 0, err
+ }
+ d, err := fdopendir(fd2)
+ if err != nil {
+ Close(fd2)
+ return 0, err
+ }
+ defer closedir(d)
+
+ var cnt int64
+ for {
+ var entry Dirent
+ var entryp *Dirent
+ e := readdir_r(d, &entry, &entryp)
+ if e != 0 {
+ return n, errnoErr(e)
+ }
+ if entryp == nil {
+ break
+ }
+ if skip > 0 {
+ skip--
+ cnt++
+ continue
+ }
+
+ reclen := int(entry.Reclen)
+ if reclen > len(buf) {
+ // Not enough room. Return for now.
+ // The counter will let us know where we should start up again.
+ // Note: this strategy for suspending in the middle and
+ // restarting is O(n^2) in the length of the directory. Oh well.
+ break
+ }
+
+ // Copy entry into return buffer.
+ s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
+ copy(buf, s)
+
+ buf = buf[reclen:]
+ n += reclen
+ cnt++
+ }
+ // Set the seek offset of the input fd to record
+ // how many files we've already returned.
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
deleted file mode 100644
index a06eb09..0000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-//go:build darwin && amd64 && go1.13
-// +build darwin,amd64,go1.13
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
- _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_closedir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
- r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
- res = Errno(r0)
- return
-}
-
-var libc_readdir_r_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
deleted file mode 100644
index f5bb40e..0000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
+++ /dev/null
@@ -1,25 +0,0 @@
-// go run mkasm.go darwin amd64
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build go1.13
-// +build go1.13
-
-#include "textflag.h"
-
-TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_fdopendir(SB)
-
-GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
-
-TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_closedir(SB)
-
-GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
-
-TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_readdir_r(SB)
-
-GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
-DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 467deed..c2461c4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1,8 +1,8 @@
-// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
+// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
-//go:build darwin && amd64 && go1.12
-// +build darwin,amd64,go1.12
+//go:build darwin && amd64
+// +build darwin,amd64
package unix
@@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func closedir(dir uintptr) (err error) {
+ _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_closedir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
+ r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
+ res = Errno(r0)
+ return
+}
+
+var libc_readdir_r_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index b41467a..95fe4c0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -1,11 +1,14 @@
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
-//go:build go1.12
-// +build go1.12
-
#include "textflag.h"
+TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fdopendir(SB)
+
+GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
+
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
+TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_closedir(SB)
+
+GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
+
+TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readdir_r(SB)
+
+GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
+
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
deleted file mode 100644
index cec595d..0000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-//go:build darwin && arm64 && go1.13
-// +build darwin,arm64,go1.13
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
- _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_closedir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
- r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
- res = Errno(r0)
- return
-}
-
-var libc_readdir_r_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
deleted file mode 100644
index 0c3f76b..0000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
+++ /dev/null
@@ -1,25 +0,0 @@
-// go run mkasm.go darwin arm64
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build go1.13
-// +build go1.13
-
-#include "textflag.h"
-
-TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_fdopendir(SB)
-
-GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
-
-TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_closedir(SB)
-
-GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
-
-TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_readdir_r(SB)
-
-GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
-DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 35938d3..26a0fdc 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -1,8 +1,8 @@
-// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
+// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
-//go:build darwin && arm64 && go1.12
-// +build darwin,arm64,go1.12
+//go:build darwin && arm64
+// +build darwin,arm64
package unix
@@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func closedir(dir uintptr) (err error) {
+ _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_closedir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
+ r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
+ res = Errno(r0)
+ return
+}
+
+var libc_readdir_r_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index e1f9204..efa5b4c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -1,11 +1,14 @@
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
-//go:build go1.12
-// +build go1.12
-
#include "textflag.h"
+TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fdopendir(SB)
+
+GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
+
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
+TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_closedir(SB)
+
+GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
+
+TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readdir_r(SB)
+
+GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
+
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
diff --git a/vendor/gorm.io/gorm/.gitignore b/vendor/gorm.io/gorm/.gitignore
index 45505cc..7273332 100644
--- a/vendor/gorm.io/gorm/.gitignore
+++ b/vendor/gorm.io/gorm/.gitignore
@@ -3,4 +3,5 @@ documents
coverage.txt
_book
.idea
-vendor
\ No newline at end of file
+vendor
+.vscode
diff --git a/vendor/gorm.io/gorm/association.go b/vendor/gorm.io/gorm/association.go
index 35e10dd..06229ca 100644
--- a/vendor/gorm.io/gorm/association.go
+++ b/vendor/gorm.io/gorm/association.go
@@ -507,7 +507,9 @@ func (association *Association) buildCondition() *DB {
joinStmt.AddClause(queryClause)
}
joinStmt.Build("WHERE")
- tx.Clauses(clause.Expr{SQL: strings.Replace(joinStmt.SQL.String(), "WHERE ", "", 1), Vars: joinStmt.Vars})
+ if len(joinStmt.SQL.String()) > 0 {
+ tx.Clauses(clause.Expr{SQL: strings.Replace(joinStmt.SQL.String(), "WHERE ", "", 1), Vars: joinStmt.Vars})
+ }
}
tx = tx.Session(&Session{QueryFields: true}).Clauses(clause.From{Joins: []clause.Join{{
diff --git a/vendor/gorm.io/gorm/callbacks/associations.go b/vendor/gorm.io/gorm/callbacks/associations.go
index 4a50e6c..00e00fc 100644
--- a/vendor/gorm.io/gorm/callbacks/associations.go
+++ b/vendor/gorm.io/gorm/callbacks/associations.go
@@ -206,7 +206,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
}
}
- cacheKey := utils.ToStringKey(relPrimaryValues)
+ cacheKey := utils.ToStringKey(relPrimaryValues...)
if len(relPrimaryValues) != len(rel.FieldSchema.PrimaryFields) || !identityMap[cacheKey] {
identityMap[cacheKey] = true
if isPtr {
@@ -292,7 +292,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
}
}
- cacheKey := utils.ToStringKey(relPrimaryValues)
+ cacheKey := utils.ToStringKey(relPrimaryValues...)
if len(relPrimaryValues) != len(rel.FieldSchema.PrimaryFields) || !identityMap[cacheKey] {
identityMap[cacheKey] = true
distinctElems = reflect.Append(distinctElems, elem)
diff --git a/vendor/gorm.io/gorm/callbacks/update.go b/vendor/gorm.io/gorm/callbacks/update.go
index 42ffe2f..b596df9 100644
--- a/vendor/gorm.io/gorm/callbacks/update.go
+++ b/vendor/gorm.io/gorm/callbacks/update.go
@@ -70,10 +70,12 @@ func Update(config *Config) func(db *gorm.DB) {
if db.Statement.SQL.Len() == 0 {
db.Statement.SQL.Grow(180)
db.Statement.AddClauseIfNotExists(clause.Update{})
- if set := ConvertToAssignments(db.Statement); len(set) != 0 {
- db.Statement.AddClause(set)
- } else if _, ok := db.Statement.Clauses["SET"]; !ok {
- return
+ if _, ok := db.Statement.Clauses["SET"]; !ok {
+ if set := ConvertToAssignments(db.Statement); len(set) != 0 {
+ db.Statement.AddClause(set)
+ } else {
+ return
+ }
}
db.Statement.Build(db.Statement.BuildClauses...)
@@ -158,21 +160,21 @@ func ConvertToAssignments(stmt *gorm.Statement) (set clause.Set) {
switch stmt.ReflectValue.Kind() {
case reflect.Slice, reflect.Array:
if size := stmt.ReflectValue.Len(); size > 0 {
- var primaryKeyExprs []clause.Expression
+ var isZero bool
for i := 0; i < size; i++ {
- exprs := make([]clause.Expression, len(stmt.Schema.PrimaryFields))
- var notZero bool
- for idx, field := range stmt.Schema.PrimaryFields {
- value, isZero := field.ValueOf(stmt.Context, stmt.ReflectValue.Index(i))
- exprs[idx] = clause.Eq{Column: field.DBName, Value: value}
- notZero = notZero || !isZero
- }
- if notZero {
- primaryKeyExprs = append(primaryKeyExprs, clause.And(exprs...))
+ for _, field := range stmt.Schema.PrimaryFields {
+ _, isZero = field.ValueOf(stmt.Context, stmt.ReflectValue.Index(i))
+ if !isZero {
+ break
+ }
}
}
- stmt.AddClause(clause.Where{Exprs: []clause.Expression{clause.And(clause.Or(primaryKeyExprs...))}})
+ if !isZero {
+ _, primaryValues := schema.GetIdentityFieldValuesMap(stmt.Context, stmt.ReflectValue, stmt.Schema.PrimaryFields)
+ column, values := schema.ToQueryValues("", stmt.Schema.PrimaryFieldDBNames, primaryValues)
+ stmt.AddClause(clause.Where{Exprs: []clause.Expression{clause.IN{Column: column, Values: values}}})
+ }
}
case reflect.Struct:
for _, field := range stmt.Schema.PrimaryFields {
diff --git a/vendor/gorm.io/gorm/finisher_api.go b/vendor/gorm.io/gorm/finisher_api.go
index 7a3f27b..835a698 100644
--- a/vendor/gorm.io/gorm/finisher_api.go
+++ b/vendor/gorm.io/gorm/finisher_api.go
@@ -13,7 +13,7 @@ import (
"gorm.io/gorm/utils"
)
-// Create insert the value into database
+// Create inserts value, returning the inserted data's primary key in value's id
func (db *DB) Create(value interface{}) (tx *DB) {
if db.CreateBatchSize > 0 {
return db.CreateInBatches(value, db.CreateBatchSize)
@@ -24,7 +24,7 @@ func (db *DB) Create(value interface{}) (tx *DB) {
return tx.callbacks.Create().Execute(tx)
}
-// CreateInBatches insert the value in batches into database
+// CreateInBatches inserts value in batches of batchSize
func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
reflectValue := reflect.Indirect(reflect.ValueOf(value))
@@ -68,7 +68,7 @@ func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
return
}
-// Save update value in database, if the value doesn't have primary key, will insert it
+// Save updates value in database. If value doesn't contain a matching primary key, value is inserted.
func (db *DB) Save(value interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = value
@@ -114,7 +114,7 @@ func (db *DB) Save(value interface{}) (tx *DB) {
return
}
-// First find first record that match given conditions, order by primary key
+// First finds the first record ordered by primary key, matching given conditions conds
func (db *DB) First(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@@ -129,7 +129,7 @@ func (db *DB) First(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
-// Take return a record that match given conditions, the order will depend on the database implementation
+// Take finds the first record returned by the database in no specified order, matching given conditions conds
func (db *DB) Take(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1)
if len(conds) > 0 {
@@ -142,7 +142,7 @@ func (db *DB) Take(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
-// Last find last record that match given conditions, order by primary key
+// Last finds the last record ordered by primary key, matching given conditions conds
func (db *DB) Last(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@@ -158,7 +158,7 @@ func (db *DB) Last(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
-// Find find records that match given conditions
+// Find finds all records matching given conditions conds
func (db *DB) Find(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
if len(conds) > 0 {
@@ -170,7 +170,7 @@ func (db *DB) Find(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
-// FindInBatches find records in batches
+// FindInBatches finds all records in batches of batchSize
func (db *DB) FindInBatches(dest interface{}, batchSize int, fc func(tx *DB, batch int) error) *DB {
var (
tx = db.Order(clause.OrderByColumn{
@@ -202,7 +202,9 @@ func (db *DB) FindInBatches(dest interface{}, batchSize int, fc func(tx *DB, bat
batch++
if result.Error == nil && result.RowsAffected != 0 {
- tx.AddError(fc(result, batch))
+ fcTx := result.Session(&Session{NewDB: true})
+ fcTx.RowsAffected = result.RowsAffected
+ tx.AddError(fc(fcTx, batch))
} else if result.Error != nil {
tx.AddError(result.Error)
}
@@ -284,7 +286,8 @@ func (db *DB) assignInterfacesToValue(values ...interface{}) {
}
}
-// FirstOrInit gets the first matched record or initialize a new instance with given conditions (only works with struct or map conditions)
+// FirstOrInit finds the first matching record, otherwise if not found initializes a new instance with given conds.
+// Each conds must be a struct or map.
func (db *DB) FirstOrInit(dest interface{}, conds ...interface{}) (tx *DB) {
queryTx := db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@@ -310,7 +313,8 @@ func (db *DB) FirstOrInit(dest interface{}, conds ...interface{}) (tx *DB) {
return
}
-// FirstOrCreate gets the first matched record or create a new one with given conditions (only works with struct, map conditions)
+// FirstOrCreate finds the first matching record, otherwise if not found creates a new instance with given conds.
+// Each conds must be a struct or map.
func (db *DB) FirstOrCreate(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
queryTx := db.Session(&Session{}).Limit(1).Order(clause.OrderByColumn{
@@ -358,14 +362,14 @@ func (db *DB) FirstOrCreate(dest interface{}, conds ...interface{}) (tx *DB) {
return tx
}
-// Update update attributes with callbacks, refer: https://gorm.io/docs/update.html#Update-Changed-Fields
+// Update updates column with value using callbacks. Reference: https://gorm.io/docs/update.html#Update-Changed-Fields
func (db *DB) Update(column string, value interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = map[string]interface{}{column: value}
return tx.callbacks.Update().Execute(tx)
}
-// Updates update attributes with callbacks, refer: https://gorm.io/docs/update.html#Update-Changed-Fields
+// Updates updates attributes using callbacks. values must be a struct or map. Reference: https://gorm.io/docs/update.html#Update-Changed-Fields
func (db *DB) Updates(values interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = values
@@ -386,7 +390,9 @@ func (db *DB) UpdateColumns(values interface{}) (tx *DB) {
return tx.callbacks.Update().Execute(tx)
}
-// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition
+// Delete deletes value matching given conditions. If value contains primary key it is included in the conditions. If
+// value includes a deleted_at field, then Delete performs a soft delete instead by setting deleted_at with the current
+// time if null.
func (db *DB) Delete(value interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
if len(conds) > 0 {
@@ -480,7 +486,7 @@ func (db *DB) Rows() (*sql.Rows, error) {
return rows, tx.Error
}
-// Scan scan value to a struct
+// Scan scans the selected query result into the struct dest
func (db *DB) Scan(dest interface{}) (tx *DB) {
config := *db.Config
currentLogger, newLogger := config.Logger, logger.Recorder.New()
@@ -505,7 +511,7 @@ func (db *DB) Scan(dest interface{}) (tx *DB) {
return
}
-// Pluck used to query single column from a model as a map
+// Pluck queries a single column from a model, returning the results in the slice dest. E.g.:
// var ages []int64
// db.Model(&users).Pluck("age", &ages)
func (db *DB) Pluck(column string, dest interface{}) (tx *DB) {
@@ -548,7 +554,8 @@ func (db *DB) ScanRows(rows *sql.Rows, dest interface{}) error {
return tx.Error
}
-// Connection use a db conn to execute Multiple commands,this conn will put conn pool after it is executed.
+// Connection uses a db connection to execute an arbitrary number of commands in fc. When finished, the connection is
+// returned to the connection pool.
func (db *DB) Connection(fc func(tx *DB) error) (err error) {
if db.Error != nil {
return db.Error
@@ -570,7 +577,9 @@ func (db *DB) Connection(fc func(tx *DB) error) (err error) {
return fc(tx)
}
-// Transaction start a transaction as a block, return error will rollback, otherwise to commit.
+// Transaction executes an arbitrary number of commands in fc within a database transaction. If fc returns an
+// error the changes are rolled back; if fc returns nil they are committed. Optional transaction settings may
+// be supplied via opts.
func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err error) {
panicked := true
@@ -613,7 +622,7 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
return
}
-// Begin begins a transaction
+// Begin begins a transaction, applying any transaction options given in opts
func (db *DB) Begin(opts ...*sql.TxOptions) *DB {
var (
// clone statement
@@ -642,7 +651,7 @@ func (db *DB) Begin(opts ...*sql.TxOptions) *DB {
return tx
}
-// Commit commit a transaction
+// Commit commits the changes in a transaction
func (db *DB) Commit() *DB {
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil && !reflect.ValueOf(committer).IsNil() {
db.AddError(committer.Commit())
@@ -652,7 +661,7 @@ func (db *DB) Commit() *DB {
return db
}
-// Rollback rollback a transaction
+// Rollback rolls back the changes in a transaction
func (db *DB) Rollback() *DB {
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil {
if !reflect.ValueOf(committer).IsNil() {
@@ -682,7 +691,7 @@ func (db *DB) RollbackTo(name string) *DB {
return db
}
-// Exec execute raw sql
+// Exec executes raw sql
func (db *DB) Exec(sql string, values ...interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.SQL = strings.Builder{}
diff --git a/vendor/gorm.io/gorm/gorm.go b/vendor/gorm.io/gorm/gorm.go
index 6a6bb03..1f1dac2 100644
--- a/vendor/gorm.io/gorm/gorm.go
+++ b/vendor/gorm.io/gorm/gorm.go
@@ -300,7 +300,8 @@ func (db *DB) WithContext(ctx context.Context) *DB {
// Debug start debug mode
func (db *DB) Debug() (tx *DB) {
- return db.Session(&Session{
+ tx = db.getInstance()
+ return tx.Session(&Session{
Logger: db.Logger.LogMode(logger.Info),
})
}
@@ -412,7 +413,7 @@ func (db *DB) SetupJoinTable(model interface{}, field string, joinTable interfac
relation, ok := modelSchema.Relationships.Relations[field]
isRelation := ok && relation.JoinTable != nil
if !isRelation {
- return fmt.Errorf("failed to found relation: %s", field)
+ return fmt.Errorf("failed to find relation: %s", field)
}
for _, ref := range relation.References {
diff --git a/vendor/gorm.io/gorm/logger/logger.go b/vendor/gorm.io/gorm/logger/logger.go
index 2ffd28d..ce08856 100644
--- a/vendor/gorm.io/gorm/logger/logger.go
+++ b/vendor/gorm.io/gorm/logger/logger.go
@@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"log"
"os"
"time"
@@ -68,8 +68,8 @@ type Interface interface {
}
var (
- // Discard Discard logger will print any log to ioutil.Discard
- Discard = New(log.New(ioutil.Discard, "", log.LstdFlags), Config{})
+ // Discard Discard logger will print any log to io.Discard
+ Discard = New(log.New(io.Discard, "", log.LstdFlags), Config{})
// Default Default logger
Default = New(log.New(os.Stdout, "\r\n", log.LstdFlags), Config{
SlowThreshold: 200 * time.Millisecond,
diff --git a/vendor/gorm.io/gorm/logger/sql.go b/vendor/gorm.io/gorm/logger/sql.go
index c8b194c..bcacc7c 100644
--- a/vendor/gorm.io/gorm/logger/sql.go
+++ b/vendor/gorm.io/gorm/logger/sql.go
@@ -30,6 +30,8 @@ func isPrintable(s string) bool {
var convertibleTypes = []reflect.Type{reflect.TypeOf(time.Time{}), reflect.TypeOf(false), reflect.TypeOf([]byte{})}
+var numericPlaceholderRe = regexp.MustCompile(`\$\d+\$`)
+
// ExplainSQL generate SQL string with given parameters, the generated SQL is expected to be used in logger, execute it might introduce a SQL injection vulnerability
func ExplainSQL(sql string, numericPlaceholder *regexp.Regexp, escaper string, avars ...interface{}) string {
var (
@@ -138,9 +140,18 @@ func ExplainSQL(sql string, numericPlaceholder *regexp.Regexp, escaper string, a
sql = newSQL.String()
} else {
sql = numericPlaceholder.ReplaceAllString(sql, "$$$1$$")
- for idx, v := range vars {
- sql = strings.Replace(sql, "$"+strconv.Itoa(idx+1)+"$", v, 1)
- }
+
+ sql = numericPlaceholderRe.ReplaceAllStringFunc(sql, func(v string) string {
+ num := v[1 : len(v)-1]
+ n, _ := strconv.Atoi(num)
+
+ // position var start from 1 ($1, $2)
+ n -= 1
+ if n >= 0 && n <= len(vars)-1 {
+ return vars[n]
+ }
+ return v
+ })
}
return sql
diff --git a/vendor/gorm.io/gorm/migrator/migrator.go b/vendor/gorm.io/gorm/migrator/migrator.go
index 87ac774..e6782a1 100644
--- a/vendor/gorm.io/gorm/migrator/migrator.go
+++ b/vendor/gorm.io/gorm/migrator/migrator.go
@@ -15,7 +15,7 @@ import (
)
var (
- regFullDataType = regexp.MustCompile(`[^\d]*(\d+)[^\d]?`)
+ regFullDataType = regexp.MustCompile(`\D*(\d+)\D?`)
)
// Migrator m struct
@@ -135,12 +135,12 @@ func (m Migrator) AutoMigrate(values ...interface{}) error {
}
}
}
+ }
- for _, chk := range stmt.Schema.ParseCheckConstraints() {
- if !tx.Migrator().HasConstraint(value, chk.Name) {
- if err := tx.Migrator().CreateConstraint(value, chk.Name); err != nil {
- return err
- }
+ for _, chk := range stmt.Schema.ParseCheckConstraints() {
+ if !tx.Migrator().HasConstraint(value, chk.Name) {
+ if err := tx.Migrator().CreateConstraint(value, chk.Name); err != nil {
+ return err
}
}
}
diff --git a/vendor/gorm.io/gorm/scan.go b/vendor/gorm.io/gorm/scan.go
index 6250fb5..2db4316 100644
--- a/vendor/gorm.io/gorm/scan.go
+++ b/vendor/gorm.io/gorm/scan.go
@@ -66,30 +66,32 @@ func (db *DB) scanIntoStruct(rows Rows, reflectValue reflect.Value, values []int
db.RowsAffected++
db.AddError(rows.Scan(values...))
- joinedSchemaMap := make(map[*schema.Field]interface{}, 0)
+ joinedSchemaMap := make(map[*schema.Field]interface{})
for idx, field := range fields {
- if field != nil {
- if len(joinFields) == 0 || joinFields[idx][0] == nil {
- db.AddError(field.Set(db.Statement.Context, reflectValue, values[idx]))
- } else {
- joinSchema := joinFields[idx][0]
- relValue := joinSchema.ReflectValueOf(db.Statement.Context, reflectValue)
- if relValue.Kind() == reflect.Ptr {
- if _, ok := joinedSchemaMap[joinSchema]; !ok {
- if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
- continue
- }
+ if field == nil {
+ continue
+ }
- relValue.Set(reflect.New(relValue.Type().Elem()))
- joinedSchemaMap[joinSchema] = nil
+ if len(joinFields) == 0 || joinFields[idx][0] == nil {
+ db.AddError(field.Set(db.Statement.Context, reflectValue, values[idx]))
+ } else {
+ joinSchema := joinFields[idx][0]
+ relValue := joinSchema.ReflectValueOf(db.Statement.Context, reflectValue)
+ if relValue.Kind() == reflect.Ptr {
+ if _, ok := joinedSchemaMap[joinSchema]; !ok {
+ if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
+ continue
}
+
+ relValue.Set(reflect.New(relValue.Type().Elem()))
+ joinedSchemaMap[joinSchema] = nil
}
- db.AddError(joinFields[idx][1].Set(db.Statement.Context, relValue, values[idx]))
}
-
- // release data to pool
- field.NewValuePool.Put(values[idx])
+ db.AddError(joinFields[idx][1].Set(db.Statement.Context, relValue, values[idx]))
}
+
+ // release data to pool
+ field.NewValuePool.Put(values[idx])
}
}
diff --git a/vendor/gorm.io/gorm/schema/field.go b/vendor/gorm.io/gorm/schema/field.go
index d4dfbd6..1589d98 100644
--- a/vendor/gorm.io/gorm/schema/field.go
+++ b/vendor/gorm.io/gorm/schema/field.go
@@ -403,18 +403,14 @@ func (schema *Schema) ParseField(fieldStruct reflect.StructField) *Field {
}
if ef.PrimaryKey {
- if val, ok := ef.TagSettings["PRIMARYKEY"]; ok && utils.CheckTruth(val) {
- ef.PrimaryKey = true
- } else if val, ok := ef.TagSettings["PRIMARY_KEY"]; ok && utils.CheckTruth(val) {
- ef.PrimaryKey = true
- } else {
+ if !utils.CheckTruth(ef.TagSettings["PRIMARYKEY"], ef.TagSettings["PRIMARY_KEY"]) {
ef.PrimaryKey = false
if val, ok := ef.TagSettings["AUTOINCREMENT"]; !ok || !utils.CheckTruth(val) {
ef.AutoIncrement = false
}
- if ef.DefaultValue == "" {
+ if !ef.AutoIncrement && ef.DefaultValue == "" {
ef.HasDefaultValue = false
}
}
@@ -472,9 +468,6 @@ func (field *Field) setupValuerAndSetter() {
oldValuerOf := field.ValueOf
field.ValueOf = func(ctx context.Context, v reflect.Value) (interface{}, bool) {
value, zero := oldValuerOf(ctx, v)
- if zero {
- return value, zero
- }
s, ok := value.(SerializerValuerInterface)
if !ok {
@@ -487,7 +480,7 @@ func (field *Field) setupValuerAndSetter() {
Destination: v,
Context: ctx,
fieldValue: value,
- }, false
+ }, zero
}
}
diff --git a/vendor/gorm.io/gorm/schema/schema.go b/vendor/gorm.io/gorm/schema/schema.go
index eca113e..3791237 100644
--- a/vendor/gorm.io/gorm/schema/schema.go
+++ b/vendor/gorm.io/gorm/schema/schema.go
@@ -112,7 +112,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
schemaCacheKey = modelType
}
- // Load exist schmema cache, return if exists
+ // Load exist schema cache, return if exists
if v, ok := cacheStore.Load(schemaCacheKey); ok {
s := v.(*Schema)
// Wait for the initialization of other goroutines to complete
@@ -146,7 +146,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
// When the schema initialization is completed, the channel will be closed
defer close(schema.initialized)
- // Load exist schmema cache, return if exists
+ // Load exist schema cache, return if exists
if v, ok := cacheStore.Load(schemaCacheKey); ok {
s := v.(*Schema)
// Wait for the initialization of other goroutines to complete
diff --git a/vendor/gorm.io/gorm/schema/serializer.go b/vendor/gorm.io/gorm/schema/serializer.go
index 758a642..00a4f85 100644
--- a/vendor/gorm.io/gorm/schema/serializer.go
+++ b/vendor/gorm.io/gorm/schema/serializer.go
@@ -88,7 +88,9 @@ func (JSONSerializer) Scan(ctx context.Context, field *Field, dst reflect.Value,
return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
}
- err = json.Unmarshal(bytes, fieldValue.Interface())
+ if len(bytes) > 0 {
+ err = json.Unmarshal(bytes, fieldValue.Interface())
+ }
}
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
@@ -117,9 +119,15 @@ func (UnixSecondSerializer) Scan(ctx context.Context, field *Field, dst reflect.
// Value implements serializer interface
func (UnixSecondSerializer) Value(ctx context.Context, field *Field, dst reflect.Value, fieldValue interface{}) (result interface{}, err error) {
+ rv := reflect.ValueOf(fieldValue)
switch v := fieldValue.(type) {
- case int64, int, uint, uint64, int32, uint32, int16, uint16, *int64, *int, *uint, *uint64, *int32, *uint32, *int16, *uint16:
- result = time.Unix(reflect.Indirect(reflect.ValueOf(v)).Int(), 0)
+ case int64, int, uint, uint64, int32, uint32, int16, uint16:
+ result = time.Unix(reflect.Indirect(rv).Int(), 0)
+ case *int64, *int, *uint, *uint64, *int32, *uint32, *int16, *uint16:
+ if rv.IsZero() {
+ return nil, nil
+ }
+ result = time.Unix(reflect.Indirect(rv).Int(), 0)
default:
err = fmt.Errorf("invalid field type %#v for UnixSecondSerializer, only int, uint supported", v)
}
@@ -142,8 +150,10 @@ func (GobSerializer) Scan(ctx context.Context, field *Field, dst reflect.Value,
default:
return fmt.Errorf("failed to unmarshal gob value: %#v", dbValue)
}
- decoder := gob.NewDecoder(bytes.NewBuffer(bytesValue))
- err = decoder.Decode(fieldValue.Interface())
+ if len(bytesValue) > 0 {
+ decoder := gob.NewDecoder(bytes.NewBuffer(bytesValue))
+ err = decoder.Decode(fieldValue.Interface())
+ }
}
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
return
diff --git a/vendor/gorm.io/gorm/statement.go b/vendor/gorm.io/gorm/statement.go
index 850af6c..cc26fe3 100644
--- a/vendor/gorm.io/gorm/statement.go
+++ b/vendor/gorm.io/gorm/statement.go
@@ -650,7 +650,7 @@ func (stmt *Statement) Changed(fields ...string) bool {
return false
}
-var nameMatcher = regexp.MustCompile(`^[\W]?(?:[a-z_0-9]+?)[\W]?\.[\W]?([a-z_0-9]+?)[\W]?$`)
+var nameMatcher = regexp.MustCompile(`^(?:\W?(\w+?)\W?\.)?\W?(\w+?)\W?$`)
// SelectAndOmitColumns get select and omit columns, select -> true, omit -> false
func (stmt *Statement) SelectAndOmitColumns(requireCreate, requireUpdate bool) (map[string]bool, bool) {
@@ -672,8 +672,8 @@ func (stmt *Statement) SelectAndOmitColumns(requireCreate, requireUpdate bool) (
}
} else if field := stmt.Schema.LookUpField(column); field != nil && field.DBName != "" {
results[field.DBName] = true
- } else if matches := nameMatcher.FindStringSubmatch(column); len(matches) == 2 {
- results[matches[1]] = true
+ } else if matches := nameMatcher.FindStringSubmatch(column); len(matches) == 3 && (matches[1] == stmt.Table || matches[1] == "") {
+ results[matches[2]] = true
} else {
results[column] = true
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7c1fc88..9b22900 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -34,7 +34,7 @@ github.com/go-playground/locales/currency
# github.com/go-playground/universal-translator v0.18.0
## explicit; go 1.13
github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.11.0
+# github.com/go-playground/validator/v10 v10.11.1
## explicit; go 1.13
github.com/go-playground/validator/v10
# github.com/go-redis/redis/v9 v9.0.0-beta.2
@@ -108,8 +108,8 @@ github.com/jinzhu/now
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.15.10
+## explicit; go 1.17
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -140,6 +140,12 @@ github.com/montanaflynn/stats
# github.com/natefinch/lumberjack v2.0.0+incompatible
## explicit
github.com/natefinch/lumberjack
+# github.com/oschwald/geoip2-golang v1.8.0
+## explicit; go 1.18
+github.com/oschwald/geoip2-golang
+# github.com/oschwald/maxminddb-golang v1.10.0
+## explicit; go 1.18
+github.com/oschwald/maxminddb-golang
# github.com/pelletier/go-toml/v2 v2.0.5
## explicit; go 1.16
github.com/pelletier/go-toml/v2
@@ -248,19 +254,21 @@ github.com/xdg-go/stringprep
# github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
## explicit; go 1.12
github.com/youmark/pkcs8
-# go.dtapp.net/dorm v1.0.33
+# go.dtapp.net/dorm v1.0.36
## explicit; go 1.19
go.dtapp.net/dorm
# go.dtapp.net/goarray v1.0.1
## explicit; go 1.18
go.dtapp.net/goarray
-# go.dtapp.net/goip v1.0.30
+# go.dtapp.net/goip v1.0.34
## explicit; go 1.19
go.dtapp.net/goip
+go.dtapp.net/goip/geoip
go.dtapp.net/goip/ip2region
-go.dtapp.net/goip/v4
-go.dtapp.net/goip/v6
-# go.dtapp.net/golog v1.0.73
+go.dtapp.net/goip/ip2region_v2
+go.dtapp.net/goip/ipv6wry
+go.dtapp.net/goip/qqwry
+# go.dtapp.net/golog v1.0.77
## explicit; go 1.19
go.dtapp.net/golog
# go.dtapp.net/gorandom v1.0.1
@@ -355,7 +363,7 @@ golang.org/x/net/idna
# golang.org/x/sync v0.0.0-20220907140024-f12130a52804
## explicit
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.0.0-20220913175220-63ea55921009
+# golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
@@ -411,8 +419,8 @@ gorm.io/driver/mysql
# gorm.io/driver/postgres v1.3.9
## explicit; go 1.14
gorm.io/driver/postgres
-# gorm.io/gorm v1.23.8
-## explicit; go 1.14
+# gorm.io/gorm v1.23.9
+## explicit; go 1.16
gorm.io/gorm
gorm.io/gorm/callbacks
gorm.io/gorm/clause