- update ip

master v1.0.74
李光春 2 years ago
parent b664b74537
commit c8a251ca62

@@ -1,5 +1,5 @@
package golog
const (
Version = "1.0.73"
Version = "1.0.74"
)

@@ -214,22 +214,21 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", ""
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
_, info := c.ipService.Ipv4(clientIp)
requestClientIpCountry = info.Country
requestClientIpRegion = info.Region
requestClientIpProvince = info.Province
requestClientIpCity = info.City
requestClientIpIsp = info.ISP
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ip2regionV2info.Country
requestClientIpProvince = info.Ip2regionV2info.Province
requestClientIpCity = info.Ip2regionV2info.City
requestClientIpIsp = info.Ip2regionV2info.Operator
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
info := c.ipService.Ipv6(clientIp)
requestClientIpCountry = info.Country
requestClientIpProvince = info.Province
requestClientIpCity = info.City
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ipv6wryInfo.Country
requestClientIpProvince = info.Ipv6wryInfo.Province
requestClientIpCity = info.Ipv6wryInfo.City
}
}
@@ -241,12 +240,12 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{gormRecordJson}保存数据:%s", data)
}
c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{gormRecordXml}保存数据:%s", data)
}
c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
// 记录
@@ -255,12 +254,12 @@ func (c *GinClient) Middleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{mongoRecordJson}保存数据:%s", data)
}
c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.Middleware]准备使用{mongoRecordXml}保存数据:%s", data)
}
c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
}()

@@ -98,7 +98,6 @@ type ginPostgresqlLog struct {
RequestUrlQuery string `gorm:"comment:【请求】请求URL参数" json:"request_url_query,omitempty"` //【请求】请求URL参数
RequestIp string `gorm:"index;comment:【请求】请求客户端Ip" json:"request_ip,omitempty"` //【请求】请求客户端Ip
RequestIpCountry string `gorm:"index;comment:【请求】请求客户端国家" json:"request_ip_country,omitempty"` //【请求】请求客户端国家
RequestIpRegion string `gorm:"index;comment:【请求】请求客户端区域" json:"request_ip_region,omitempty"` //【请求】请求客户端区域
RequestIpProvince string `gorm:"index;comment:【请求】请求客户端省份" json:"request_ip_province,omitempty"` //【请求】请求客户端省份
RequestIpCity string `gorm:"index;comment:【请求】请求客户端城市" json:"request_ip_city,omitempty"` //【请求】请求客户端城市
RequestIpIsp string `gorm:"index;comment:【请求】请求客户端运营商" json:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
@@ -138,7 +137,7 @@ func (c *GinClient) gormRecord(postgresqlLog ginPostgresqlLog) (err error) {
return
}
func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.gormRecordJson]收到保存数据要求:%s", c.gormConfig.tableName)
@@ -156,7 +155,6 @@ func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestT
RequestUrlQuery: dorm.JsonEncodeNoError(ginCtx.Request.URL.Query()), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端国家
RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -192,7 +190,7 @@ func (c *GinClient) gormRecordJson(ginCtx *gin.Context, traceId string, requestT
}
}
func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.gormRecordXml]收到保存数据要求:%s", c.gormConfig.tableName)
@@ -210,7 +208,6 @@ func (c *GinClient) gormRecordXml(ginCtx *gin.Context, traceId string, requestTi
RequestUrlQuery: dorm.JsonEncodeNoError(ginCtx.Request.URL.Query()), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端国家
RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -305,22 +302,21 @@ func (c *GinClient) GormMiddleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", ""
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
_, info := c.ipService.Ipv4(clientIp)
requestClientIpCountry = info.Country
requestClientIpRegion = info.Region
requestClientIpProvince = info.Province
requestClientIpCity = info.City
requestClientIpIsp = info.ISP
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ip2regionV2info.Country
requestClientIpProvince = info.Ip2regionV2info.Province
requestClientIpCity = info.Ip2regionV2info.City
requestClientIpIsp = info.Ip2regionV2info.Operator
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
info := c.ipService.Ipv6(clientIp)
requestClientIpCountry = info.Country
requestClientIpProvince = info.Province
requestClientIpCity = info.City
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ipv6wryInfo.Country
requestClientIpProvince = info.Ipv6wryInfo.Province
requestClientIpCity = info.Ipv6wryInfo.City
}
}
@@ -333,12 +329,12 @@ func (c *GinClient) GormMiddleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.GormMiddleware]准备使用{gormRecordJson}保存数据:%s", data)
}
c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.gormRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.GormMiddleware]准备使用{gormRecordXml}保存数据:%s", data)
}
c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.gormRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
}()

@@ -129,10 +129,6 @@ func (c *GinClient) mongoCreateIndexes(ctx context.Context) {
Keys: bson.D{
{"request_ip_country", 1},
}}))
c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
Keys: bson.D{
{"request_ip_region", 1},
}}))
c.zapLog.WithLogger().Sugar().Infof(c.mongoClient.Db.Database(c.mongoConfig.databaseName).Collection(c.mongoConfig.collectionName).Indexes().CreateOne(ctx, mongo.IndexModel{
Keys: bson.D{
{"request_ip_province", 1},
@@ -200,7 +196,6 @@ type ginMongoLog struct {
RequestUrlQuery interface{} `json:"request_url_query,omitempty" bson:"request_url_query,omitempty"` //【请求】请求URL参数
RequestIp string `json:"request_ip,omitempty" bson:"request_ip,omitempty"` //【请求】请求客户端Ip
RequestIpCountry string `json:"request_ip_country,omitempty" bson:"request_ip_country,omitempty"` //【请求】请求客户端国家
RequestIpRegion string `json:"request_ip_region,omitempty" bson:"request_ip_region,omitempty"` //【请求】请求客户端区域
RequestIpProvince string `json:"request_ip_province,omitempty" bson:"request_ip_province,omitempty"` //【请求】请求客户端省份
RequestIpCity string `json:"request_ip_city,omitempty" bson:"request_ip_city,omitempty"` //【请求】请求客户端城市
RequestIpIsp string `json:"request_ip_isp,omitempty" bson:"request_ip_isp,omitempty"` //【请求】请求客户端运营商
@@ -239,7 +234,7 @@ func (c *GinClient) mongoRecord(mongoLog ginMongoLog) (err error) {
return err
}
func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.mongoRecordJson]收到保存数据要求:%s,%s", c.mongoConfig.databaseName, c.mongoConfig.collectionName)
@@ -258,7 +253,6 @@ func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, request
RequestUrlQuery: ginCtx.Request.URL.Query(), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端国家
RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -292,7 +286,7 @@ func (c *GinClient) mongoRecordJson(ginCtx *gin.Context, traceId string, request
}
}
func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestTime time.Time, requestBody []byte, responseCode int, responseBody string, startTime, endTime int64, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp string) {
if c.logDebug {
c.zapLog.WithLogger().Sugar().Infof("[golog.gin.mongoRecordXml]收到保存数据要求:%s,%s", c.mongoConfig.databaseName, c.mongoConfig.collectionName)
@@ -311,7 +305,6 @@ func (c *GinClient) mongoRecordXml(ginCtx *gin.Context, traceId string, requestT
RequestUrlQuery: ginCtx.Request.URL.Query(), //【请求】请求URL参数
RequestIp: clientIp, //【请求】请求客户端Ip
RequestIpCountry: requestClientIpCountry, //【请求】请求客户端国家
RequestIpRegion: requestClientIpRegion, //【请求】请求客户端区域
RequestIpProvince: requestClientIpProvince, //【请求】请求客户端省份
RequestIpCity: requestClientIpCity, //【请求】请求客户端城市
RequestIpIsp: requestClientIpIsp, //【请求】请求客户端运营商
@@ -404,22 +397,21 @@ func (c *GinClient) MongoMiddleware() gin.HandlerFunc {
clientIp := gorequest.ClientIp(ginCtx.Request)
requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", "", ""
requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp := "", "", "", ""
if c.ipService != nil {
if net.ParseIP(clientIp).To4() != nil {
// IPv4
_, info := c.ipService.Ipv4(clientIp)
requestClientIpCountry = info.Country
requestClientIpRegion = info.Region
requestClientIpProvince = info.Province
requestClientIpCity = info.City
requestClientIpIsp = info.ISP
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ip2regionV2info.Country
requestClientIpProvince = info.Ip2regionV2info.Province
requestClientIpCity = info.Ip2regionV2info.City
requestClientIpIsp = info.Ip2regionV2info.Operator
} else if net.ParseIP(clientIp).To16() != nil {
// IPv6
info := c.ipService.Ipv6(clientIp)
requestClientIpCountry = info.Country
requestClientIpProvince = info.Province
requestClientIpCity = info.City
info := c.ipService.Analyse(clientIp)
requestClientIpCountry = info.Ipv6wryInfo.Country
requestClientIpProvince = info.Ipv6wryInfo.Province
requestClientIpCity = info.Ipv6wryInfo.City
}
}
@@ -432,12 +424,12 @@ func (c *GinClient) MongoMiddleware() gin.HandlerFunc {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.MongoMiddleware]准备使用{mongoRecordJson}保存数据:%s", data)
}
c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.mongoRecordJson(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
} else {
if c.logDebug {
c.zapLog.WithTraceIdStr(traceId).Sugar().Infof("[golog.gin.MongoMiddleware]准备使用{mongoRecordXml}保存数据:%s", data)
}
c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpRegion, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
c.mongoRecordXml(ginCtx, traceId, requestTime, data, responseCode, responseBody, startTime, endTime, clientIp, requestClientIpCountry, requestClientIpProvince, requestClientIpCity, requestClientIpIsp)
}
}
}()

@@ -6,14 +6,14 @@ require (
github.com/gin-gonic/gin v1.8.1
github.com/natefinch/lumberjack v2.0.0+incompatible
go.dtapp.net/dorm v1.0.33
go.dtapp.net/goip v1.0.30
go.dtapp.net/goip v1.0.33
go.dtapp.net/gorequest v1.0.31
go.dtapp.net/gotime v1.0.5
go.dtapp.net/gotrace_id v1.0.6
go.dtapp.net/gourl v1.0.0
go.mongodb.org/mongo-driver v1.10.2
go.uber.org/zap v1.23.0
gorm.io/gorm v1.23.8
gorm.io/gorm v1.23.9
)
require (
@@ -48,6 +48,8 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.6.6 // indirect
github.com/oschwald/geoip2-golang v1.8.0 // indirect
github.com/oschwald/maxminddb-golang v1.10.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda // indirect
@@ -78,7 +80,7 @@ require (
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
golang.org/x/sync v0.0.0-20220907140024-f12130a52804 // indirect
golang.org/x/sys v0.0.0-20220913175220-63ea55921009 // indirect
golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect

@@ -362,6 +362,10 @@ github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/oschwald/geoip2-golang v1.8.0 h1:KfjYB8ojCEn/QLqsDU0AzrJ3R5Qa9vFlx3z6SLNcKTs=
github.com/oschwald/geoip2-golang v1.8.0/go.mod h1:R7bRvYjOeaoenAp9sKRS8GX5bJWcZ0laWO5+DauEktw=
github.com/oschwald/maxminddb-golang v1.10.0 h1:Xp1u0ZhqkSuopaKmk1WwHtjF0H9Hd9181uj2MQ5Vndg=
github.com/oschwald/maxminddb-golang v1.10.0/go.mod h1:Y2ELenReaLAZ0b400URyGwvYxHV1dLIxBuyOsyYjHK0=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
@@ -497,8 +501,8 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.dtapp.net/dorm v1.0.33 h1:QRAVEQ6Uf3WENSOrXytzzH+PjH90JySowd3jbB9PQjw=
go.dtapp.net/dorm v1.0.33/go.mod h1:4WNSzrUGs7YIudq1cRZQNkHOlPAbG6thI3mXX1tQcYY=
go.dtapp.net/goip v1.0.30 h1:/wP2ewSNWLzG2Oh2VsTfQCv/2rw1KKi9XerD4rQaMLM=
go.dtapp.net/goip v1.0.30/go.mod h1:9l8e/slVanziGXfvrUwOMx6028EV/lzN5vVpixmtUYY=
go.dtapp.net/goip v1.0.33 h1:n6dLTfwiWp2Pw5pKjHSv1QSDJtuLEWNCjNHOTqJQHc0=
go.dtapp.net/goip v1.0.33/go.mod h1:EctL6B8ue/kZKPr+kKZPU6YTTpNhihane9BHHffwo6Q=
go.dtapp.net/gorandom v1.0.1 h1:IWfMClh1ECPvyUjlqD7MwLq4mZdUusD1qAwAdsvEJBs=
go.dtapp.net/gorandom v1.0.1/go.mod h1:ZPdgalKpvFV/ATQqR0k4ns/F/IpITAZpx6WkWirr5Y8=
go.dtapp.net/gorequest v1.0.31 h1:r/OoU5Y00TbJjkQtpvwjsb/pllqO0UQQjFRY1veZYZc=
@@ -639,8 +643,8 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220913175220-63ea55921009 h1:PuvuRMeLWqsf/ZdT1UUZz0syhioyv1mzuFZsXs4fvhw=
golang.org/x/sys v0.0.0-20220913175220-63ea55921009/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 h1:ohgcoMbSofXygzo6AD2I1kz3BFmW1QArPYTtwEM3UXc=
golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -729,8 +733,9 @@ gorm.io/driver/mysql v1.3.6/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10
gorm.io/driver/postgres v1.3.9 h1:lWGiVt5CijhQAg0PWB7Od1RNcBw/jS4d2cAScBcSDXg=
gorm.io/driver/postgres v1.3.9/go.mod h1:qw/FeqjxmYqW5dBcYNBsnhQULIApQdk7YuuDPktVi1U=
gorm.io/gorm v1.23.7/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.23.8 h1:h8sGJ+biDgBA1AD1Ha9gFCx7h8npU7AsLdlkX0n2TpE=
gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.23.9 h1:NSHG021i+MCznokeXR3udGaNyFyBQJW8MbjrJMVCfGw=
gorm.io/gorm v1.23.9/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@@ -0,0 +1,3 @@
.vscode
*.out
*.test

@@ -0,0 +1,3 @@
[submodule "test-data"]
path = test-data
url = https://github.com/maxmind/MaxMind-DB.git

@@ -0,0 +1,472 @@
[run]
deadline = "10m"
tests = true
[linters]
disable-all = true
enable = [
"asciicheck",
"bidichk",
"bodyclose",
"containedctx",
"contextcheck",
"deadcode",
"depguard",
"durationcheck",
"errcheck",
"errchkjson",
"errname",
"errorlint",
"exportloopref",
"forbidigo",
#"forcetypeassert",
"goconst",
"gocyclo",
"gocritic",
"godot",
"gofumpt",
"gomodguard",
"gosec",
"gosimple",
"govet",
"grouper",
"ineffassign",
"lll",
"makezero",
"maintidx",
"misspell",
"nakedret",
"nilerr",
"noctx",
"nolintlint",
"nosprintfhostport",
"predeclared",
"revive",
"rowserrcheck",
"sqlclosecheck",
"staticcheck",
"structcheck",
"stylecheck",
"tenv",
"tparallel",
"typecheck",
"unconvert",
"unparam",
"unused",
"varcheck",
"vetshadow",
"wastedassign",
]
# Please note that we only use depguard for stdlib as gomodguard only
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
[linters-settings.depguard]
list-type = "blacklist"
include-go-root = true
packages = [
# ioutil is deprecated. The functions have been moved elsewhere:
# https://golang.org/doc/go1.16#ioutil
"io/ioutil",
]
[linters-settings.errcheck]
# Don't allow setting of error to the blank identifier. If there is a legitimate
# reason, there should be a nolint with an explanation.
check-blank = true
exclude-functions = [
# If we are rolling back a transaction, we are often already in an error
# state.
'(*database/sql.Tx).Rollback',
# It is reasonable to ignore errors if Cleanup fails in most cases.
'(*github.com/google/renameio/v2.PendingFile).Cleanup',
# We often don't care if removing a file failed (e.g., it doesn't exist)
'os.Remove',
'os.RemoveAll',
]
# Ignoring Close so that we don't have to have a bunch of
# `defer func() { _ = r.Close() }()` constructs when we
# don't actually care about the error.
ignore = "Close,fmt:.*"
[linters-settings.errorlint]
errorf = true
asserts = true
comparison = true
[linters-settings.exhaustive]
default-signifies-exhaustive = true
[linters-settings.forbidigo]
# Forbid the following identifiers
forbid = [
"^minFraud*",
"^maxMind*",
]
[linters-settings.gocritic]
enabled-checks = [
"appendAssign",
"appendCombine",
"argOrder",
"assignOp",
"badCall",
"badCond",
"badLock",
"badRegexp",
"badSorting",
"boolExprSimplify",
"builtinShadow",
"builtinShadowDecl",
"captLocal",
"caseOrder",
"codegenComment",
"commentedOutCode",
"commentedOutImport",
"commentFormatting",
"defaultCaseOrder",
# Revive's defer rule already captures this. This caught no extra cases.
# "deferInLoop",
"deferUnlambda",
"deprecatedComment",
"docStub",
"dupArg",
"dupBranchBody",
"dupCase",
"dupImport",
"dupSubExpr",
"dynamicFmtString",
"elseif",
"emptyDecl",
"emptyFallthrough",
"emptyStringTest",
"equalFold",
"evalOrder",
"exitAfterDefer",
"exposedSyncMutex",
"externalErrorReassign",
# Given that all of our code runs on Linux and the / separator should
# work fine, this seems less important.
# "filepathJoin",
"flagDeref",
"flagName",
"hexLiteral",
"ifElseChain",
"importShadow",
"indexAlloc",
"initClause",
"ioutilDeprecated",
"mapKey",
"methodExprCall",
"nestingReduce",
"newDeref",
"nilValReturn",
"octalLiteral",
"offBy1",
"paramTypeCombine",
"preferDecodeRune",
"preferFilepathJoin",
"preferFprint",
"preferStringWriter",
"preferWriteByte",
"ptrToRefParam",
"rangeExprCopy",
"rangeValCopy",
"redundantSprint",
"regexpMust",
"regexpPattern",
# This might be good, but I don't think we want to encourage
# significant changes to regexes as we port stuff from Perl.
# "regexpSimplify",
"ruleguard",
"singleCaseSwitch",
"sliceClear",
"sloppyLen",
# This seems like it might also be good, but a lot of existing code
# fails.
# "sloppyReassign",
"returnAfterHttpError",
"sloppyTypeAssert",
"sortSlice",
"sprintfQuotedString",
"sqlQuery",
"stringsCompare",
"stringXbytes",
"switchTrue",
"syncMapLoadAndDelete",
"timeExprSimplify",
"todoCommentWithoutDetail",
"tooManyResultsChecker",
"truncateCmp",
"typeAssertChain",
"typeDefFirst",
"typeSwitchVar",
"typeUnparen",
"underef",
"unlabelStmt",
"unlambda",
# I am not sure we would want this linter and a lot of existing
# code fails.
# "unnamedResult",
"unnecessaryBlock",
"unnecessaryDefer",
"unslice",
"valSwap",
"weakCond",
"wrapperFunc",
"yodaStyleExpr",
# This requires explanations for "nolint" directives. This would be
# nice for gosec ones, but I am not sure we want it generally unless
# we can get the false positive rate lower.
# "whyNoLint"
]
[linters-settings.gofumpt]
extra-rules = true
lang-version = "1.18"
[linters-settings.govet]
"enable-all" = true
[linters-settings.lll]
line-length = 120
tab-width = 4
[linters-settings.nolintlint]
allow-leading-space = false
allow-unused = false
allow-no-explanation = ["lll", "misspell"]
require-explanation = true
require-specific = true
[linters-settings.revive]
ignore-generated-header = true
severity = "warning"
# This might be nice but it is so common that it is hard
# to enable.
# [[linters-settings.revive.rules]]
# name = "add-constant"
# [[linters-settings.revive.rules]]
# name = "argument-limit"
[[linters-settings.revive.rules]]
name = "atomic"
[[linters-settings.revive.rules]]
name = "bare-return"
[[linters-settings.revive.rules]]
name = "blank-imports"
[[linters-settings.revive.rules]]
name = "bool-literal-in-expr"
[[linters-settings.revive.rules]]
name = "call-to-gc"
# [[linters-settings.revive.rules]]
# name = "cognitive-complexity"
# Probably a good rule, but we have a lot of names that
# only have case differences.
# [[linters-settings.revive.rules]]
# name = "confusing-naming"
# [[linters-settings.revive.rules]]
# name = "confusing-results"
[[linters-settings.revive.rules]]
name = "constant-logical-expr"
[[linters-settings.revive.rules]]
name = "context-as-argument"
[[linters-settings.revive.rules]]
name = "context-keys-type"
# [[linters-settings.revive.rules]]
# name = "cyclomatic"
# [[linters-settings.revive.rules]]
# name = "deep-exit"
[[linters-settings.revive.rules]]
name = "defer"
[[linters-settings.revive.rules]]
name = "dot-imports"
[[linters-settings.revive.rules]]
name = "duplicated-imports"
[[linters-settings.revive.rules]]
name = "early-return"
[[linters-settings.revive.rules]]
name = "empty-block"
[[linters-settings.revive.rules]]
name = "empty-lines"
[[linters-settings.revive.rules]]
name = "errorf"
[[linters-settings.revive.rules]]
name = "error-naming"
[[linters-settings.revive.rules]]
name = "error-return"
[[linters-settings.revive.rules]]
name = "error-strings"
[[linters-settings.revive.rules]]
name = "exported"
# [[linters-settings.revive.rules]]
# name = "file-header"
# We have a lot of flag parameters. This linter probably makes
# a good point, but we would need some cleanup or a lot of nolints.
# [[linters-settings.revive.rules]]
# name = "flag-parameter"
# [[linters-settings.revive.rules]]
# name = "function-result-limit"
[[linters-settings.revive.rules]]
name = "get-return"
[[linters-settings.revive.rules]]
name = "identical-branches"
[[linters-settings.revive.rules]]
name = "if-return"
[[linters-settings.revive.rules]]
name = "imports-blacklist"
[[linters-settings.revive.rules]]
name = "import-shadowing"
[[linters-settings.revive.rules]]
name = "increment-decrement"
[[linters-settings.revive.rules]]
name = "indent-error-flow"
# [[linters-settings.revive.rules]]
# name = "line-length-limit"
# [[linters-settings.revive.rules]]
# name = "max-public-structs"
[[linters-settings.revive.rules]]
name = "modifies-parameter"
[[linters-settings.revive.rules]]
name = "modifies-value-receiver"
# We frequently use nested structs, particularly in tests.
# [[linters-settings.revive.rules]]
# name = "nested-structs"
[[linters-settings.revive.rules]]
name = "optimize-operands-order"
[[linters-settings.revive.rules]]
name = "package-comments"
[[linters-settings.revive.rules]]
name = "range"
[[linters-settings.revive.rules]]
name = "range-val-address"
[[linters-settings.revive.rules]]
name = "range-val-in-closure"
[[linters-settings.revive.rules]]
name = "receiver-naming"
[[linters-settings.revive.rules]]
name = "redefines-builtin-id"
[[linters-settings.revive.rules]]
name = "string-of-int"
[[linters-settings.revive.rules]]
name = "struct-tag"
[[linters-settings.revive.rules]]
name = "superfluous-else"
[[linters-settings.revive.rules]]
name = "time-naming"
[[linters-settings.revive.rules]]
name = "unconditional-recursion"
[[linters-settings.revive.rules]]
name = "unexported-naming"
[[linters-settings.revive.rules]]
name = "unexported-return"
# This is covered elsewhere and we want to ignore some
# functions such as fmt.Fprintf.
# [[linters-settings.revive.rules]]
# name = "unhandled-error"
[[linters-settings.revive.rules]]
name = "unnecessary-stmt"
[[linters-settings.revive.rules]]
name = "unreachable-code"
[[linters-settings.revive.rules]]
name = "unused-parameter"
# We generally have unused receivers in tests for meeting the
# requirements of an interface.
# [[linters-settings.revive.rules]]
# name = "unused-receiver"
# This probably makes sense after we upgrade to 1.18
# [[linters-settings.revive.rules]]
# name = "use-any"
[[linters-settings.revive.rules]]
name = "useless-break"
[[linters-settings.revive.rules]]
name = "var-declaration"
[[linters-settings.revive.rules]]
name = "var-naming"
[[linters-settings.revive.rules]]
name = "waitgroup-by-value"
[linters-settings.unparam]
check-exported = true
[[issues.exclude-rules]]
linters = [
"govet"
]
# we want to enable almost all govet rules. It is easier to just filter out
# the ones we don't want:
#
# * fieldalignment - way too noisy. Although it is very useful in particular
# cases where we are trying to use as little memory as possible, having
# it go off on every struct isn't helpful.
# * shadow - although often useful, it complains about _many_ err
# shadowing assignments and some others where shadowing is clear.
text = "^(fieldalignment|shadow)"

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

@@ -0,0 +1,93 @@
# GeoIP2 Reader for Go #
[![PkgGoDev](https://pkg.go.dev/badge/github.com/oschwald/geoip2-golang)](https://pkg.go.dev/github.com/oschwald/geoip2-golang)
This library reads MaxMind [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/)
and [GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases.
This library is built using
[the Go maxminddb reader](https://github.com/oschwald/maxminddb-golang).
All data for the database record is decoded using this library. If you only
need several fields, you may get superior performance by using maxminddb's
`Lookup` directly with a result struct that only contains the required fields.
(See [example_test.go](https://github.com/oschwald/maxminddb-golang/blob/main/example_test.go)
in the maxminddb repository for an example of this.)
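For illustration, a minimal sketch of that approach (the `onlyCountry` struct and the `GeoLite2-City.mmdb` path are assumptions for the example, not part of this library's API):
```go
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

// onlyCountry decodes just the country ISO code and skips the rest of the record.
type onlyCountry struct {
	Country struct {
		IsoCode string `maxminddb:"iso_code"`
	} `maxminddb:"country"`
}

func main() {
	db, err := maxminddb.Open("GeoLite2-City.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var record onlyCountry
	if err := db.Lookup(net.ParseIP("81.2.69.142"), &record); err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.IsoCode) // e.g. "GB"
}
```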
## Installation ##
```
go get github.com/oschwald/geoip2-golang
```
## Usage ##
[See GoDoc](http://godoc.org/github.com/oschwald/geoip2-golang) for
documentation and examples.
## Example ##
```go
package main
import (
"fmt"
"log"
"net"
"github.com/oschwald/geoip2-golang"
)
func main() {
db, err := geoip2.Open("GeoIP2-City.mmdb")
if err != nil {
log.Fatal(err)
}
defer db.Close()
// If you are using strings that may be invalid, check that ip is not nil
ip := net.ParseIP("81.2.69.142")
record, err := db.City(ip)
if err != nil {
log.Fatal(err)
}
fmt.Printf("Portuguese (BR) city name: %v\n", record.City.Names["pt-BR"])
if len(record.Subdivisions) > 0 {
fmt.Printf("English subdivision name: %v\n", record.Subdivisions[0].Names["en"])
}
fmt.Printf("Russian country name: %v\n", record.Country.Names["ru"])
fmt.Printf("ISO country code: %v\n", record.Country.IsoCode)
fmt.Printf("Time zone: %v\n", record.Location.TimeZone)
fmt.Printf("Coordinates: %v, %v\n", record.Location.Latitude, record.Location.Longitude)
// Output:
// Portuguese (BR) city name: Londres
// English subdivision name: England
// Russian country name: Великобритания
// ISO country code: GB
// Time zone: Europe/London
// Coordinates: 51.5142, -0.0931
}
```
## Testing ##
Make sure you have checked out the test-data submodule:
```
git submodule init
git submodule update
```
Execute the test suite:
```
go test
```
## Contributing ##
Contributions welcome! Please fork the repository and open a pull request
with your changes.
## License ##
This is free software, licensed under the ISC license.

@@ -0,0 +1,418 @@
// Package geoip2 provides an easy-to-use API for the MaxMind GeoIP2 and
// GeoLite2 databases; this package does not support GeoIP Legacy databases.
//
// The structs provided by this package match the internal structure of
// the data in the MaxMind databases.
//
// See github.com/oschwald/maxminddb-golang for more advanced use cases.
package geoip2
import (
"fmt"
"net"
"github.com/oschwald/maxminddb-golang"
)
// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise
// database.
type Enterprise struct {
City struct {
Confidence uint8 `maxminddb:"confidence"`
GeoNameID uint `maxminddb:"geoname_id"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"city"`
Continent struct {
Code string `maxminddb:"code"`
GeoNameID uint `maxminddb:"geoname_id"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"continent"`
Country struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
Confidence uint8 `maxminddb:"confidence"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
} `maxminddb:"country"`
Location struct {
AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
Latitude float64 `maxminddb:"latitude"`
Longitude float64 `maxminddb:"longitude"`
MetroCode uint `maxminddb:"metro_code"`
TimeZone string `maxminddb:"time_zone"`
} `maxminddb:"location"`
Postal struct {
Code string `maxminddb:"code"`
Confidence uint8 `maxminddb:"confidence"`
} `maxminddb:"postal"`
RegisteredCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
Confidence uint8 `maxminddb:"confidence"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
} `maxminddb:"registered_country"`
RepresentedCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
Type string `maxminddb:"type"`
} `maxminddb:"represented_country"`
Subdivisions []struct {
Confidence uint8 `maxminddb:"confidence"`
GeoNameID uint `maxminddb:"geoname_id"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"subdivisions"`
Traits struct {
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
ConnectionType string `maxminddb:"connection_type"`
Domain string `maxminddb:"domain"`
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
IsLegitimateProxy bool `maxminddb:"is_legitimate_proxy"`
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
ISP string `maxminddb:"isp"`
MobileCountryCode string `maxminddb:"mobile_country_code"`
MobileNetworkCode string `maxminddb:"mobile_network_code"`
Organization string `maxminddb:"organization"`
StaticIPScore float64 `maxminddb:"static_ip_score"`
UserType string `maxminddb:"user_type"`
} `maxminddb:"traits"`
}
// The City struct corresponds to the data in the GeoIP2/GeoLite2 City
// databases.
type City struct {
City struct {
GeoNameID uint `maxminddb:"geoname_id"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"city"`
Continent struct {
Code string `maxminddb:"code"`
GeoNameID uint `maxminddb:"geoname_id"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"continent"`
Country struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"country"`
Location struct {
AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
Latitude float64 `maxminddb:"latitude"`
Longitude float64 `maxminddb:"longitude"`
MetroCode uint `maxminddb:"metro_code"`
TimeZone string `maxminddb:"time_zone"`
} `maxminddb:"location"`
Postal struct {
Code string `maxminddb:"code"`
} `maxminddb:"postal"`
RegisteredCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"registered_country"`
RepresentedCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
Type string `maxminddb:"type"`
} `maxminddb:"represented_country"`
Subdivisions []struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"subdivisions"`
Traits struct {
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
} `maxminddb:"traits"`
}
// The Country struct corresponds to the data in the GeoIP2/GeoLite2
// Country databases.
type Country struct {
Continent struct {
Code string `maxminddb:"code"`
GeoNameID uint `maxminddb:"geoname_id"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"continent"`
Country struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"country"`
RegisteredCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
} `maxminddb:"registered_country"`
RepresentedCountry struct {
GeoNameID uint `maxminddb:"geoname_id"`
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
IsoCode string `maxminddb:"iso_code"`
Names map[string]string `maxminddb:"names"`
Type string `maxminddb:"type"`
} `maxminddb:"represented_country"`
Traits struct {
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
} `maxminddb:"traits"`
}
// The AnonymousIP struct corresponds to the data in the GeoIP2
// Anonymous IP database.
type AnonymousIP struct {
IsAnonymous bool `maxminddb:"is_anonymous"`
IsAnonymousVPN bool `maxminddb:"is_anonymous_vpn"`
IsHostingProvider bool `maxminddb:"is_hosting_provider"`
IsPublicProxy bool `maxminddb:"is_public_proxy"`
IsResidentialProxy bool `maxminddb:"is_residential_proxy"`
IsTorExitNode bool `maxminddb:"is_tor_exit_node"`
}
// The ASN struct corresponds to the data in the GeoLite2 ASN database.
type ASN struct {
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
}
// The ConnectionType struct corresponds to the data in the GeoIP2
// Connection-Type database.
type ConnectionType struct {
ConnectionType string `maxminddb:"connection_type"`
}
// The Domain struct corresponds to the data in the GeoIP2 Domain database.
type Domain struct {
Domain string `maxminddb:"domain"`
}
// The ISP struct corresponds to the data in the GeoIP2 ISP database.
type ISP struct {
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
ISP string `maxminddb:"isp"`
MobileCountryCode string `maxminddb:"mobile_country_code"`
MobileNetworkCode string `maxminddb:"mobile_network_code"`
Organization string `maxminddb:"organization"`
}
type databaseType int
const (
isAnonymousIP = 1 << iota
isASN
isCity
isConnectionType
isCountry
isDomain
isEnterprise
isISP
)
// Reader holds the maxminddb.Reader struct. It can be created using the
// Open and FromBytes functions.
type Reader struct {
mmdbReader *maxminddb.Reader
databaseType databaseType
}
// InvalidMethodError is returned when a lookup method is called on a
// database that it does not support. For instance, calling the ISP method
// on a City database.
type InvalidMethodError struct {
Method string
DatabaseType string
}
func (e InvalidMethodError) Error() string {
return fmt.Sprintf(`geoip2: the %s method does not support the %s database`,
e.Method, e.DatabaseType)
}
// UnknownDatabaseTypeError is returned when an unknown database type is
// opened.
type UnknownDatabaseTypeError struct {
DatabaseType string
}
func (e UnknownDatabaseTypeError) Error() string {
return fmt.Sprintf(`geoip2: reader does not support the %q database type`,
e.DatabaseType)
}
// Open takes a string path to a file and returns a Reader struct or an error.
// The database file is opened using a memory map. Use the Close method on the
// Reader object to return the resources to the system.
func Open(file string) (*Reader, error) {
reader, err := maxminddb.Open(file)
if err != nil {
return nil, err
}
dbType, err := getDBType(reader)
return &Reader{reader, dbType}, err
}
// FromBytes takes a byte slice corresponding to a GeoIP2/GeoLite2 database
// file and returns a Reader struct or an error. Note that the byte slice is
// used directly; any modification of it after opening the database will result
// in errors while reading from the database.
func FromBytes(bytes []byte) (*Reader, error) {
reader, err := maxminddb.FromBytes(bytes)
if err != nil {
return nil, err
}
dbType, err := getDBType(reader)
return &Reader{reader, dbType}, err
}
func getDBType(reader *maxminddb.Reader) (databaseType, error) {
switch reader.Metadata.DatabaseType {
case "GeoIP2-Anonymous-IP":
return isAnonymousIP, nil
case "DBIP-ASN-Lite (compat=GeoLite2-ASN)",
"GeoLite2-ASN":
return isASN, nil
// We allow City lookups on Country for back compat
case "DBIP-City-Lite",
"DBIP-Country-Lite",
"DBIP-Country",
"DBIP-Location (compat=City)",
"GeoLite2-City",
"GeoIP2-City",
"GeoIP2-City-Africa",
"GeoIP2-City-Asia-Pacific",
"GeoIP2-City-Europe",
"GeoIP2-City-North-America",
"GeoIP2-City-South-America",
"GeoIP2-Precision-City",
"GeoLite2-Country",
"GeoIP2-Country":
return isCity | isCountry, nil
case "GeoIP2-Connection-Type":
return isConnectionType, nil
case "GeoIP2-Domain":
return isDomain, nil
case "DBIP-ISP (compat=Enterprise)",
"DBIP-Location-ISP (compat=Enterprise)",
"GeoIP2-Enterprise":
return isEnterprise | isCity | isCountry, nil
case "GeoIP2-ISP",
"GeoIP2-Precision-ISP":
return isISP | isASN, nil
default:
return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType}
}
}
// Enterprise takes an IP address as a net.IP struct and returns an Enterprise
// struct and/or an error. This is intended to be used with the GeoIP2
// Enterprise database.
func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) {
if isEnterprise&r.databaseType == 0 {
return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType}
}
var enterprise Enterprise
err := r.mmdbReader.Lookup(ipAddress, &enterprise)
return &enterprise, err
}
// City takes an IP address as a net.IP struct and returns a City struct
// and/or an error. Although this can be used with other databases, this
// method generally should be used with the GeoIP2 or GeoLite2 City databases.
func (r *Reader) City(ipAddress net.IP) (*City, error) {
if isCity&r.databaseType == 0 {
return nil, InvalidMethodError{"City", r.Metadata().DatabaseType}
}
var city City
err := r.mmdbReader.Lookup(ipAddress, &city)
return &city, err
}
// Country takes an IP address as a net.IP struct and returns a Country struct
// and/or an error. Although this can be used with other databases, this
// method generally should be used with the GeoIP2 or GeoLite2 Country
// databases.
func (r *Reader) Country(ipAddress net.IP) (*Country, error) {
if isCountry&r.databaseType == 0 {
return nil, InvalidMethodError{"Country", r.Metadata().DatabaseType}
}
var country Country
err := r.mmdbReader.Lookup(ipAddress, &country)
return &country, err
}
// AnonymousIP takes an IP address as a net.IP struct and returns an
// AnonymousIP struct and/or an error.
func (r *Reader) AnonymousIP(ipAddress net.IP) (*AnonymousIP, error) {
if isAnonymousIP&r.databaseType == 0 {
return nil, InvalidMethodError{"AnonymousIP", r.Metadata().DatabaseType}
}
var anonIP AnonymousIP
err := r.mmdbReader.Lookup(ipAddress, &anonIP)
return &anonIP, err
}
// ASN takes an IP address as a net.IP struct and returns an ASN struct and/or
// an error.
func (r *Reader) ASN(ipAddress net.IP) (*ASN, error) {
if isASN&r.databaseType == 0 {
return nil, InvalidMethodError{"ASN", r.Metadata().DatabaseType}
}
var val ASN
err := r.mmdbReader.Lookup(ipAddress, &val)
return &val, err
}
// ConnectionType takes an IP address as a net.IP struct and returns a
// ConnectionType struct and/or an error.
func (r *Reader) ConnectionType(ipAddress net.IP) (*ConnectionType, error) {
if isConnectionType&r.databaseType == 0 {
return nil, InvalidMethodError{"ConnectionType", r.Metadata().DatabaseType}
}
var val ConnectionType
err := r.mmdbReader.Lookup(ipAddress, &val)
return &val, err
}
// Domain takes an IP address as a net.IP struct and returns a
// Domain struct and/or an error.
func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) {
if isDomain&r.databaseType == 0 {
return nil, InvalidMethodError{"Domain", r.Metadata().DatabaseType}
}
var val Domain
err := r.mmdbReader.Lookup(ipAddress, &val)
return &val, err
}
// ISP takes an IP address as a net.IP struct and returns an ISP struct and/or
// an error.
func (r *Reader) ISP(ipAddress net.IP) (*ISP, error) {
if isISP&r.databaseType == 0 {
return nil, InvalidMethodError{"ISP", r.Metadata().DatabaseType}
}
var val ISP
err := r.mmdbReader.Lookup(ipAddress, &val)
return &val, err
}
// Metadata takes no arguments and returns a struct containing metadata about
// the MaxMind database in use by the Reader.
func (r *Reader) Metadata() maxminddb.Metadata {
return r.mmdbReader.Metadata
}
// Close unmaps the database file from virtual memory and returns the
// resources to the system.
func (r *Reader) Close() error {
return r.mmdbReader.Close()
}

@@ -0,0 +1,4 @@
.vscode
*.out
*.sw?
*.test

@@ -0,0 +1,3 @@
[submodule "test-data"]
path = test-data
url = https://github.com/maxmind/MaxMind-DB.git

@@ -0,0 +1,472 @@
[run]
deadline = "10m"
tests = true
[linters]
disable-all = true
enable = [
"asciicheck",
"bidichk",
"bodyclose",
"containedctx",
"contextcheck",
"deadcode",
"depguard",
"durationcheck",
"errcheck",
"errchkjson",
"errname",
"errorlint",
"exportloopref",
"forbidigo",
#"forcetypeassert",
"goconst",
"gocyclo",
"gocritic",
"godot",
"gofumpt",
"gomodguard",
"gosec",
"gosimple",
"govet",
"grouper",
"ineffassign",
"lll",
"makezero",
"maintidx",
"misspell",
"nakedret",
"nilerr",
"noctx",
"nolintlint",
"nosprintfhostport",
"predeclared",
"revive",
"rowserrcheck",
"sqlclosecheck",
"staticcheck",
"structcheck",
"stylecheck",
"tenv",
"tparallel",
"typecheck",
"unconvert",
"unparam",
"unused",
"varcheck",
"vetshadow",
"wastedassign",
]
# Please note that we only use depguard for stdlib as gomodguard only
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
[linters-settings.depguard]
list-type = "blacklist"
include-go-root = true
packages = [
# ioutil is deprecated. The functions have been moved elsewhere:
# https://golang.org/doc/go1.16#ioutil
"io/ioutil",
]
[linters-settings.errcheck]
# Don't allow setting of error to the blank identifier. If there is a legitimate
# reason, there should be a nolint with an explanation.
check-blank = true
exclude-functions = [
# If we are rolling back a transaction, we are often already in an error
# state.
'(*database/sql.Tx).Rollback',
# It is reasonable to ignore errors if Cleanup fails in most cases.
'(*github.com/google/renameio/v2.PendingFile).Cleanup',
# We often don't care if removing a file failed (e.g., it doesn't exist)
'os.Remove',
'os.RemoveAll',
]
# Ignoring Close so that we don't have to have a bunch of
# `defer func() { _ = r.Close() }()` constructs when we
# don't actually care about the error.
ignore = "Close,fmt:.*"
[linters-settings.errorlint]
errorf = true
asserts = true
comparison = true
[linters-settings.exhaustive]
default-signifies-exhaustive = true
[linters-settings.forbidigo]
# Forbid the following identifiers
forbid = [
"^minFraud*",
"^maxMind*",
]
[linters-settings.gocritic]
enabled-checks = [
"appendAssign",
"appendCombine",
"argOrder",
"assignOp",
"badCall",
"badCond",
"badLock",
"badRegexp",
"badSorting",
"boolExprSimplify",
"builtinShadow",
"builtinShadowDecl",
"captLocal",
"caseOrder",
"codegenComment",
"commentedOutCode",
"commentedOutImport",
"commentFormatting",
"defaultCaseOrder",
# Revive's defer rule already captures this. This caught no extra cases.
# "deferInLoop",
"deferUnlambda",
"deprecatedComment",
"docStub",
"dupArg",
"dupBranchBody",
"dupCase",
"dupImport",
"dupSubExpr",
"dynamicFmtString",
"elseif",
"emptyDecl",
"emptyFallthrough",
"emptyStringTest",
"equalFold",
"evalOrder",
"exitAfterDefer",
"exposedSyncMutex",
"externalErrorReassign",
# Given that all of our code runs on Linux and the / separator should
# work fine, this seems less important.
# "filepathJoin",
"flagDeref",
"flagName",
"hexLiteral",
"ifElseChain",
"importShadow",
"indexAlloc",
"initClause",
"ioutilDeprecated",
"mapKey",
"methodExprCall",
"nestingReduce",
"newDeref",
"nilValReturn",
"octalLiteral",
"offBy1",
"paramTypeCombine",
"preferDecodeRune",
"preferFilepathJoin",
"preferFprint",
"preferStringWriter",
"preferWriteByte",
"ptrToRefParam",
"rangeExprCopy",
"rangeValCopy",
"redundantSprint",
"regexpMust",
"regexpPattern",
# This might be good, but I don't think we want to encourage
# significant changes to regexes as we port stuff from Perl.
# "regexpSimplify",
"ruleguard",
"singleCaseSwitch",
"sliceClear",
"sloppyLen",
# This seems like it might also be good, but a lot of existing code
# fails.
# "sloppyReassign",
"returnAfterHttpError",
"sloppyTypeAssert",
"sortSlice",
"sprintfQuotedString",
"sqlQuery",
"stringsCompare",
"stringXbytes",
"switchTrue",
"syncMapLoadAndDelete",
"timeExprSimplify",
"todoCommentWithoutDetail",
"tooManyResultsChecker",
"truncateCmp",
"typeAssertChain",
"typeDefFirst",
"typeSwitchVar",
"typeUnparen",
"underef",
"unlabelStmt",
"unlambda",
# I am not sure we would want this linter and a lot of existing
# code fails.
# "unnamedResult",
"unnecessaryBlock",
"unnecessaryDefer",
"unslice",
"valSwap",
"weakCond",
"wrapperFunc",
"yodaStyleExpr",
# This requires explanations for "nolint" directives. This would be
# nice for gosec ones, but I am not sure we want it generally unless
# we can get the false positive rate lower.
# "whyNoLint"
]
[linters-settings.gofumpt]
extra-rules = true
lang-version = "1.18"
[linters-settings.govet]
"enable-all" = true
[linters-settings.lll]
line-length = 120
tab-width = 4
[linters-settings.nolintlint]
allow-leading-space = false
allow-unused = false
allow-no-explanation = ["lll", "misspell"]
require-explanation = true
require-specific = true
[linters-settings.revive]
ignore-generated-header = true
severity = "warning"
# This might be nice but it is so common that it is hard
# to enable.
# [[linters-settings.revive.rules]]
# name = "add-constant"
# [[linters-settings.revive.rules]]
# name = "argument-limit"
[[linters-settings.revive.rules]]
name = "atomic"
[[linters-settings.revive.rules]]
name = "bare-return"
[[linters-settings.revive.rules]]
name = "blank-imports"
[[linters-settings.revive.rules]]
name = "bool-literal-in-expr"
[[linters-settings.revive.rules]]
name = "call-to-gc"
# [[linters-settings.revive.rules]]
# name = "cognitive-complexity"
# Probably a good rule, but we have a lot of names that
# only have case differences.
# [[linters-settings.revive.rules]]
# name = "confusing-naming"
# [[linters-settings.revive.rules]]
# name = "confusing-results"
[[linters-settings.revive.rules]]
name = "constant-logical-expr"
[[linters-settings.revive.rules]]
name = "context-as-argument"
[[linters-settings.revive.rules]]
name = "context-keys-type"
# [[linters-settings.revive.rules]]
# name = "cyclomatic"
# [[linters-settings.revive.rules]]
# name = "deep-exit"
[[linters-settings.revive.rules]]
name = "defer"
[[linters-settings.revive.rules]]
name = "dot-imports"
[[linters-settings.revive.rules]]
name = "duplicated-imports"
[[linters-settings.revive.rules]]
name = "early-return"
[[linters-settings.revive.rules]]
name = "empty-block"
[[linters-settings.revive.rules]]
name = "empty-lines"
[[linters-settings.revive.rules]]
name = "errorf"
[[linters-settings.revive.rules]]
name = "error-naming"
[[linters-settings.revive.rules]]
name = "error-return"
[[linters-settings.revive.rules]]
name = "error-strings"
[[linters-settings.revive.rules]]
name = "exported"
# [[linters-settings.revive.rules]]
# name = "file-header"
# We have a lot of flag parameters. This linter probably makes
# a good point, but we would need some cleanup or a lot of nolints.
# [[linters-settings.revive.rules]]
# name = "flag-parameter"
# [[linters-settings.revive.rules]]
# name = "function-result-limit"
[[linters-settings.revive.rules]]
name = "get-return"
[[linters-settings.revive.rules]]
name = "identical-branches"
[[linters-settings.revive.rules]]
name = "if-return"
[[linters-settings.revive.rules]]
name = "imports-blacklist"
[[linters-settings.revive.rules]]
name = "import-shadowing"
[[linters-settings.revive.rules]]
name = "increment-decrement"
[[linters-settings.revive.rules]]
name = "indent-error-flow"
# [[linters-settings.revive.rules]]
# name = "line-length-limit"
# [[linters-settings.revive.rules]]
# name = "max-public-structs"
[[linters-settings.revive.rules]]
name = "modifies-parameter"
[[linters-settings.revive.rules]]
name = "modifies-value-receiver"
# We frequently use nested structs, particularly in tests.
# [[linters-settings.revive.rules]]
# name = "nested-structs"
[[linters-settings.revive.rules]]
name = "optimize-operands-order"
[[linters-settings.revive.rules]]
name = "package-comments"
[[linters-settings.revive.rules]]
name = "range"
[[linters-settings.revive.rules]]
name = "range-val-address"
[[linters-settings.revive.rules]]
name = "range-val-in-closure"
[[linters-settings.revive.rules]]
name = "receiver-naming"
[[linters-settings.revive.rules]]
name = "redefines-builtin-id"
[[linters-settings.revive.rules]]
name = "string-of-int"
[[linters-settings.revive.rules]]
name = "struct-tag"
[[linters-settings.revive.rules]]
name = "superfluous-else"
[[linters-settings.revive.rules]]
name = "time-naming"
[[linters-settings.revive.rules]]
name = "unconditional-recursion"
[[linters-settings.revive.rules]]
name = "unexported-naming"
[[linters-settings.revive.rules]]
name = "unexported-return"
# This is covered elsewhere and we want to ignore some
# functions such as fmt.Fprintf.
# [[linters-settings.revive.rules]]
# name = "unhandled-error"
[[linters-settings.revive.rules]]
name = "unnecessary-stmt"
[[linters-settings.revive.rules]]
name = "unreachable-code"
[[linters-settings.revive.rules]]
name = "unused-parameter"
# We generally have unused receivers in tests for meeting the
# requirements of an interface.
# [[linters-settings.revive.rules]]
# name = "unused-receiver"
# This probably makes sense after we upgrade to 1.18
# [[linters-settings.revive.rules]]
# name = "use-any"
[[linters-settings.revive.rules]]
name = "useless-break"
[[linters-settings.revive.rules]]
name = "var-declaration"
[[linters-settings.revive.rules]]
name = "var-naming"
[[linters-settings.revive.rules]]
name = "waitgroup-by-value"
[linters-settings.unparam]
check-exported = true
[[issues.exclude-rules]]
linters = [
"govet"
]
# We want to enable almost all govet rules. It is easier to just filter out
# the ones we don't want:
#
# * fieldalignment - way too noisy. Although it is very useful in particular
# cases where we are trying to use as little memory as possible, having
# it go off on every struct isn't helpful.
# * shadow - although often useful, it complains about _many_ err
# shadowing assignments and some others where shadowing is clear.
text = "^(fieldalignment|shadow)"

@ -0,0 +1,15 @@
ISC License
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

@ -0,0 +1,36 @@
# MaxMind DB Reader for Go #
[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.svg)](https://godoc.org/github.com/oschwald/maxminddb-golang)
This is a Go reader for the MaxMind DB format. Although this can be used to
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
API for doing so.
This is not an official MaxMind API.
## Installation ##
```
go get github.com/oschwald/maxminddb-golang
```
## Usage ##
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
documentation and examples.
## Examples ##
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
`example_test.go` for examples.
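For quick reference, a minimal lookup sketch; the database path and the `maxminddb` struct tags below assume a GeoLite2 City database, so adjust them to your own data:

```
package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	// Open memory-maps the database file.
	db, err := maxminddb.Open("GeoLite2-City.mmdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Only the fields named in the maxminddb struct tags are decoded.
	var record struct {
		Country struct {
			ISOCode string `maxminddb:"iso_code"`
		} `maxminddb:"country"`
	}
	ip := net.ParseIP("81.2.69.142")
	if err := db.Lookup(ip, &record); err != nil {
		log.Fatal(err)
	}
	fmt.Println(record.Country.ISOCode)
}
```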
## Contributing ##
Contributions welcome! Please fork the repository and open a pull request
with your changes.
## License ##
This is free software, licensed under the ISC License.

@ -0,0 +1,897 @@
package maxminddb
import (
"encoding/binary"
"math"
"math/big"
"reflect"
"sync"
)
type decoder struct {
buffer []byte
}
type dataType int
const (
_Extended dataType = iota
_Pointer
_String
_Float64
_Bytes
_Uint16
_Uint32
_Map
_Int32
_Uint64
_Uint128
_Slice
// We don't use the next two. They are placeholders. See the spec
// for more details.
_Container //nolint: deadcode, varcheck // above
_Marker //nolint: deadcode, varcheck // above
_Bool
_Float32
)
const (
// This is the value used in libmaxminddb.
maximumDataStructureDepth = 512
)
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
if depth > maximumDataStructureDepth {
return 0, newInvalidDatabaseError(
"exceeded maximum data structure depth; database is likely corrupt",
)
}
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
if err != nil {
return 0, err
}
if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
result.Set(reflect.ValueOf(uintptr(offset)))
return d.nextValueOffset(offset, 1)
}
return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
}
func (d *decoder) decodeToDeserializer(
offset uint,
dser deserializer,
depth int,
getNext bool,
) (uint, error) {
if depth > maximumDataStructureDepth {
return 0, newInvalidDatabaseError(
"exceeded maximum data structure depth; database is likely corrupt",
)
}
skip, err := dser.ShouldSkip(uintptr(offset))
if err != nil {
return 0, err
}
if skip {
if getNext {
return d.nextValueOffset(offset, 1)
}
return 0, nil
}
typeNum, size, newOffset, err := d.decodeCtrlData(offset)
if err != nil {
return 0, err
}
return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1)
}
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
newOffset := offset + 1
if offset >= uint(len(d.buffer)) {
return 0, 0, 0, newOffsetError()
}
ctrlByte := d.buffer[offset]
typeNum := dataType(ctrlByte >> 5)
if typeNum == _Extended {
if newOffset >= uint(len(d.buffer)) {
return 0, 0, 0, newOffsetError()
}
typeNum = dataType(d.buffer[newOffset] + 7)
newOffset++
}
var size uint
size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
return typeNum, size, newOffset, err
}
func (d *decoder) sizeFromCtrlByte(
ctrlByte byte,
offset uint,
typeNum dataType,
) (uint, uint, error) {
size := uint(ctrlByte & 0x1f)
if typeNum == _Extended {
return size, offset, nil
}
var bytesToRead uint
if size < 29 {
return size, offset, nil
}
bytesToRead = size - 28
newOffset := offset + bytesToRead
if newOffset > uint(len(d.buffer)) {
return 0, 0, newOffsetError()
}
if size == 29 {
return 29 + uint(d.buffer[offset]), offset + 1, nil
}
sizeBytes := d.buffer[offset:newOffset]
switch {
case size == 30:
size = 285 + uintFromBytes(0, sizeBytes)
case size > 30:
size = uintFromBytes(0, sizeBytes) + 65821
}
return size, newOffset, nil
}
func (d *decoder) decodeFromType(
dtype dataType,
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
result = d.indirect(result)
// For these types, size has a special meaning
switch dtype {
case _Bool:
return d.unmarshalBool(size, offset, result)
case _Map:
return d.unmarshalMap(size, offset, result, depth)
case _Pointer:
return d.unmarshalPointer(size, offset, result, depth)
case _Slice:
return d.unmarshalSlice(size, offset, result, depth)
}
// For the remaining types, size is the byte size
if offset+size > uint(len(d.buffer)) {
return 0, newOffsetError()
}
switch dtype {
case _Bytes:
return d.unmarshalBytes(size, offset, result)
case _Float32:
return d.unmarshalFloat32(size, offset, result)
case _Float64:
return d.unmarshalFloat64(size, offset, result)
case _Int32:
return d.unmarshalInt32(size, offset, result)
case _String:
return d.unmarshalString(size, offset, result)
case _Uint16:
return d.unmarshalUint(size, offset, result, 16)
case _Uint32:
return d.unmarshalUint(size, offset, result, 32)
case _Uint64:
return d.unmarshalUint(size, offset, result, 64)
case _Uint128:
return d.unmarshalUint128(size, offset, result)
default:
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
}
}
func (d *decoder) decodeFromTypeToDeserializer(
dtype dataType,
size uint,
offset uint,
dser deserializer,
depth int,
) (uint, error) {
// For these types, size has a special meaning
switch dtype {
case _Bool:
v, offset := d.decodeBool(size, offset)
return offset, dser.Bool(v)
case _Map:
return d.decodeMapToDeserializer(size, offset, dser, depth)
case _Pointer:
pointer, newOffset, err := d.decodePointer(size, offset)
if err != nil {
return 0, err
}
_, err = d.decodeToDeserializer(pointer, dser, depth, false)
return newOffset, err
case _Slice:
return d.decodeSliceToDeserializer(size, offset, dser, depth)
}
// For the remaining types, size is the byte size
if offset+size > uint(len(d.buffer)) {
return 0, newOffsetError()
}
switch dtype {
case _Bytes:
v, offset := d.decodeBytes(size, offset)
return offset, dser.Bytes(v)
case _Float32:
v, offset := d.decodeFloat32(size, offset)
return offset, dser.Float32(v)
case _Float64:
v, offset := d.decodeFloat64(size, offset)
return offset, dser.Float64(v)
case _Int32:
v, offset := d.decodeInt(size, offset)
return offset, dser.Int32(int32(v))
case _String:
v, offset := d.decodeString(size, offset)
return offset, dser.String(v)
case _Uint16:
v, offset := d.decodeUint(size, offset)
return offset, dser.Uint16(uint16(v))
case _Uint32:
v, offset := d.decodeUint(size, offset)
return offset, dser.Uint32(uint32(v))
case _Uint64:
v, offset := d.decodeUint(size, offset)
return offset, dser.Uint64(v)
case _Uint128:
v, offset := d.decodeUint128(size, offset)
return offset, dser.Uint128(v)
default:
return 0, newInvalidDatabaseError("unknown type: %d", dtype)
}
}
func (d *decoder) unmarshalBool(size, offset uint, result reflect.Value) (uint, error) {
if size > 1 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (bool size of %v)",
size,
)
}
value, newOffset := d.decodeBool(size, offset)
switch result.Kind() {
case reflect.Bool:
result.SetBool(value)
return newOffset, nil
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
// indirect follows pointers and creates values as necessary. This is
// heavily based on encoding/json as my original version had a subtle
// bug. This method should be considered to be licensed under
// https://golang.org/LICENSE
func (d *decoder) indirect(result reflect.Value) reflect.Value {
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if result.Kind() == reflect.Interface && !result.IsNil() {
e := result.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() {
result = e
continue
}
}
if result.Kind() != reflect.Ptr {
break
}
if result.IsNil() {
result.Set(reflect.New(result.Type().Elem()))
}
result = result.Elem()
}
return result
}
var sliceType = reflect.TypeOf([]byte{})
func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) {
value, newOffset := d.decodeBytes(size, offset)
switch result.Kind() {
case reflect.Slice:
if result.Type() == sliceType {
result.SetBytes(value)
return newOffset, nil
}
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) {
if size != 4 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (float32 size of %v)",
size,
)
}
value, newOffset := d.decodeFloat32(size, offset)
switch result.Kind() {
case reflect.Float32, reflect.Float64:
result.SetFloat(float64(value))
return newOffset, nil
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) {
if size != 8 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (float 64 size of %v)",
size,
)
}
value, newOffset := d.decodeFloat64(size, offset)
switch result.Kind() {
case reflect.Float32, reflect.Float64:
if result.OverflowFloat(value) {
return 0, newUnmarshalTypeError(value, result.Type())
}
result.SetFloat(value)
return newOffset, nil
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) {
if size > 4 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (int32 size of %v)",
size,
)
}
value, newOffset := d.decodeInt(size, offset)
switch result.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n := int64(value)
if !result.OverflowInt(n) {
result.SetInt(n)
return newOffset, nil
}
case reflect.Uint,
reflect.Uint8,
reflect.Uint16,
reflect.Uint32,
reflect.Uint64,
reflect.Uintptr:
n := uint64(value)
if !result.OverflowUint(n) {
result.SetUint(n)
return newOffset, nil
}
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) unmarshalMap(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
result = d.indirect(result)
switch result.Kind() {
default:
return 0, newUnmarshalTypeError("map", result.Type())
case reflect.Struct:
return d.decodeStruct(size, offset, result, depth)
case reflect.Map:
return d.decodeMap(size, offset, result, depth)
case reflect.Interface:
if result.NumMethod() == 0 {
rv := reflect.ValueOf(make(map[string]interface{}, size))
newOffset, err := d.decodeMap(size, offset, rv, depth)
result.Set(rv)
return newOffset, err
}
return 0, newUnmarshalTypeError("map", result.Type())
}
}
func (d *decoder) unmarshalPointer(
size, offset uint,
result reflect.Value,
depth int,
) (uint, error) {
pointer, newOffset, err := d.decodePointer(size, offset)
if err != nil {
return 0, err
}
_, err = d.decode(pointer, result, depth)
return newOffset, err
}
func (d *decoder) unmarshalSlice(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
switch result.Kind() {
case reflect.Slice:
return d.decodeSlice(size, offset, result, depth)
case reflect.Interface:
if result.NumMethod() == 0 {
a := []interface{}{}
rv := reflect.ValueOf(&a).Elem()
newOffset, err := d.decodeSlice(size, offset, rv, depth)
result.Set(rv)
return newOffset, err
}
}
return 0, newUnmarshalTypeError("array", result.Type())
}
func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) {
value, newOffset := d.decodeString(size, offset)
switch result.Kind() {
case reflect.String:
result.SetString(value)
return newOffset, nil
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) unmarshalUint(
size, offset uint,
result reflect.Value,
uintType uint,
) (uint, error) {
if size > uintType/8 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (uint%v size of %v)",
uintType,
size,
)
}
value, newOffset := d.decodeUint(size, offset)
switch result.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
n := int64(value)
if !result.OverflowInt(n) {
result.SetInt(n)
return newOffset, nil
}
case reflect.Uint,
reflect.Uint8,
reflect.Uint16,
reflect.Uint32,
reflect.Uint64,
reflect.Uintptr:
if !result.OverflowUint(value) {
result.SetUint(value)
return newOffset, nil
}
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
var bigIntType = reflect.TypeOf(big.Int{})
func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) {
if size > 16 {
return 0, newInvalidDatabaseError(
"the MaxMind DB file's data section contains bad data (uint128 size of %v)",
size,
)
}
value, newOffset := d.decodeUint128(size, offset)
switch result.Kind() {
case reflect.Struct:
if result.Type() == bigIntType {
result.Set(reflect.ValueOf(*value))
return newOffset, nil
}
case reflect.Interface:
if result.NumMethod() == 0 {
result.Set(reflect.ValueOf(value))
return newOffset, nil
}
}
return newOffset, newUnmarshalTypeError(value, result.Type())
}
func (d *decoder) decodeBool(size, offset uint) (bool, uint) {
return size != 0, offset
}
func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) {
newOffset := offset + size
bytes := make([]byte, size)
copy(bytes, d.buffer[offset:newOffset])
return bytes, newOffset
}
func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) {
newOffset := offset + size
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
return math.Float64frombits(bits), newOffset
}
func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) {
newOffset := offset + size
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
return math.Float32frombits(bits), newOffset
}
func (d *decoder) decodeInt(size, offset uint) (int, uint) {
newOffset := offset + size
var val int32
for _, b := range d.buffer[offset:newOffset] {
val = (val << 8) | int32(b)
}
return int(val), newOffset
}
func (d *decoder) decodeMap(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
if result.IsNil() {
result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
}
mapType := result.Type()
keyValue := reflect.New(mapType.Key()).Elem()
elemType := mapType.Elem()
elemKind := elemType.Kind()
var elemValue reflect.Value
for i := uint(0); i < size; i++ {
var key []byte
var err error
key, offset, err = d.decodeKey(offset)
if err != nil {
return 0, err
}
if !elemValue.IsValid() || elemKind == reflect.Interface {
elemValue = reflect.New(elemType).Elem()
}
offset, err = d.decode(offset, elemValue, depth)
if err != nil {
return 0, err
}
keyValue.SetString(string(key))
result.SetMapIndex(keyValue, elemValue)
}
return offset, nil
}
func (d *decoder) decodeMapToDeserializer(
size uint,
offset uint,
dser deserializer,
depth int,
) (uint, error) {
err := dser.StartMap(size)
if err != nil {
return 0, err
}
for i := uint(0); i < size; i++ {
// TODO - implement key/value skipping?
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
if err != nil {
return 0, err
}
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
if err != nil {
return 0, err
}
}
err = dser.End()
if err != nil {
return 0, err
}
return offset, nil
}
func (d *decoder) decodePointer(
size uint,
offset uint,
) (uint, uint, error) {
pointerSize := ((size >> 3) & 0x3) + 1
newOffset := offset + pointerSize
if newOffset > uint(len(d.buffer)) {
return 0, 0, newOffsetError()
}
pointerBytes := d.buffer[offset:newOffset]
var prefix uint
if pointerSize == 4 {
prefix = 0
} else {
prefix = size & 0x7
}
unpacked := uintFromBytes(prefix, pointerBytes)
var pointerValueOffset uint
switch pointerSize {
case 1:
pointerValueOffset = 0
case 2:
pointerValueOffset = 2048
case 3:
pointerValueOffset = 526336
case 4:
pointerValueOffset = 0
}
pointer := unpacked + pointerValueOffset
return pointer, newOffset, nil
}
func (d *decoder) decodeSlice(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
for i := 0; i < int(size); i++ {
var err error
offset, err = d.decode(offset, result.Index(i), depth)
if err != nil {
return 0, err
}
}
return offset, nil
}
func (d *decoder) decodeSliceToDeserializer(
size uint,
offset uint,
dser deserializer,
depth int,
) (uint, error) {
err := dser.StartSlice(size)
if err != nil {
return 0, err
}
for i := uint(0); i < size; i++ {
offset, err = d.decodeToDeserializer(offset, dser, depth, true)
if err != nil {
return 0, err
}
}
err = dser.End()
if err != nil {
return 0, err
}
return offset, nil
}
func (d *decoder) decodeString(size, offset uint) (string, uint) {
newOffset := offset + size
return string(d.buffer[offset:newOffset]), newOffset
}
func (d *decoder) decodeStruct(
size uint,
offset uint,
result reflect.Value,
depth int,
) (uint, error) {
fields := cachedFields(result)
// This fills in embedded structs
for _, i := range fields.anonymousFields {
_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
if err != nil {
return 0, err
}
}
// This handles named fields
for i := uint(0); i < size; i++ {
var (
err error
key []byte
)
key, offset, err = d.decodeKey(offset)
if err != nil {
return 0, err
}
// The string() does not create a copy due to this compiler
// optimization: https://github.com/golang/go/issues/3512
j, ok := fields.namedFields[string(key)]
if !ok {
offset, err = d.nextValueOffset(offset, 1)
if err != nil {
return 0, err
}
continue
}
offset, err = d.decode(offset, result.Field(j), depth)
if err != nil {
return 0, err
}
}
return offset, nil
}
type fieldsType struct {
namedFields map[string]int
anonymousFields []int
}
var fieldsMap sync.Map
func cachedFields(result reflect.Value) *fieldsType {
resultType := result.Type()
if fields, ok := fieldsMap.Load(resultType); ok {
return fields.(*fieldsType)
}
numFields := resultType.NumField()
namedFields := make(map[string]int, numFields)
var anonymous []int
for i := 0; i < numFields; i++ {
field := resultType.Field(i)
fieldName := field.Name
if tag := field.Tag.Get("maxminddb"); tag != "" {
if tag == "-" {
continue
}
fieldName = tag
}
if field.Anonymous {
anonymous = append(anonymous, i)
continue
}
namedFields[fieldName] = i
}
fields := &fieldsType{namedFields, anonymous}
fieldsMap.Store(resultType, fields)
return fields
}
func (d *decoder) decodeUint(size, offset uint) (uint64, uint) {
newOffset := offset + size
bytes := d.buffer[offset:newOffset]
var val uint64
for _, b := range bytes {
val = (val << 8) | uint64(b)
}
return val, newOffset
}
func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) {
newOffset := offset + size
val := new(big.Int)
val.SetBytes(d.buffer[offset:newOffset])
return val, newOffset
}
func uintFromBytes(prefix uint, uintBytes []byte) uint {
val := prefix
for _, b := range uintBytes {
val = (val << 8) | uint(b)
}
return val
}
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
// copying the bytes when decoding a struct. Previously, we achieved this by
// using unsafe.
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
if err != nil {
return nil, 0, err
}
if typeNum == _Pointer {
pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
if err != nil {
return nil, 0, err
}
key, _, err := d.decodeKey(pointer)
return key, ptrOffset, err
}
if typeNum != _String {
return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
}
newOffset := dataOffset + size
if newOffset > uint(len(d.buffer)) {
return nil, 0, newOffsetError()
}
return d.buffer[dataOffset:newOffset], newOffset, nil
}
// This function is used to skip ahead to the next value without decoding
// the one at the offset passed in. The size bits have different meanings for
// different data types.
func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) {
if numberToSkip == 0 {
return offset, nil
}
typeNum, size, offset, err := d.decodeCtrlData(offset)
if err != nil {
return 0, err
}
switch typeNum {
case _Pointer:
_, offset, err = d.decodePointer(size, offset)
if err != nil {
return 0, err
}
case _Map:
numberToSkip += 2 * size
case _Slice:
numberToSkip += size
case _Bool:
default:
offset += size
}
return d.nextValueOffset(offset, numberToSkip-1)
}
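The control-byte handling above follows the MaxMind DB spec: the top three bits of the byte carry the data type, the low five bits carry the size, and size values 29, 30, and 31 signal that one, two, or three extra size bytes follow, offset by 29, 285, and 65821 respectively. A standalone sketch of that arithmetic (illustrative only, not part of this file):

package main

import "fmt"

// uintFromBytes mirrors the package helper above: big-endian accumulation
// of the extra size bytes, with an optional prefix from the control byte.
func uintFromBytes(prefix uint, b []byte) uint {
	val := prefix
	for _, x := range b {
		val = (val << 8) | uint(x)
	}
	return val
}

func main() {
	// size bits < 29: the size is stored directly in the control byte.
	fmt.Println(uint(0x1b) & 0x1f) // 27

	// size bits == 29: one extra byte, offset by 29.
	fmt.Println(29 + uint(0xff)) // 284

	// size bits == 30: two extra bytes, offset by 285.
	fmt.Println(285 + uintFromBytes(0, []byte{0x01, 0x00})) // 541

	// size bits == 31: three extra bytes, offset by 65821.
	fmt.Println(65821 + uintFromBytes(0, []byte{0x00, 0x01, 0x00})) // 66077
}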

@ -0,0 +1,31 @@
package maxminddb
import "math/big"
// deserializer is an interface for a type that deserializes a MaxMind DB
// data record to some other type. This exists as an alternative to the
// standard reflection API.
//
// This is fundamentally different than the Unmarshaler interface that
// several packages provide. A Deserializer will generally create the
// final struct or value rather than unmarshaling to itself.
//
// This interface and the associated unmarshaling code is EXPERIMENTAL!
// It is not currently covered by any Semantic Versioning guarantees.
// Use at your own risk.
type deserializer interface {
ShouldSkip(offset uintptr) (bool, error)
StartSlice(size uint) error
StartMap(size uint) error
End() error
String(string) error
Float64(float64) error
Bytes([]byte) error
Uint16(uint16) error
Uint32(uint32) error
Int32(int32) error
Uint64(uint64) error
Uint128(*big.Int) error
Bool(bool) error
Float32(float32) error
}
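All methods of this interface are exported, so a caller can satisfy it structurally and pass the value as the result argument to Reader.Lookup or Reader.Decode. A minimal sketch (illustrative only, not part of this file) that simply counts the leaf values in a record:

package example

import "math/big"

// valueCounter walks a record without building Go values through reflection.
type valueCounter struct{ n int }

func (c *valueCounter) ShouldSkip(uintptr) (bool, error) { return false, nil }
func (c *valueCounter) StartSlice(uint) error            { return nil }
func (c *valueCounter) StartMap(uint) error              { return nil }
func (c *valueCounter) End() error                       { return nil }
func (c *valueCounter) String(string) error              { c.n++; return nil }
func (c *valueCounter) Float64(float64) error            { c.n++; return nil }
func (c *valueCounter) Bytes([]byte) error               { c.n++; return nil }
func (c *valueCounter) Uint16(uint16) error              { c.n++; return nil }
func (c *valueCounter) Uint32(uint32) error              { c.n++; return nil }
func (c *valueCounter) Int32(int32) error                { c.n++; return nil }
func (c *valueCounter) Uint64(uint64) error              { c.n++; return nil }
func (c *valueCounter) Uint128(*big.Int) error           { c.n++; return nil }
func (c *valueCounter) Bool(bool) error                  { c.n++; return nil }
func (c *valueCounter) Float32(float32) error            { c.n++; return nil }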

@ -0,0 +1,42 @@
package maxminddb
import (
"fmt"
"reflect"
)
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
message string
}
func newOffsetError() InvalidDatabaseError {
return InvalidDatabaseError{"unexpected end of database"}
}
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
return InvalidDatabaseError{fmt.Sprintf(format, args...)}
}
func (e InvalidDatabaseError) Error() string {
return e.message
}
// UnmarshalTypeError is returned when the value in the database cannot be
// assigned to the specified data type.
type UnmarshalTypeError struct {
Value string // stringified copy of the database value that caused the error
Type reflect.Type // type of the value that the data could not be assigned to
}
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
return UnmarshalTypeError{
Value: fmt.Sprintf("%v", value),
Type: rType,
}
}
func (e UnmarshalTypeError) Error() string {
return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
}

@ -0,0 +1,16 @@
//go:build !windows && !appengine && !plan9
// +build !windows,!appengine,!plan9
package maxminddb
import (
"golang.org/x/sys/unix"
)
func mmap(fd, length int) (data []byte, err error) {
return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}
func munmap(b []byte) (err error) {
return unix.Munmap(b)
}

@ -0,0 +1,85 @@
// +build windows,!appengine
package maxminddb
// Windows support largely borrowed from mmap-go.
//
// Copyright 2011 Evan Shaw. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
import (
"errors"
"os"
"reflect"
"sync"
"unsafe"
"golang.org/x/sys/windows"
)
type memoryMap []byte
// Windows
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}
func mmap(fd int, length int) (data []byte, err error) {
h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
if h == 0 {
return nil, os.NewSyscallError("CreateFileMapping", errno)
}
addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
0, uintptr(length))
if addr == 0 {
return nil, os.NewSyscallError("MapViewOfFile", errno)
}
handleLock.Lock()
handleMap[addr] = h
handleLock.Unlock()
m := memoryMap{}
dh := m.header()
dh.Data = addr
dh.Len = length
dh.Cap = dh.Len
return m, nil
}
func (m *memoryMap) header() *reflect.SliceHeader {
return (*reflect.SliceHeader)(unsafe.Pointer(m))
}
func flush(addr, len uintptr) error {
errno := windows.FlushViewOfFile(addr, len)
return os.NewSyscallError("FlushViewOfFile", errno)
}
func munmap(b []byte) (err error) {
m := memoryMap(b)
dh := m.header()
addr := dh.Data
length := uintptr(dh.Len)
flush(addr, length)
err = windows.UnmapViewOfFile(addr)
if err != nil {
return err
}
handleLock.Lock()
defer handleLock.Unlock()
handle, ok := handleMap[addr]
if !ok {
// should be impossible; we would've errored above
return errors.New("unknown base address")
}
delete(handleMap, addr)
e := windows.CloseHandle(windows.Handle(handle))
return os.NewSyscallError("CloseHandle", e)
}

@ -0,0 +1,58 @@
package maxminddb
type nodeReader interface {
readLeft(uint) uint
readRight(uint) uint
}
type nodeReader24 struct {
buffer []byte
}
func (n nodeReader24) readLeft(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber]) << 16) |
(uint(n.buffer[nodeNumber+1]) << 8) |
uint(n.buffer[nodeNumber+2])
}
func (n nodeReader24) readRight(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber+3]) << 16) |
(uint(n.buffer[nodeNumber+4]) << 8) |
uint(n.buffer[nodeNumber+5])
}
type nodeReader28 struct {
buffer []byte
}
func (n nodeReader28) readLeft(nodeNumber uint) uint {
return ((uint(n.buffer[nodeNumber+3]) & 0xF0) << 20) |
(uint(n.buffer[nodeNumber]) << 16) |
(uint(n.buffer[nodeNumber+1]) << 8) |
uint(n.buffer[nodeNumber+2])
}
func (n nodeReader28) readRight(nodeNumber uint) uint {
return ((uint(n.buffer[nodeNumber+3]) & 0x0F) << 24) |
(uint(n.buffer[nodeNumber+4]) << 16) |
(uint(n.buffer[nodeNumber+5]) << 8) |
uint(n.buffer[nodeNumber+6])
}
type nodeReader32 struct {
buffer []byte
}
func (n nodeReader32) readLeft(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber]) << 24) |
(uint(n.buffer[nodeNumber+1]) << 16) |
(uint(n.buffer[nodeNumber+2]) << 8) |
uint(n.buffer[nodeNumber+3])
}
func (n nodeReader32) readRight(nodeNumber uint) uint {
return (uint(n.buffer[nodeNumber+4]) << 24) |
(uint(n.buffer[nodeNumber+5]) << 16) |
(uint(n.buffer[nodeNumber+6]) << 8) |
uint(n.buffer[nodeNumber+7])
}

@ -0,0 +1,310 @@
// Package maxminddb provides a reader for the MaxMind DB file format.
package maxminddb
import (
"bytes"
"errors"
"fmt"
"net"
"reflect"
)
const (
// NotFound is returned by LookupOffset when a matched root record offset
// cannot be found.
NotFound = ^uintptr(0)
dataSectionSeparatorSize = 16
)
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
//
// All of the methods on Reader are thread-safe. The struct may be safely
// shared across goroutines.
type Reader struct {
hasMappedFile bool
buffer []byte
nodeReader nodeReader
decoder decoder
Metadata Metadata
ipv4Start uint
ipv4StartBitDepth int
nodeOffsetMult uint
}
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
// it has the format version, the build time as Unix epoch time, the database
// type and description, the IP version supported, and a slice of the natural
// languages included.
type Metadata struct {
BinaryFormatMajorVersion uint `maxminddb:"binary_format_major_version"`
BinaryFormatMinorVersion uint `maxminddb:"binary_format_minor_version"`
BuildEpoch uint `maxminddb:"build_epoch"`
DatabaseType string `maxminddb:"database_type"`
Description map[string]string `maxminddb:"description"`
IPVersion uint `maxminddb:"ip_version"`
Languages []string `maxminddb:"languages"`
NodeCount uint `maxminddb:"node_count"`
RecordSize uint `maxminddb:"record_size"`
}
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
// a Reader structure or an error.
func FromBytes(buffer []byte) (*Reader, error) {
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
if metadataStart == -1 {
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
}
metadataStart += len(metadataStartMarker)
metadataDecoder := decoder{buffer[metadataStart:]}
var metadata Metadata
rvMetadata := reflect.ValueOf(&metadata)
_, err := metadataDecoder.decode(0, rvMetadata, 0)
if err != nil {
return nil, err
}
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
if dataSectionStart > dataSectionEnd {
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
}
d := decoder{
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
}
nodeBuffer := buffer[:searchTreeSize]
var nodeReader nodeReader
switch metadata.RecordSize {
case 24:
nodeReader = nodeReader24{buffer: nodeBuffer}
case 28:
nodeReader = nodeReader28{buffer: nodeBuffer}
case 32:
nodeReader = nodeReader32{buffer: nodeBuffer}
default:
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
}
reader := &Reader{
buffer: buffer,
nodeReader: nodeReader,
decoder: d,
Metadata: metadata,
ipv4Start: 0,
nodeOffsetMult: metadata.RecordSize / 4,
}
reader.setIPv4Start()
return reader, err
}
func (r *Reader) setIPv4Start() {
if r.Metadata.IPVersion != 6 {
return
}
nodeCount := r.Metadata.NodeCount
node := uint(0)
i := 0
for ; i < 96 && node < nodeCount; i++ {
node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
}
r.ipv4Start = node
r.ipv4StartBitDepth = i
}
// Lookup retrieves the database record for ip and stores it in the value
// pointed to by result. If result is nil or not a pointer, an error is
// returned. If the data in the database record cannot be stored in result
// because of type differences, an UnmarshalTypeError is returned. If the
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
// is returned.
func (r *Reader) Lookup(ip net.IP, result interface{}) error {
if r.buffer == nil {
return errors.New("cannot call Lookup on a closed database")
}
pointer, _, _, err := r.lookupPointer(ip)
if pointer == 0 || err != nil {
return err
}
return r.retrieveData(pointer, result)
}
// LookupNetwork retrieves the database record for ip and stores it in the
// value pointed to by result. The network returned is the network associated
// with the data record in the database. The ok return value indicates whether
// the database contained a record for the ip.
//
// If result is nil or not a pointer, an error is returned. If the data in the
// database record cannot be stored in result because of type differences, an
// UnmarshalTypeError is returned. If the database is invalid or otherwise
// cannot be read, an InvalidDatabaseError is returned.
func (r *Reader) LookupNetwork(
ip net.IP,
result interface{},
) (network *net.IPNet, ok bool, err error) {
if r.buffer == nil {
return nil, false, errors.New("cannot call Lookup on a closed database")
}
pointer, prefixLength, ip, err := r.lookupPointer(ip)
network = r.cidr(ip, prefixLength)
if pointer == 0 || err != nil {
return network, false, err
}
return network, true, r.retrieveData(pointer, result)
}
// LookupOffset maps an argument net.IP to a corresponding record offset in the
// database. NotFound is returned if no such record is found, and a record may
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
// is an advanced API, which exists to provide clients with a means to cache
// previously-decoded records.
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
if r.buffer == nil {
return 0, errors.New("cannot call LookupOffset on a closed database")
}
pointer, _, _, err := r.lookupPointer(ip)
if pointer == 0 || err != nil {
return NotFound, err
}
return r.resolveDataPointer(pointer)
}
func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
// This is necessary as the node that the IPv4 start is at may
// be at a bit depth that is less than 96, i.e., ipv4Start points
// to a leaf node. For instance, if a record was inserted at ::/8,
// the ipv4Start would point directly at the leaf node for the
// record and would have a bit depth of 8. This would not happen
// with databases currently distributed by MaxMind as all of them
// have an IPv4 subtree that is greater than a single node.
if r.Metadata.IPVersion == 6 &&
len(ip) == net.IPv4len &&
r.ipv4StartBitDepth != 96 {
return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
}
mask := net.CIDRMask(prefixLength, len(ip)*8)
return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
}
// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
// data or an empty interface{} value.
//
// If result is a pointer to a struct, the struct need not include a field
// for every value that may be in the database. If a field is not present in
// the structure, the decoder will not decode that field, reducing the time
// required to decode the record.
//
// As a special case, a struct field of type uintptr will be used to capture
// the offset of the value. Decode may later be used to extract the stored
// value from the offset. MaxMind DBs are highly normalized: for example in
// the City database, all records of the same country will reference a
// single representative record for that country. This uintptr behavior allows
// clients to leverage this normalization in their own sub-record caching.
func (r *Reader) Decode(offset uintptr, result interface{}) error {
if r.buffer == nil {
return errors.New("cannot call Decode on a closed database")
}
return r.decode(offset, result)
}
func (r *Reader) decode(offset uintptr, result interface{}) error {
rv := reflect.ValueOf(result)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return errors.New("result param must be a pointer")
}
if dser, ok := result.(deserializer); ok {
_, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
return err
}
_, err := r.decoder.decode(uint(offset), rv, 0)
return err
}
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
if ip == nil {
return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
}
ipV4Address := ip.To4()
if ipV4Address != nil {
ip = ipV4Address
}
if len(ip) == 16 && r.Metadata.IPVersion == 4 {
return 0, 0, ip, fmt.Errorf(
"error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
ip.String(),
)
}
bitCount := uint(len(ip) * 8)
var node uint
if bitCount == 32 {
node = r.ipv4Start
}
node, prefixLength := r.traverseTree(ip, node, bitCount)
nodeCount := r.Metadata.NodeCount
if node == nodeCount {
// Record is empty
return 0, prefixLength, ip, nil
} else if node > nodeCount {
return node, prefixLength, ip, nil
}
return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
}
func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
nodeCount := r.Metadata.NodeCount
i := uint(0)
for ; i < bitCount && node < nodeCount; i++ {
bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))
offset := node * r.nodeOffsetMult
if bit == 0 {
node = r.nodeReader.readLeft(offset)
} else {
node = r.nodeReader.readRight(offset)
}
}
return node, int(i)
}
func (r *Reader) retrieveData(pointer uint, result interface{}) error {
offset, err := r.resolveDataPointer(pointer)
if err != nil {
return err
}
return r.decode(offset, result)
}
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
if resolved >= uintptr(len(r.buffer)) {
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
}
return resolved, nil
}
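A sketch of the lookup variants documented above; the database path, the example address, and the use of a generic map are assumptions (illustrative only, not part of this file):

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	db, err := maxminddb.Open("GeoLite2-City.mmdb") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ip := net.ParseIP("81.2.69.142")
	var record map[string]interface{}

	// LookupNetwork also reports the containing network and whether a record exists.
	network, ok, err := db.LookupNetwork(ip, &record)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(network, ok)

	// LookupOffset returns an offset that can be cached and decoded later.
	offset, err := db.LookupOffset(ip)
	if err != nil {
		log.Fatal(err)
	}
	if offset != maxminddb.NotFound {
		var cached map[string]interface{}
		if err := db.Decode(offset, &cached); err != nil {
			log.Fatal(err)
		}
	}
}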

@ -0,0 +1,28 @@
// +build appengine plan9
package maxminddb
import "io/ioutil"
// Open takes a string path to a MaxMind DB file and returns a Reader
// structure or an error. The database file is opened using a memory map,
// except on Google App Engine where mmap is not supported; there the database
// is loaded into memory. Use the Close method on the Reader object to return
// the resources to the system.
func Open(file string) (*Reader, error) {
bytes, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
return FromBytes(bytes)
}
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method sets the underlying buffer
// to nil, returning the resources to the system.
func (r *Reader) Close() error {
r.buffer = nil
return nil
}

@ -0,0 +1,66 @@
//go:build !appengine && !plan9
// +build !appengine,!plan9
package maxminddb
import (
"os"
"runtime"
)
// Open takes a string path to a MaxMind DB file and returns a Reader
// structure or an error. The database file is opened using a memory map,
// except on Google App Engine where mmap is not supported; there the database
// is loaded into memory. Use the Close method on the Reader object to return
// the resources to the system.
func Open(file string) (*Reader, error) {
mapFile, err := os.Open(file)
if err != nil {
_ = mapFile.Close()
return nil, err
}
stats, err := mapFile.Stat()
if err != nil {
_ = mapFile.Close()
return nil, err
}
fileSize := int(stats.Size())
mmap, err := mmap(int(mapFile.Fd()), fileSize)
if err != nil {
_ = mapFile.Close()
return nil, err
}
if err := mapFile.Close(); err != nil {
//nolint:errcheck // we prefer to return the original error
munmap(mmap)
return nil, err
}
reader, err := FromBytes(mmap)
if err != nil {
//nolint:errcheck // we prefer to return the original error
munmap(mmap)
return nil, err
}
reader.hasMappedFile = true
runtime.SetFinalizer(reader, (*Reader).Close)
return reader, nil
}
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method does nothing.
func (r *Reader) Close() error {
var err error
if r.hasMappedFile {
runtime.SetFinalizer(r, nil)
r.hasMappedFile = false
err = munmap(r.buffer)
}
r.buffer = nil
return err
}

@ -0,0 +1,205 @@
package maxminddb
import (
"fmt"
"net"
)
// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
ip net.IP
bit uint
pointer uint
}
// Networks represents a set of subnets that we are iterating over.
type Networks struct {
reader *Reader
nodes []netNode // Nodes we still have to visit.
lastNode netNode
err error
skipAliasedNetworks bool
}
var (
allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}
)
// NetworksOption are options for Networks and NetworksWithin.
type NetworksOption func(*Networks)
// SkipAliasedNetworks is an option for Networks and NetworksWithin that
// makes them not iterate over aliases of the IPv4 subtree in an IPv6
// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
//
// You most likely want to set this. The only reason it isn't the default
// behavior is to provide backwards compatibility to existing users.
func SkipAliasedNetworks(networks *Networks) {
networks.skipAliasedNetworks = true
}
// Networks returns an iterator that can be used to traverse all networks in
// the database.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these locations
// separately. To only iterate over the IPv4 networks once, use the
// SkipAliasedNetworks option.
func (r *Reader) Networks(options ...NetworksOption) *Networks {
var networks *Networks
if r.Metadata.IPVersion == 6 {
networks = r.NetworksWithin(allIPv6, options...)
} else {
networks = r.NetworksWithin(allIPv4, options...)
}
return networks
}
// NetworksWithin returns an iterator that can be used to traverse all networks
// in the database which are contained in a given network.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these locations
// separately. To only iterate over the IPv4 networks once, use the
// SkipAliasedNetworks option.
//
// If the provided network is contained within a network in the database, the
// iterator will iterate over exactly one network, the containing network.
func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
return &Networks{
err: fmt.Errorf(
"error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
network.String(),
),
}
}
networks := &Networks{reader: r}
for _, option := range options {
option(networks)
}
ip := network.IP
prefixLength, _ := network.Mask.Size()
if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
if networks.skipAliasedNetworks {
ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
} else {
ip = ip.To16()
}
prefixLength += 96
}
pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
networks.nodes = []netNode{
{
ip: ip,
bit: uint(bit),
pointer: pointer,
},
}
return networks
}
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
if n.err != nil {
return false
}
for len(n.nodes) > 0 {
node := n.nodes[len(n.nodes)-1]
n.nodes = n.nodes[:len(n.nodes)-1]
for node.pointer != n.reader.Metadata.NodeCount {
// This skips IPv4 aliases without hardcoding the networks that the writer
// currently aliases.
if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
break
}
if node.pointer > n.reader.Metadata.NodeCount {
n.lastNode = node
return true
}
ipRight := make(net.IP, len(node.ip))
copy(ipRight, node.ip)
if len(ipRight) <= int(node.bit>>3) {
n.err = newInvalidDatabaseError(
"invalid search tree at %v/%v", ipRight, node.bit)
return false
}
ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))
offset := node.pointer * n.reader.nodeOffsetMult
rightPointer := n.reader.nodeReader.readRight(offset)
node.bit++
n.nodes = append(n.nodes, netNode{
pointer: rightPointer,
ip: ipRight,
bit: node.bit,
})
node.pointer = n.reader.nodeReader.readLeft(offset)
}
}
return false
}
// Network returns the current network or an error if there is a problem
// decoding the data for the network. It takes a pointer to a result value to
// decode the network's data into.
func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
if n.err != nil {
return nil, n.err
}
if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
return nil, err
}
ip := n.lastNode.ip
prefixLength := int(n.lastNode.bit)
// We do this because users of SkipAliasedNetworks expect the IPv4 networks
// to be returned as IPv4 networks. If we are not skipping aliased
// networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
// network as Go automatically converts those.
if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
ip = ip[12:]
prefixLength -= 96
}
return &net.IPNet{
IP: ip,
Mask: net.CIDRMask(prefixLength, len(ip)*8),
}, nil
}
// Err returns an error, if any, that was encountered during iteration.
func (n *Networks) Err() error {
return n.err
}
// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
// IPv4 subtree.
func isInIPv4Subtree(ip net.IP) bool {
if len(ip) != 16 {
return false
}
for i := 0; i < 12; i++ {
if ip[i] != 0 {
return false
}
}
return true
}
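A sketch of iterating every network in a database with the aliased IPv4 subtrees skipped, as recommended above; the database path is an assumption (illustrative only, not part of this file):

package main

import (
	"fmt"
	"log"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	db, err := maxminddb.Open("GeoLite2-City.mmdb") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// SkipAliasedNetworks avoids revisiting the IPv4 subtree via its IPv6 aliases.
	networks := db.Networks(maxminddb.SkipAliasedNetworks)
	for networks.Next() {
		var record map[string]interface{}
		subnet, err := networks.Network(&record)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(subnet)
	}
	if err := networks.Err(); err != nil {
		log.Fatal(err)
	}
}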

@ -0,0 +1,201 @@
package maxminddb
import (
"reflect"
"runtime"
)
type verifier struct {
reader *Reader
}
// Verify checks that the database is valid. It validates the search tree,
// the data section, and the metadata section. This verifier is stricter than
// the specification and may return errors on databases that are readable.
func (r *Reader) Verify() error {
v := verifier{r}
if err := v.verifyMetadata(); err != nil {
return err
}
err := v.verifyDatabase()
runtime.KeepAlive(v.reader)
return err
}
func (v *verifier) verifyMetadata() error {
metadata := v.reader.Metadata
if metadata.BinaryFormatMajorVersion != 2 {
return testError(
"binary_format_major_version",
2,
metadata.BinaryFormatMajorVersion,
)
}
if metadata.BinaryFormatMinorVersion != 0 {
return testError(
"binary_format_minor_version",
0,
metadata.BinaryFormatMinorVersion,
)
}
if metadata.DatabaseType == "" {
return testError(
"database_type",
"non-empty string",
metadata.DatabaseType,
)
}
if len(metadata.Description) == 0 {
return testError(
"description",
"non-empty slice",
metadata.Description,
)
}
if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
return testError(
"ip_version",
"4 or 6",
metadata.IPVersion,
)
}
if metadata.RecordSize != 24 &&
metadata.RecordSize != 28 &&
metadata.RecordSize != 32 {
return testError(
"record_size",
"24, 28, or 32",
metadata.RecordSize,
)
}
if metadata.NodeCount == 0 {
return testError(
"node_count",
"positive integer",
metadata.NodeCount,
)
}
return nil
}
func (v *verifier) verifyDatabase() error {
offsets, err := v.verifySearchTree()
if err != nil {
return err
}
if err := v.verifyDataSectionSeparator(); err != nil {
return err
}
return v.verifyDataSection(offsets)
}
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
offsets := make(map[uint]bool)
it := v.reader.Networks()
for it.Next() {
offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
if err != nil {
return nil, err
}
offsets[uint(offset)] = true
}
if err := it.Err(); err != nil {
return nil, err
}
return offsets, nil
}
func (v *verifier) verifyDataSectionSeparator() error {
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
for _, b := range separator {
if b != 0 {
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
}
}
return nil
}
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
pointerCount := len(offsets)
decoder := v.reader.decoder
var offset uint
bufferLen := uint(len(decoder.buffer))
for offset < bufferLen {
var data interface{}
rv := reflect.ValueOf(&data)
newOffset, err := decoder.decode(offset, rv, 0)
if err != nil {
return newInvalidDatabaseError(
"received decoding error (%v) at offset of %v",
err,
offset,
)
}
if newOffset <= offset {
return newInvalidDatabaseError(
"data section offset unexpectedly went from %v to %v",
offset,
newOffset,
)
}
pointer := offset
if _, ok := offsets[pointer]; !ok {
return newInvalidDatabaseError(
"found data (%v) at %v that the search tree does not point to",
data,
pointer,
)
}
delete(offsets, pointer)
offset = newOffset
}
if offset != bufferLen {
return newInvalidDatabaseError(
"unexpected data at the end of the data section (last offset: %v, end: %v)",
offset,
bufferLen,
)
}
if len(offsets) != 0 {
return newInvalidDatabaseError(
"found %v pointers (of %v) in the search tree that we did not see in the data section",
len(offsets),
pointerCount,
)
}
return nil
}
func testError(
field string,
expected interface{},
actual interface{},
) error {
return newInvalidDatabaseError(
"%v - Expected: %v Actual: %v",
field,
expected,
actual,
)
}
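A sketch of running the verifier against an opened database; the path is an assumption (illustrative only, not part of this file):

package main

import (
	"log"

	"github.com/oschwald/maxminddb-golang"
)

func main() {
	db, err := maxminddb.Open("GeoLite2-City.mmdb") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Verify(); err != nil {
		log.Fatalf("database failed verification: %v", err)
	}
}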

@ -6,3 +6,5 @@
*.log
*_test.go
gomod.sh
*.zip
*.tar.gz

@ -1,53 +1,47 @@
package goip
import (
"go.dtapp.net/goip/geoip"
"go.dtapp.net/goip/ip2region"
"go.dtapp.net/goip/ip2region_v2"
"go.dtapp.net/goip/ipv6wry"
"go.dtapp.net/goip/qqwry"
"net"
"strconv"
)
var (
ipv4 = "IPV4"
ipv6 = "IPV6"
)
type AnalyseResult struct {
IP string `json:"ip,omitempty"` // input IP address
Country string `json:"country,omitempty"` // country or region
Province string `json:"province,omitempty"` // province
City string `json:"city,omitempty"` // city
Area string `json:"area,omitempty"` // area
Isp string `json:"isp,omitempty"` // ISP (carrier)
Ip string `json:"ip,omitempty"`
QqwryInfo qqwry.QueryResult `json:"qqwry_info"`
Ip2regionInfo ip2region.QueryResult `json:"ip2region_info"`
Ip2regionV2info ip2region_v2.QueryResult `json:"ip2regionv2_info"`
GeoipInfo geoip.QueryCityResult `json:"geoip_info"`
Ipv6wryInfo ipv6wry.QueryResult `json:"ipv6wry_info"`
}
func (c *Client) Analyse(item string) AnalyseResult {
isIp := c.isIpv4OrIpv6(item)
ipByte := net.ParseIP(item)
switch isIp {
case ipv4:
info := c.V4db.Find(item)
search, err := c.V4Region.MemorySearch(item)
if err != nil {
return AnalyseResult{
IP: info.IP,
Country: info.Country,
Area: info.Area,
}
} else {
return AnalyseResult{
IP: search.IP,
Country: search.Country,
Province: search.Province,
City: search.City,
Isp: info.Area,
}
qqwryInfo, _ := c.QueryQqWry(ipByte)
ip2regionInfo, _ := c.QueryIp2Region(ipByte)
ip2regionV2Info, _ := c.QueryIp2RegionV2(ipByte)
geoipInfo, _ := c.QueryGeoIp(ipByte)
return AnalyseResult{
Ip: ipByte.String(),
QqwryInfo: qqwryInfo,
Ip2regionInfo: ip2regionInfo,
Ip2regionV2info: ip2regionV2Info,
GeoipInfo: geoipInfo,
}
case ipv6:
info := c.V6db.Find(item)
geoipInfo, _ := c.QueryGeoIp(ipByte)
ipv6Info, _ := c.QueryIpv6wry(ipByte)
return AnalyseResult{
IP: info.IP,
Country: info.Country,
Province: info.Province,
City: info.City,
Area: info.Area,
Isp: info.Isp,
Ip: ipByte.String(),
GeoipInfo: geoipInfo,
Ipv6wryInfo: ipv6Info,
}
default:
return AnalyseResult{}

@ -0,0 +1,39 @@
package goip
import (
"go.dtapp.net/goip/geoip"
"go.dtapp.net/goip/ip2region"
"go.dtapp.net/goip/ip2region_v2"
"go.dtapp.net/goip/ipv6wry"
"go.dtapp.net/goip/qqwry"
)
type Client struct {
ip2regionV2Client *ip2region_v2.Client
ip2regionClient *ip2region.Client
qqwryClient *qqwry.Client
geoIpClient *geoip.Client
ipv6wryClient *ipv6wry.Client
}
// NewIp creates a new IP lookup client
func NewIp() *Client {
c := &Client{}
c.ip2regionV2Client, _ = ip2region_v2.New()
c.ip2regionClient = ip2region.New()
c.qqwryClient = qqwry.New()
c.geoIpClient, _ = geoip.New()
c.ipv6wryClient = ipv6wry.New()
return c
}
func (c *Client) Close() {
c.geoIpClient.Close()
}
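A sketch of how a caller uses this client; the example address is arbitrary (illustrative only, not part of this file):

package main

import (
	"fmt"

	"go.dtapp.net/goip"
)

func main() {
	client := goip.NewIp()
	defer client.Close()

	result := client.Analyse("1.1.1.1") // arbitrary example address
	fmt.Println(result.Ip, result.Ip2regionV2info.Country, result.GeoipInfo.City.Name)
}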

@ -1,3 +1,3 @@
package goip
const Version = "1.0.30"
const Version = "1.0.33"

Binary file not shown.

Binary file not shown.


Binary file not shown.

@ -0,0 +1,52 @@
package geoip
import (
_ "embed"
"github.com/oschwald/geoip2-golang"
)
//go:embed GeoLite2-ASN.mmdb
var asnBuff []byte
//go:embed GeoLite2-City.mmdb
var cityBuff []byte
//go:embed GeoLite2-Country.mmdb
var countryBuff []byte
type Client struct {
asnDb *geoip2.Reader
cityDb *geoip2.Reader
countryDb *geoip2.Reader
}
func New() (*Client, error) {
var err error
c := &Client{}
c.asnDb, err = geoip2.FromBytes(asnBuff)
if err != nil {
return nil, err
}
c.cityDb, err = geoip2.FromBytes(cityBuff)
if err != nil {
return nil, err
}
c.countryDb, err = geoip2.FromBytes(countryBuff)
if err != nil {
return nil, err
}
return c, err
}
func (c *Client) Close() {
c.asnDb.Close()
c.cityDb.Close()
c.countryDb.Close()
}

@ -0,0 +1,23 @@
package geoip
import (
"io/ioutil"
"log"
"net/http"
)
func OnlineDownload(downloadUrl string, downloadName string) {
resp, err := http.Get(downloadUrl)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
err = ioutil.WriteFile("./"+downloadName, body, 0644)
if err != nil {
panic(err)
}
log.Printf("Downloaded the latest database: %s", "./"+downloadName)
}

@ -0,0 +1,31 @@
package geoip
import (
"go.dtapp.net/gostring"
)
var licenseKey = "" // license key
func GetGeoLite2AsnDownloadUrl() string {
return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}
//func GetGeoLite2AsnCsvDownloadUrl() string {
// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}
func GetGeoLite2CityDownloadUrl() string {
return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}
//func GetGeoLite2CityCsvDownloadUrl() string {
// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}
func GetGeoLite2CountryDownloadUrl() string {
return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}
//func GetGeoLite2CountryCsvDownloadUrl() string {
// return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}
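A sketch of combining these URL helpers with OnlineDownload above; it assumes the package-level licenseKey has been filled in with a valid MaxMind license key (with the empty default the download request would be rejected):

package main

import "go.dtapp.net/goip/geoip"

func main() {
	// Download the GeoLite2 City database to the working directory.
	geoip.OnlineDownload(geoip.GetGeoLite2CityDownloadUrl(), "GeoLite2-City.tar.gz")
}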

@ -0,0 +1,66 @@
package geoip
import (
_ "embed"
"net"
)
// QueryCityResult is the result returned by QueryCity
type QueryCityResult struct {
Ip string `json:"ip,omitempty"` // IP address
Continent struct {
Code string `json:"code,omitempty"` // continent code
Name string `json:"name,omitempty"` // continent name
} `json:"continent,omitempty"`
Country struct {
Code string `json:"code,omitempty"` // country code
Name string `json:"name,omitempty"` // country name
} `json:"country,omitempty"`
Province struct {
Code string `json:"code,omitempty"` // province code
Name string `json:"name,omitempty"` // province name
} `json:"province,omitempty"`
City struct {
Name string `json:"name,omitempty"` // city name
} `json:"city,omitempty"`
Location struct {
TimeZone string `json:"time_zone,omitempty"` // time zone
Latitude float64 `json:"latitude,omitempty"` // latitude
Longitude float64 `json:"longitude,omitempty"` // longitude
} `json:"location,omitempty"`
}
func (c *Client) QueryCity(ipAddress net.IP) (result QueryCityResult, err error) {
record, err := c.cityDb.City(ipAddress)
if err != nil {
return QueryCityResult{}, err
}
// ip
result.Ip = ipAddress.String()
// continent
result.Continent.Code = record.Continent.Code
result.Continent.Name = record.Continent.Names["zh-CN"]
// country
result.Country.Code = record.Country.IsoCode
result.Country.Name = record.Country.Names["zh-CN"]
// province
if len(record.Subdivisions) > 0 {
result.Province.Code = record.Subdivisions[0].IsoCode
result.Province.Name = record.Subdivisions[0].Names["zh-CN"]
}
// city
result.City.Name = record.City.Names["zh-CN"]
// location
result.Location.TimeZone = record.Location.TimeZone
result.Location.Latitude = record.Location.Latitude
result.Location.Longitude = record.Location.Longitude
return result, err
}
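A minimal usage sketch (not part of the commit) for the embedded GeoLite2 readers; the zh-CN names may be empty for records without a Chinese localisation:

package main

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/geoip"
)

func main() {
    client, err := geoip.New() // loads the embedded ASN/City/Country databases
    if err != nil {
        panic(err)
    }
    defer client.Close()

    result, err := client.QueryCity(net.ParseIP("114.114.114.114"))
    if err != nil {
        panic(err)
    }
    fmt.Println(result.Country.Name, result.Province.Name, result.City.Name, result.Location.TimeZone)
}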

63
vendor/go.dtapp.net/goip/goip.go generated vendored

@ -1,63 +0,0 @@
package goip
import (
"go.dtapp.net/goip/ip2region"
v4 "go.dtapp.net/goip/v4"
v6 "go.dtapp.net/goip/v6"
"log"
"strings"
)
type Client struct {
V4Region ip2region.Ip2Region // IPV4
V4db v4.Pointer // IPV4
V6db v6.Pointer // IPV6
}
// NewIp 实例化
func NewIp() *Client {
app := &Client{}
v4Num := app.V4db.InitIPV4Data()
log.Printf("IPV4 库加载完成 共加载:%d 条 IP 记录\n", v4Num)
v6Num := app.V6db.InitIPV4Data()
log.Printf("IPV6 库加载完成 共加载:%d 条 IP 记录\n", v6Num)
return app
}
func (c *Client) Ipv4(ip string) (res v4.Result, resInfo ip2region.IpInfo) {
res = c.V4db.Find(ip)
resInfo, _ = c.V4Region.MemorySearch(ip)
return res, resInfo
}
func (c *Client) Ipv6(ip string) (res v6.Result) {
res = c.V6db.Find(ip)
return res
}
func (c *Client) isIpv4OrIpv6(ip string) string {
if len(ip) < 7 {
return ""
}
arrIpv4 := strings.Split(ip, ".")
if len(arrIpv4) == 4 {
//. 判断IPv4
for _, val := range arrIpv4 {
if !c.CheckIpv4(val) {
return ""
}
}
return ipv4
}
arrIpv6 := strings.Split(ip, ":")
if len(arrIpv6) == 8 {
// 判断Ipv6
for _, val := range arrIpv6 {
if !c.CheckIpv6(val) {
return "Neither"
}
}
return ipv6
}
return ""
}

23
vendor/go.dtapp.net/goip/ip.go generated vendored

@ -9,6 +9,7 @@ import (
// GetInsideIp returns the internal (LAN) IP
func GetInsideIp(ctx context.Context) string {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err != nil {
panic(err)
@ -49,27 +50,25 @@ func Ips(ctx context.Context) (map[string]string, error) {
var respGetOutsideIp struct {
Data struct {
Ip string `json:"ip"`
Ip string `json:"ip,omitempty"`
} `json:"data"`
}
// GetOutsideIp returns the external (public) IP
func GetOutsideIp(ctx context.Context) (ip string) {
ip = "0.0.0.0"
get := gorequest.NewHttp()
get.SetUri("https://api.dtapp.net/ip")
response, err := get.Get(ctx)
func GetOutsideIp(ctx context.Context) string {
// request
getHttp := gorequest.NewHttp()
getHttp.SetUri("https://api.dtapp.net/ip")
response, err := getHttp.Get(ctx)
if err != nil {
return
return "0.0.0.0"
}
// parse the response
err = json.Unmarshal(response.ResponseBody, &respGetOutsideIp)
if err != nil {
return
}
if respGetOutsideIp.Data.Ip == "" {
return
return "0.0.0.0"
}
ip = respGetOutsideIp.Data.Ip
respGetOutsideIp.Data.Ip = "0.0.0.0"
return respGetOutsideIp.Data.Ip
}

@ -0,0 +1,103 @@
package ip2region
import (
_ "embed"
"errors"
"go.dtapp.net/gostring"
"os"
"strconv"
"strings"
)
const (
IndexBlockLength = 12
)
//go:embed ip2region.db
var dbBuff []byte
type Client struct {
// db file handler
dbFileHandler *os.File
//header block info
headerSip []int64
headerPtr []int64
headerLen int64
// super block index info
firstIndexPtr int64
lastIndexPtr int64
totalBlocks int64
// for memory mode only
// the original db binary string
dbFile string
}
func New() *Client {
c := &Client{}
return c
}
// getIpInfo parses a raw record line into a QueryResult
func getIpInfo(ipStr string, cityId int64, line []byte) (result QueryResult) {
lineSlice := strings.Split(string(line), "|")
length := len(lineSlice)
result.CityId = cityId
if length < 5 {
for i := 0; i <= 5-length; i++ {
lineSlice = append(lineSlice, "")
}
}
if lineSlice[0] != "0" {
result.Country = gostring.SpaceAndLineBreak(lineSlice[0])
}
if lineSlice[1] != "0" {
result.Region = gostring.SpaceAndLineBreak(lineSlice[1])
}
if lineSlice[2] != "0" {
result.Province = gostring.SpaceAndLineBreak(lineSlice[2])
}
if lineSlice[3] != "0" {
result.City = gostring.SpaceAndLineBreak(lineSlice[3])
}
if lineSlice[4] != "0" {
result.Isp = gostring.SpaceAndLineBreak(lineSlice[4])
}
result.Ip = ipStr
return result
}
func getLong(b []byte, offset int64) int64 {
val := int64(b[offset]) |
int64(b[offset+1])<<8 |
int64(b[offset+2])<<16 |
int64(b[offset+3])<<24
return val
}
func ip2long(IpStr string) (int64, error) {
bits := strings.Split(IpStr, ".")
if len(bits) != 4 {
return 0, errors.New("ip format error")
}
var sum int64
for i, n := range bits {
bit, _ := strconv.ParseInt(n, 10, 64)
sum += bit << uint(24-8*i)
}
return sum, nil
}
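For reference, a worked example of the conversion: ip2long("1.2.3.4") = 1<<24 + 2<<16 + 3<<8 + 4 = 16777216 + 131072 + 768 + 4 = 16909060.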

@ -2,16 +2,22 @@ package ip2region
import (
"io/ioutil"
"log"
"net/http"
)
func getOnline() ([]byte, error) {
resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.db?raw=true")
func OnlineDownload() {
resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/v1.0/data/ip2region.db?raw=true")
if err != nil {
return nil, err
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
return body, err
err = ioutil.WriteFile("./ip2region.db", body, 0644)
if err != nil {
panic(err)
}
log.Printf("已下载最新 ip2region 数据库 %s ", "./ip2region.db")
}

@ -1,183 +0,0 @@
package ip2region
import (
_ "embed"
"errors"
"go.dtapp.net/gostring"
"io/ioutil"
"log"
"net"
"os"
"strconv"
"strings"
)
const (
IndexBlockLength = 12
)
type Ip2Region struct {
// db file handler
dbFileHandler *os.File
//header block info
headerSip []int64
headerPtr []int64
headerLen int64
// super block index info
firstIndexPtr int64
lastIndexPtr int64
totalBlocks int64
// for memory mode only
// the original db binary string
dbFile string
}
//go:embed ip.db
var dbBinStr []byte
type IpInfo struct {
IP string `json:"ip,omitempty"` // 输入的ip地址
CityID int64 `json:"city_id,omitempty"` // 城市ID
Country string `json:"country,omitempty"` // 国家
Region string `json:"region,omitempty"` // 区域
Province string `json:"province,omitempty"` // 省份
City string `json:"city,omitempty"` // 城市
ISP string `json:"isp,omitempty"` // 运营商
}
func (ip IpInfo) String() string {
return ip.IP + "|" + strconv.FormatInt(ip.CityID, 10) + "|" + ip.Country + "|" + ip.Region + "|" + ip.Province + "|" + ip.City + "|" + ip.ISP
}
// 获取Ip信息
func getIpInfo(ipStr string, cityId int64, line []byte) (ipInfo IpInfo) {
lineSlice := strings.Split(string(line), "|")
length := len(lineSlice)
ipInfo.CityID = cityId
if length < 5 {
for i := 0; i <= 5-length; i++ {
lineSlice = append(lineSlice, "")
}
}
if lineSlice[0] != "0" {
ipInfo.Country = gostring.SpaceAndLineBreak(lineSlice[0])
}
if lineSlice[1] != "0" {
ipInfo.Region = gostring.SpaceAndLineBreak(lineSlice[1])
}
if lineSlice[2] != "0" {
ipInfo.Province = gostring.SpaceAndLineBreak(lineSlice[2])
}
if lineSlice[3] != "0" {
ipInfo.City = gostring.SpaceAndLineBreak(lineSlice[3])
}
if lineSlice[4] != "0" {
ipInfo.ISP = gostring.SpaceAndLineBreak(lineSlice[4])
}
ipInfo.IP = ipStr
return ipInfo
}
// MemorySearch memory算法整个数据库全部载入内存单次查询都在0.1x毫秒内
func (r *Ip2Region) MemorySearch(ipStr string) (ipInfo IpInfo, err error) {
ipInfo.IP = ipStr
if net.ParseIP(ipStr).To4() == nil {
if net.ParseIP(ipStr).To16() == nil {
return ipInfo, err
}
}
if r.totalBlocks == 0 {
if err != nil {
return ipInfo, err
}
r.firstIndexPtr = getLong(dbBinStr, 0)
r.lastIndexPtr = getLong(dbBinStr, 4)
r.totalBlocks = (r.lastIndexPtr-r.firstIndexPtr)/IndexBlockLength + 1
}
ip, err := ip2long(ipStr)
if err != nil {
return ipInfo, err
}
h := r.totalBlocks
var dataPtr, l int64
for l <= h {
m := (l + h) >> 1
p := r.firstIndexPtr + m*IndexBlockLength
sip := getLong(dbBinStr, p)
if ip < sip {
h = m - 1
} else {
eip := getLong(dbBinStr, p+4)
if ip > eip {
l = m + 1
} else {
dataPtr = getLong(dbBinStr, p+8)
break
}
}
}
if dataPtr == 0 {
return ipInfo, errors.New("not found")
}
dataLen := (dataPtr >> 24) & 0xFF
dataPtr = dataPtr & 0x00FFFFFF
ipInfo = getIpInfo(ipStr, getLong(dbBinStr, dataPtr), dbBinStr[(dataPtr)+4:dataPtr+dataLen])
return ipInfo, nil
}
func getLong(b []byte, offset int64) int64 {
val := int64(b[offset]) |
int64(b[offset+1])<<8 |
int64(b[offset+2])<<16 |
int64(b[offset+3])<<24
return val
}
func ip2long(IpStr string) (int64, error) {
bits := strings.Split(IpStr, ".")
if len(bits) != 4 {
return 0, errors.New("ip format error")
}
var sum int64
for i, n := range bits {
bit, _ := strconv.ParseInt(n, 10, 64)
sum += bit << uint(24-8*i)
}
return sum, nil
}
func (r *Ip2Region) OnlineDownload() (err error) {
tmpData, err := getOnline()
if err != nil {
return errors.New("下载失败 %s" + err.Error())
}
if err := ioutil.WriteFile("./ip2region.db", tmpData, 0644); err == nil {
log.Printf("已下载最新 ip2region 数据库 %s ", "./ip2region.db")
} else {
return errors.New("保存失败")
}
return nil
}

@ -0,0 +1,73 @@
package ip2region
import (
"errors"
"net"
"strconv"
)
type QueryResult struct {
Ip string `json:"ip,omitempty"` // ip
CityId int64 `json:"city_id,omitempty"` // 城市代码
Country string `json:"country,omitempty"` // 国家
Region string `json:"region,omitempty"` // 区域
Province string `json:"province,omitempty"` // 省份
City string `json:"city,omitempty"` // 城市
Isp string `json:"isp,omitempty"` // 运营商
}
func (ip QueryResult) String() string {
return ip.Ip + "|" + strconv.FormatInt(ip.CityId, 10) + "|" + ip.Country + "|" + ip.Region + "|" + ip.Province + "|" + ip.City + "|" + ip.Isp
}
// Query uses the memory search mode: the whole database is loaded into memory, and a single lookup takes on the order of 0.1x ms
func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
result.Ip = ipAddress.String()
if c.totalBlocks == 0 {
if err != nil {
return result, err
}
c.firstIndexPtr = getLong(dbBuff, 0)
c.lastIndexPtr = getLong(dbBuff, 4)
c.totalBlocks = (c.lastIndexPtr-c.firstIndexPtr)/IndexBlockLength + 1
}
ip, err := ip2long(result.Ip)
if err != nil {
return result, err
}
h := c.totalBlocks
var dataPtr, l int64
for l <= h {
m := (l + h) >> 1
p := c.firstIndexPtr + m*IndexBlockLength
sip := getLong(dbBuff, p)
if ip < sip {
h = m - 1
} else {
eip := getLong(dbBuff, p+4)
if ip > eip {
l = m + 1
} else {
dataPtr = getLong(dbBuff, p+8)
break
}
}
}
if dataPtr == 0 {
return result, errors.New("not found")
}
dataLen := (dataPtr >> 24) & 0xFF
dataPtr = dataPtr & 0x00FFFFFF
result = getIpInfo(result.Ip, getLong(dbBuff, dataPtr), dbBuff[(dataPtr)+4:dataPtr+dataLen])
return result, nil
}
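A minimal usage sketch (not part of the commit) for the v1 client backed by the embedded ip2region.db:

package main

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/ip2region"
)

func main() {
    client := ip2region.New() // the ip2region.db file is embedded via go:embed

    result, err := client.Query(net.ParseIP("114.114.114.114"))
    if err != nil {
        panic(err)
    }
    // QueryResult.String() prints ip|city_id|country|region|province|city|isp
    fmt.Println(result.String())
}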

@ -0,0 +1,26 @@
package ip2region_v2
import _ "embed"
//go:embed ip2region.xdb
var cBuff []byte
type Client struct {
db *Searcher
}
func New() (*Client, error) {
var err error
c := &Client{}
// 1. Load the whole xdb into memory (here it is embedded via go:embed as cBuff).
// 2. Create a fully memory-based searcher from the global cBuff.
c.db, err = NewWithBuffer(cBuff)
if err != nil {
return nil, err
}
return c, err
}

@ -0,0 +1,23 @@
package ip2region_v2
import (
"io/ioutil"
"log"
"net/http"
)
func OnlineDownload() {
resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.xdb?raw=true")
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
err = ioutil.WriteFile("./ip2region.xdb", body, 0644)
if err != nil {
panic(err)
}
log.Printf("已下载最新 ip2region.xdb 数据库 %s ", "./ip2region.xdb")
}

Binary file not shown.

@ -0,0 +1,49 @@
package ip2region_v2
import (
_ "embed"
"go.dtapp.net/gostring"
"net"
)
// QueryResult is the lookup result
type QueryResult struct {
Ip string `json:"ip,omitempty"` // ip
Country string `json:"country,omitempty"` // country
Province string `json:"province,omitempty"` // province
City string `json:"city,omitempty"` // city
Operator string `json:"operator,omitempty"` // operator / ISP
}
func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
// Note: a Searcher created from the whole cached xdb buffer is safe for concurrent use.
str, err := c.db.SearchByStr(ipAddress.String())
if err != nil {
return QueryResult{}, err
}
split := gostring.Split(str, "|")
// the region string has the form country|region|province|city|isp
if len(split) < 5 {
return QueryResult{}, err
}
result.Ip = ipAddress.String()
result.Country = split[0]
result.Province = split[2]
if result.Province == "0" {
result.Province = ""
}
result.City = split[3]
if result.City == "0" {
result.City = ""
}
result.Operator = split[4]
if result.Operator == "0" {
result.Operator = ""
}
return result, err
}
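A corresponding sketch (again only an illustration) for the xdb-based v2 client; because the Searcher is built from the embedded buffer, one Client can safely be shared across goroutines:

package main

import (
    "fmt"
    "net"

    ip2regionv2 "go.dtapp.net/goip/ip2region_v2"
)

func main() {
    client, err := ip2regionv2.New() // builds an in-memory Searcher from the embedded ip2region.xdb
    if err != nil {
        panic(err)
    }

    result, err := client.Query(net.ParseIP("114.114.114.114"))
    if err != nil {
        panic(err)
    }
    fmt.Println(result.Country, result.Province, result.City, result.Operator)
}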

@ -0,0 +1,240 @@
package ip2region_v2
import (
"encoding/binary"
"fmt"
"os"
)
const (
HeaderInfoLength = 256
VectorIndexRows = 256
VectorIndexCols = 256
VectorIndexSize = 8
SegmentIndexBlockSize = 14
)
// --- Index policy define
type IndexPolicy int
const (
VectorIndexPolicy IndexPolicy = 1
BTreeIndexPolicy IndexPolicy = 2
)
func (i IndexPolicy) String() string {
switch i {
case VectorIndexPolicy:
return "VectorIndex"
case BTreeIndexPolicy:
return "BtreeIndex"
default:
return "unknown"
}
}
// --- Header define
type Header struct {
// data []byte
Version uint16
IndexPolicy IndexPolicy
CreatedAt uint32
StartIndexPtr uint32
EndIndexPtr uint32
}
func NewHeader(input []byte) (*Header, error) {
if len(input) < 16 {
return nil, fmt.Errorf("invalid input buffer")
}
return &Header{
Version: binary.LittleEndian.Uint16(input),
IndexPolicy: IndexPolicy(binary.LittleEndian.Uint16(input[2:])),
CreatedAt: binary.LittleEndian.Uint32(input[4:]),
StartIndexPtr: binary.LittleEndian.Uint32(input[8:]),
EndIndexPtr: binary.LittleEndian.Uint32(input[12:]),
}, nil
}
// --- searcher implementation
type Searcher struct {
handle *os.File
// header info
header *Header
ioCount int
// use it only when this feature enabled.
// Preload the vector index will reduce the number of IO operations
// thus speedup the search process
vectorIndex []byte
// content buffer.
// running with the whole xdb file cached
contentBuff []byte
}
func baseNew(dbFile string, vIndex []byte, cBuff []byte) (*Searcher, error) {
var err error
// content buff first
if cBuff != nil {
return &Searcher{
vectorIndex: nil,
contentBuff: cBuff,
}, nil
}
// open the xdb binary file
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
if err != nil {
return nil, err
}
return &Searcher{
handle: handle,
vectorIndex: vIndex,
}, nil
}
func NewWithFileOnly(dbFile string) (*Searcher, error) {
return baseNew(dbFile, nil, nil)
}
func NewWithVectorIndex(dbFile string, vIndex []byte) (*Searcher, error) {
return baseNew(dbFile, vIndex, nil)
}
func NewWithBuffer(cBuff []byte) (*Searcher, error) {
return baseNew("", nil, cBuff)
}
func (s *Searcher) Close() {
if s.handle != nil {
err := s.handle.Close()
if err != nil {
return
}
}
}
// GetIOCount return the global io count for the last search
func (s *Searcher) GetIOCount() int {
return s.ioCount
}
// SearchByStr find the region for the specified ip string
func (s *Searcher) SearchByStr(str string) (string, error) {
ip, err := CheckIP(str)
if err != nil {
return "", err
}
return s.Search(ip)
}
// Search find the region for the specified long ip
func (s *Searcher) Search(ip uint32) (string, error) {
// reset the global ioCount
s.ioCount = 0
// locate the segment index block based on the vector index
var il0 = (ip >> 24) & 0xFF
var il1 = (ip >> 16) & 0xFF
var idx = il0*VectorIndexCols*VectorIndexSize + il1*VectorIndexSize
var sPtr, ePtr = uint32(0), uint32(0)
if s.vectorIndex != nil {
sPtr = binary.LittleEndian.Uint32(s.vectorIndex[idx:])
ePtr = binary.LittleEndian.Uint32(s.vectorIndex[idx+4:])
} else if s.contentBuff != nil {
sPtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx:])
ePtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx+4:])
} else {
// read the vector index block
var buff = make([]byte, VectorIndexSize)
err := s.read(int64(HeaderInfoLength+idx), buff)
if err != nil {
return "", fmt.Errorf("read vector index block at %d: %w", HeaderInfoLength+idx, err)
}
sPtr = binary.LittleEndian.Uint32(buff)
ePtr = binary.LittleEndian.Uint32(buff[4:])
}
// fmt.Printf("sPtr=%d, ePtr=%d", sPtr, ePtr)
// binary search the segment index to get the region
var dataLen, dataPtr = 0, uint32(0)
var buff = make([]byte, SegmentIndexBlockSize)
var l, h = 0, int((ePtr - sPtr) / SegmentIndexBlockSize)
for l <= h {
m := (l + h) >> 1
p := sPtr + uint32(m*SegmentIndexBlockSize)
err := s.read(int64(p), buff)
if err != nil {
return "", fmt.Errorf("read segment index at %d: %w", p, err)
}
// decode the data step by step to reduce the unnecessary operations
sip := binary.LittleEndian.Uint32(buff)
if ip < sip {
h = m - 1
} else {
eip := binary.LittleEndian.Uint32(buff[4:])
if ip > eip {
l = m + 1
} else {
dataLen = int(binary.LittleEndian.Uint16(buff[8:]))
dataPtr = binary.LittleEndian.Uint32(buff[10:])
break
}
}
}
//fmt.Printf("dataLen: %d, dataPtr: %d", dataLen, dataPtr)
if dataLen == 0 {
return "", nil
}
// load and return the region data
var regionBuff = make([]byte, dataLen)
err := s.read(int64(dataPtr), regionBuff)
if err != nil {
return "", fmt.Errorf("read region at %d: %w", dataPtr, err)
}
return string(regionBuff), nil
}
// do the data read operation based on the setting.
// content buffer first or will read from the file.
// this operation will invoke the Seek for file based read.
func (s *Searcher) read(offset int64, buff []byte) error {
if s.contentBuff != nil {
cLen := copy(buff, s.contentBuff[offset:])
if cLen != len(buff) {
return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
}
} else {
_, err := s.handle.Seek(offset, 0)
if err != nil {
return fmt.Errorf("seek to %d: %w", offset, err)
}
s.ioCount++
rLen, err := s.handle.Read(buff)
if err != nil {
return fmt.Errorf("handle read: %w", err)
}
if rLen != len(buff) {
return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
}
}
return nil
}
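The three constructors trade memory for IO: NewWithFileOnly reads the vector index and the segment index from disk on every lookup, NewWithVectorIndex preloads the 512 KB vector index (256*256*8 bytes) to save one seek/read per lookup, and NewWithBuffer keeps the whole xdb in memory and never touches the file. A sketch of all three, assuming a local ./ip2region.xdb path (this package normally embeds the file instead):

package main

import (
    "fmt"

    ip2regionv2 "go.dtapp.net/goip/ip2region_v2"
)

func main() {
    dbPath := "./ip2region.xdb" // hypothetical local copy of the database

    // Option 1: file-backed searcher, every lookup seeks and reads the file.
    s1, err := ip2regionv2.NewWithFileOnly(dbPath)
    if err != nil {
        panic(err)
    }
    defer s1.Close()

    // Option 2: preload only the vector index to cut per-lookup IO.
    vIndex, err := ip2regionv2.LoadVectorIndexFromFile(dbPath)
    if err != nil {
        panic(err)
    }
    s2, err := ip2regionv2.NewWithVectorIndex(dbPath, vIndex)
    if err != nil {
        panic(err)
    }
    defer s2.Close()

    // Option 3: load the whole xdb into memory; no file handle is kept open.
    cBuff, err := ip2regionv2.LoadContentFromFile(dbPath)
    if err != nil {
        panic(err)
    }
    s3, err := ip2regionv2.NewWithBuffer(cBuff)
    if err != nil {
        panic(err)
    }

    region, err := s3.SearchByStr("114.114.114.114")
    if err != nil {
        panic(err)
    }
    fmt.Println(region, "io count:", s3.GetIOCount())
}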

@ -0,0 +1,165 @@
package ip2region_v2
import (
"fmt"
"os"
"strconv"
"strings"
)
var shiftIndex = []int{24, 16, 8, 0}
func CheckIP(ip string) (uint32, error) {
var ps = strings.Split(ip, ".")
if len(ps) != 4 {
return 0, fmt.Errorf("invalid ip address `%s`", ip)
}
var val = uint32(0)
for i, s := range ps {
d, err := strconv.Atoi(s)
if err != nil {
return 0, fmt.Errorf("the %dth part `%s` is not an integer", i, s)
}
if d < 0 || d > 255 {
return 0, fmt.Errorf("the %dth part `%s` should be an integer bettween 0 and 255", i, s)
}
val |= uint32(d) << shiftIndex[i]
}
// convert the ip to integer
return val, nil
}
func Long2IP(ip uint32) string {
return fmt.Sprintf("%d.%d.%d.%d", (ip>>24)&0xFF, (ip>>16)&0xFF, (ip>>8)&0xFF, ip&0xFF)
}
func MidIP(sip uint32, eip uint32) uint32 {
return uint32((uint64(sip) + uint64(eip)) >> 1)
}
// LoadHeader load the header info from the specified handle
func LoadHeader(handle *os.File) (*Header, error) {
_, err := handle.Seek(0, 0)
if err != nil {
return nil, fmt.Errorf("seek to the header: %w", err)
}
var buff = make([]byte, HeaderInfoLength)
rLen, err := handle.Read(buff)
if err != nil {
return nil, err
}
if rLen != len(buff) {
return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
}
return NewHeader(buff)
}
// LoadHeaderFromFile load header info from the specified db file path
func LoadHeaderFromFile(dbFile string) (*Header, error) {
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
if err != nil {
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
}
header, err := LoadHeader(handle)
if err != nil {
return nil, err
}
_ = handle.Close()
return header, nil
}
// LoadHeaderFromBuff wrap the header info from the content buffer
func LoadHeaderFromBuff(cBuff []byte) (*Header, error) {
return NewHeader(cBuff[0:256])
}
// LoadVectorIndex util function to load the vector index from the specified file handle
func LoadVectorIndex(handle *os.File) ([]byte, error) {
// load all the vector index block
_, err := handle.Seek(HeaderInfoLength, 0)
if err != nil {
return nil, fmt.Errorf("seek to vector index: %w", err)
}
var buff = make([]byte, VectorIndexRows*VectorIndexCols*VectorIndexSize)
rLen, err := handle.Read(buff)
if err != nil {
return nil, err
}
if rLen != len(buff) {
return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
}
return buff, nil
}
// LoadVectorIndexFromFile load vector index from a specified file path
func LoadVectorIndexFromFile(dbFile string) ([]byte, error) {
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
if err != nil {
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
}
vIndex, err := LoadVectorIndex(handle)
if err != nil {
return nil, err
}
_ = handle.Close()
return vIndex, nil
}
// LoadContent load the whole xdb content from the specified file handle
func LoadContent(handle *os.File) ([]byte, error) {
// get file size
fi, err := handle.Stat()
if err != nil {
return nil, fmt.Errorf("stat: %w", err)
}
size := fi.Size()
// seek to the head of the file
_, err = handle.Seek(0, 0)
if err != nil {
return nil, fmt.Errorf("seek to get xdb file length: %w", err)
}
var buff = make([]byte, size)
rLen, err := handle.Read(buff)
if err != nil {
return nil, err
}
if rLen != len(buff) {
return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
}
return buff, nil
}
// LoadContentFromFile load the whole xdb content from the specified db file path
func LoadContentFromFile(dbFile string) ([]byte, error) {
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
if err != nil {
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
}
cBuff, err := LoadContent(handle)
if err != nil {
return nil, err
}
_ = handle.Close()
return cBuff, nil
}
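And a quick round-trip check of the IP helpers above (standalone usage assumed):

package main

import (
    "fmt"

    ip2regionv2 "go.dtapp.net/goip/ip2region_v2"
)

func main() {
    // "1.2.3.4" -> 1<<24 | 2<<16 | 3<<8 | 4 = 16909060
    v, err := ip2regionv2.CheckIP("1.2.3.4")
    if err != nil {
        panic(err)
    }
    fmt.Println(v)                      // 16909060
    fmt.Println(ip2regionv2.Long2IP(v)) // 1.2.3.4
}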

@ -0,0 +1,138 @@
package ipv6wry
import (
_ "embed"
"encoding/binary"
"log"
)
var (
header []byte
country []byte
area []byte
v6ip uint64
offset uint32
start uint32
end uint32
)
//go:embed ipv6wry.db
var datBuff []byte
type Client struct {
Offset uint32
ItemLen uint32
IndexLen uint32
}
func New() *Client {
c := &Client{}
buf := datBuff[0:8]
start := binary.LittleEndian.Uint32(buf[:4])
end := binary.LittleEndian.Uint32(buf[4:])
num := int64((end-start)/7 + 1)
log.Printf("ipv6wry.db 共加载:%d 条ip记录\n", num)
return c
}
// readData reads length bytes from the embedded database starting at c.Offset
func (c *Client) readData(length uint32) (rs []byte) {
end := c.Offset + length
dataNum := uint32(len(datBuff))
if c.Offset > dataNum {
return nil
}
if end > dataNum {
end = dataNum
}
rs = datBuff[c.Offset:end]
c.Offset = end
return rs
}
func (c *Client) getAddr() ([]byte, []byte) {
mode := c.readData(1)[0]
if mode == 0x01 {
// [IP][0x01][absolute offset of the country/area record]
c.Offset = byteToUInt32(c.readData(3))
return c.getAddr()
}
// [IP][0x02][absolute offset of the record][...] or [IP][country][...]
_offset := c.Offset - 1
c1 := c.readArea(_offset)
if mode == 0x02 {
c.Offset = 4 + _offset
} else {
c.Offset = _offset + uint32(1+len(c1))
}
c2 := c.readArea(c.Offset)
return c1, c2
}
func (c *Client) readArea(offset uint32) []byte {
c.Offset = offset
mode := c.readData(1)[0]
if mode == 0x01 || mode == 0x02 {
return c.readArea(byteToUInt32(c.readData(3)))
}
c.Offset = offset
return c.readString()
}
func (c *Client) readString() []byte {
data := make([]byte, 0)
for {
buf := c.readData(1)
if buf[0] == 0 {
break
}
data = append(data, buf[0])
}
return data
}
func (c *Client) searchIndex(ip uint64) uint32 {
c.ItemLen = 8
c.IndexLen = 11
header = datBuff[8:24]
start = binary.LittleEndian.Uint32(header[8:])
counts := binary.LittleEndian.Uint32(header[:8])
end = start + counts*c.IndexLen
buf := make([]byte, c.IndexLen)
for {
mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
buf = datBuff[mid : mid+c.IndexLen]
_ip := binary.LittleEndian.Uint64(buf[:c.ItemLen])
if end-start == c.IndexLen {
if ip >= binary.LittleEndian.Uint64(datBuff[end:end+c.ItemLen]) {
buf = datBuff[end : end+c.IndexLen]
}
return byteToUInt32(buf[c.ItemLen:])
}
if _ip > ip {
end = mid
} else if _ip < ip {
start = mid
} else if _ip == ip {
return byteToUInt32(buf[c.ItemLen:])
}
}
}
func byteToUInt32(data []byte) uint32 {
i := uint32(data[0]) & 0xff
i |= (uint32(data[1]) << 8) & 0xff00
i |= (uint32(data[2]) << 16) & 0xff0000
return i
}

@ -1,4 +1,4 @@
package v6
package ipv6wry
import (
"github.com/saracen/go7z"
@ -9,28 +9,39 @@ import (
"os"
)
func getOnline() (data []byte, err error) {
func OnlineDownload() {
resp, err := http.Get("https://ip.zxinc.org/ip.7z")
if err != nil {
return nil, err
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
panic(err)
}
file7z, err := ioutil.TempFile("", "*")
if err != nil {
return nil, err
panic(err)
}
defer os.Remove(file7z.Name())
if err := ioutil.WriteFile(file7z.Name(), body, 0644); err == nil {
return Un7z(file7z.Name())
err = ioutil.WriteFile(file7z.Name(), body, 0644)
if err != nil {
panic(err)
}
tmpData, err := Un7z(file7z.Name())
if err != nil {
panic(err)
}
err = ioutil.WriteFile("./ipv6wry.db", tmpData, 0644)
if err != nil {
panic(err)
}
return
log.Printf("已下载最新 ZX IPv6数据库 %s ", "./ipv6wry.db")
}
func Un7z(filePath string) (data []byte, err error) {

@ -0,0 +1,82 @@
package ipv6wry
import (
"go.dtapp.net/gostring"
"math/big"
"net"
"strings"
)
// QueryResult is the lookup result
type QueryResult struct {
Ip string `json:"ip,omitempty"` // ip
Country string `json:"country,omitempty"` // country
Province string `json:"province,omitempty"` // province
City string `json:"city,omitempty"` // city
Area string `json:"area,omitempty"` // area
Isp string `json:"isp,omitempty"` // ISP
}
// Query looks up the location information for an IP address
func (c *Client) Query(ipAddress net.IP) (result QueryResult) {
result.Ip = ipAddress.String()
c.Offset = 0
tp := big.NewInt(0)
op := big.NewInt(0)
tp.SetBytes(ipAddress.To16())
op.SetString("18446744073709551616", 10)
op.Div(tp, op)
tp.SetString("FFFFFFFFFFFFFFFF", 16)
op.And(op, tp)
v6ip = op.Uint64()
offset = c.searchIndex(v6ip)
c.Offset = offset
country, area = c.getAddr()
// parse the region data
info := strings.Split(string(country), "\t")
if len(info) > 0 {
i := 1
for {
if i > len(info) {
break
}
switch i {
case 1:
result.Country = info[i-1]
result.Country = gostring.SpaceAndLineBreak(result.Country)
case 2:
result.Province = info[i-1]
result.Province = gostring.SpaceAndLineBreak(result.Province)
case 3:
result.City = info[i-1]
result.City = gostring.SpaceAndLineBreak(result.City)
case 4:
result.Area = info[i-1]
result.Area = gostring.SpaceAndLineBreak(result.Area)
}
i++ // next field
}
} else {
result.Country = string(country)
result.Country = gostring.SpaceAndLineBreak(result.Country)
}
// ISP
result.Isp = string(area)
// Drop "ZX" placeholders (avoid irrelevant noise)
if result.Isp == "ZX" || result.Isp == "" {
result.Isp = ""
} else {
result.Isp = " " + result.Isp
}
result.Isp = gostring.SpaceAndLineBreak(result.Isp)
return result
}
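A usage sketch (not part of the commit) for the embedded zxinc IPv6 database; note that Query keys the index lookup on the top 64 bits of the address (the 128-bit value divided by 2^64):

package main

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/ipv6wry"
)

func main() {
    client := ipv6wry.New() // loads the embedded ipv6wry.db

    result := client.Query(net.ParseIP("2400:3200::1"))
    fmt.Println(result.Country, result.Province, result.City, result.Isp)
}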

35
vendor/go.dtapp.net/goip/is.go generated vendored

@ -0,0 +1,35 @@
package goip
import "strings"
var (
ipv4 = "IPV4"
ipv6 = "IPV6"
)
func (c *Client) isIpv4OrIpv6(ip string) string {
if len(ip) < 7 {
return ""
}
arrIpv4 := strings.Split(ip, ".")
if len(arrIpv4) == 4 {
// check each IPv4 octet
for _, val := range arrIpv4 {
if !c.CheckIpv4(val) {
return ""
}
}
return ipv4
}
arrIpv6 := strings.Split(ip, ":")
if len(arrIpv6) == 8 {
// check each IPv6 group
for _, val := range arrIpv6 {
if !c.CheckIpv6(val) {
return "Neither"
}
}
return ipv6
}
return ""
}

@ -0,0 +1,139 @@
package qqwry
import (
_ "embed"
"encoding/binary"
"log"
)
var (
header []byte
country []byte
area []byte
offset uint32
start uint32
end uint32
)
//go:embed qqwry.dat
var datBuff []byte
type Client struct {
Offset uint32
ItemLen uint32
IndexLen uint32
}
func New() *Client {
c := &Client{}
buf := datBuff[0:8]
start := binary.LittleEndian.Uint32(buf[:4])
end := binary.LittleEndian.Uint32(buf[4:])
num := int64((end-start)/7 + 1)
log.Printf("qqwry.dat 共加载:%d 条ip记录\n", num)
return c
}
// readData reads length bytes from the embedded database starting at c.Offset
func (c *Client) readData(length uint32) (rs []byte) {
end := c.Offset + length
dataNum := uint32(len(datBuff))
if c.Offset > dataNum {
return nil
}
if end > dataNum {
end = dataNum
}
rs = datBuff[c.Offset:end]
c.Offset = end
return rs
}
// getAddr reads the country and area records
func (c *Client) getAddr() ([]byte, []byte) {
mode := c.readData(1)[0]
if mode == 0x01 {
// [IP][0x01][absolute offset of the country/area record]
c.Offset = byteToUInt32(c.readData(3))
return c.getAddr()
}
// [IP][0x02][absolute offset of the record][...] or [IP][country][...]
_offset := c.Offset - 1
c1 := c.readArea(_offset)
if mode == 0x02 {
c.Offset = 4 + _offset
} else {
c.Offset = _offset + uint32(1+len(c1))
}
c2 := c.readArea(c.Offset)
return c1, c2
}
// readArea reads an area record
func (c *Client) readArea(offset uint32) []byte {
c.Offset = offset
mode := c.readData(1)[0]
if mode == 0x01 || mode == 0x02 {
return c.readArea(byteToUInt32(c.readData(3)))
}
c.Offset = offset
return c.readString()
}
// readString reads a NUL-terminated string
func (c *Client) readString() []byte {
data := make([]byte, 0)
for {
buf := c.readData(1)
if buf[0] == 0 {
break
}
data = append(data, buf[0])
}
return data
}
// searchIndex binary-searches the index for the given IP
func (c *Client) searchIndex(ip uint32) uint32 {
c.ItemLen = 4
c.IndexLen = 7
header = datBuff[0:8]
start = binary.LittleEndian.Uint32(header[:4])
end = binary.LittleEndian.Uint32(header[4:])
buf := make([]byte, c.IndexLen)
for {
mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
buf = datBuff[mid : mid+c.IndexLen]
_ip := binary.LittleEndian.Uint32(buf[:c.ItemLen])
if end-start == c.IndexLen {
if ip >= binary.LittleEndian.Uint32(datBuff[end:end+c.ItemLen]) {
buf = datBuff[end : end+c.IndexLen]
}
return byteToUInt32(buf[c.ItemLen:])
}
if _ip > ip {
end = mid
} else if _ip < ip {
start = mid
} else if _ip == ip {
return byteToUInt32(buf[c.ItemLen:])
}
}
}
// byteToUInt32 converts a 3-byte little-endian value to uint32
func byteToUInt32(data []byte) uint32 {
i := uint32(data[0]) & 0xff
i |= (uint32(data[1]) << 8) & 0xff00
i |= (uint32(data[2]) << 16) & 0xff0000
return i
}

@ -1,10 +1,11 @@
package v4
package qqwry
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io/ioutil"
"log"
"net/http"
)
@ -25,34 +26,45 @@ func getKey() (uint32, error) {
}
}
// fetch the database online
func getOnline() ([]byte, error) {
func OnlineDownload() {
resp, err := http.Get("https://update.cz88.net/ip/qqwry.rar")
if err != nil {
return nil, err
panic(err)
}
defer resp.Body.Close()
if body, err := ioutil.ReadAll(resp.Body); err != nil {
return nil, err
} else {
if key, err := getKey(); err != nil {
return nil, err
} else {
for i := 0; i < 0x200; i++ {
key = key * 0x805
key++
key = key & 0xff
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
panic(err)
}
key, err := getKey()
if err != nil {
panic(err)
}
for i := 0; i < 0x200; i++ {
key = key * 0x805
key++
key = key & 0xff
body[i] = byte(uint32(body[i]) ^ key)
}
body[i] = byte(uint32(body[i]) ^ key)
}
reader, err := zlib.NewReader(bytes.NewReader(body))
if err != nil {
panic(err)
}
reader, err := zlib.NewReader(bytes.NewReader(body))
if err != nil {
return nil, err
}
tmpData, err := ioutil.ReadAll(reader)
if err != nil {
panic(err)
}
return ioutil.ReadAll(reader)
}
err = ioutil.WriteFile("./qqwry.dat", tmpData, 0644)
if err != nil {
panic(err)
}
log.Printf("已下载最新 纯真 IPv4数据库 %s ", "./qqwry.dat")
}

@ -0,0 +1,53 @@
package qqwry
import (
"encoding/binary"
"errors"
"go.dtapp.net/gostring"
"golang.org/x/text/encoding/simplifiedchinese"
"net"
)
// QueryResult is the lookup result
type QueryResult struct {
Ip string `json:"ip,omitempty"` // ip
Country string `json:"country,omitempty"` // country or region
Area string `json:"area,omitempty"` // area
}
// Query looks up the location information for an IP address
func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
c.Offset = 0
// locate the record offset
offset = c.searchIndex(binary.BigEndian.Uint32(ipAddress.To4()))
if offset <= 0 {
return QueryResult{}, errors.New("搜索失败")
}
result.Ip = ipAddress.String()
c.Offset = offset + c.ItemLen
country, area = c.getAddr()
enc := simplifiedchinese.GBK.NewDecoder()
result.Country, _ = enc.String(string(country))
result.Country = gostring.SpaceAndLineBreak(result.Country)
result.Area, _ = enc.String(string(area))
// Drop " CZ88.NET" placeholders (avoid irrelevant noise)
if result.Area == " CZ88.NET" || result.Area == "" {
result.Area = ""
} else {
result.Area = " " + result.Area
}
result.Area = gostring.SpaceAndLineBreak(result.Area)
return result, nil
}
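A usage sketch (not part of the commit) for the embedded CZ88 (纯真) IPv4 database; Country and Area are decoded from GBK before being returned:

package main

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/qqwry"
)

func main() {
    client := qqwry.New() // loads the embedded qqwry.dat

    result, err := client.Query(net.ParseIP("114.114.114.114"))
    if err != nil {
        panic(err)
    }
    fmt.Println(result.Country, result.Area)
}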

87
vendor/go.dtapp.net/goip/query.go generated vendored

@ -0,0 +1,87 @@
package goip
import (
"errors"
"go.dtapp.net/goip/geoip"
"go.dtapp.net/goip/ip2region"
"go.dtapp.net/goip/ip2region_v2"
"go.dtapp.net/goip/ipv6wry"
"go.dtapp.net/goip/qqwry"
"net"
)
var (
QueryIncorrect = errors.New("ip地址不正确")
)
// QueryQqWry queries the CZ88 (纯真) IPv4 database
// https://www.cz88.net/
func (c *Client) QueryQqWry(ipAddress net.IP) (result qqwry.QueryResult, err error) {
if ipAddress.To4() == nil {
return result, QueryIncorrect
}
query, err := c.qqwryClient.Query(ipAddress)
if err != nil {
return qqwry.QueryResult{}, err
}
return query, err
}
// QueryIp2Region ip2region
// https://github.com/lionsoul2014/ip2region
func (c *Client) QueryIp2Region(ipAddress net.IP) (result ip2region.QueryResult, err error) {
if ipAddress.To4() == nil {
return result, QueryIncorrect
}
query, err := c.ip2regionClient.Query(ipAddress)
if err != nil {
return ip2region.QueryResult{}, err
}
return query, err
}
// QueryIp2RegionV2 ip2region
// https://github.com/lionsoul2014/ip2region
func (c *Client) QueryIp2RegionV2(ipAddress net.IP) (result ip2region_v2.QueryResult, err error) {
if ipAddress.To4() == nil {
return result, QueryIncorrect
}
query, err := c.ip2regionV2Client.Query(ipAddress)
if err != nil {
return ip2region_v2.QueryResult{}, err
}
return query, nil
}
// QueryGeoIp queries the MaxMind GeoIP database
// https://www.maxmind.com/
func (c *Client) QueryGeoIp(ipAddress net.IP) (result geoip.QueryCityResult, err error) {
if ipAddress.String() == "<nil>" {
return result, QueryIncorrect
}
query, err := c.geoIpClient.QueryCity(ipAddress)
if err != nil {
return geoip.QueryCityResult{}, err
}
return query, nil
}
// QueryIpv6wry queries the zxinc IPv6 database
// https://ip.zxinc.org
func (c *Client) QueryIpv6wry(ipAddress net.IP) (result ipv6wry.QueryResult, err error) {
if ipAddress.To16() == nil {
return result, QueryIncorrect
}
query := c.ipv6wryClient.Query(ipAddress)
return query, nil
}
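The wrapper Client's sub-clients (qqwryClient, ip2regionClient, ip2regionV2Client, geoIpClient, ipv6wryClient) and its constructor are not shown in this hunk, so the following is only a sketch with a placeholder constructor name:

package main

import (
    "fmt"
    "net"

    "go.dtapp.net/goip"
)

func main() {
    // NewIp() is a placeholder here; the actual constructor for the rewritten
    // Client is not part of this hunk.
    client := goip.NewIp()

    ip := net.ParseIP("114.114.114.114")

    if v2, err := client.QueryIp2RegionV2(ip); err == nil {
        fmt.Println("ip2region v2:", v2.Country, v2.Province, v2.City, v2.Operator)
    }
    if geo, err := client.QueryGeoIp(ip); err == nil {
        fmt.Println("geoip:", geo.Country.Name, geo.City.Name)
    }
    if v6, err := client.QueryIpv6wry(net.ParseIP("2400:3200::1")); err == nil {
        fmt.Println("ipv6wry:", v6.Country, v6.Isp)
    }
}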

201
vendor/go.dtapp.net/goip/v4/ipv4.go generated vendored

@ -1,201 +0,0 @@
package v4
import (
_ "embed"
"encoding/binary"
"errors"
"go.dtapp.net/gostring"
"golang.org/x/text/encoding/simplifiedchinese"
"io/ioutil"
"log"
"net"
)
var (
header []byte
country []byte
area []byte
offset uint32
start uint32
end uint32
)
//go:embed ip.dat
var dat []byte
type Pointer struct {
Offset uint32
ItemLen uint32
IndexLen uint32
}
// Result 返回
type Result struct {
IP string `json:"ip,omitempty"` // 输入的ip地址
Country string `json:"country,omitempty"` // 国家或地区
Area string `json:"area,omitempty"` // 区域
}
// InitIPV4Data 加载
func (q *Pointer) InitIPV4Data() int64 {
buf := dat[0:8]
start := binary.LittleEndian.Uint32(buf[:4])
end := binary.LittleEndian.Uint32(buf[4:])
return int64((end-start)/7 + 1)
}
// ReadData 从文件中读取数据
func (q *Pointer) readData(length uint32) (rs []byte) {
end := q.Offset + length
dataNum := uint32(len(dat))
if q.Offset > dataNum {
return nil
}
if end > dataNum {
end = dataNum
}
rs = dat[q.Offset:end]
q.Offset = end
return rs
}
// Find ip地址查询对应归属地信息
func (q *Pointer) Find(ipStr string) (res Result) {
// 赋值
res.IP = ipStr
if net.ParseIP(ipStr).To4() == nil {
// 不是ip地址
return res
}
q.Offset = 0
// 偏移
offset = q.searchIndex(binary.BigEndian.Uint32(net.ParseIP(ipStr).To4()))
if offset <= 0 {
return
}
q.Offset = offset + q.ItemLen
country, area = q.getAddr()
enc := simplifiedchinese.GBK.NewDecoder()
res.Country, _ = enc.String(string(country))
res.Country = gostring.SpaceAndLineBreak(res.Country)
res.Area, _ = enc.String(string(area))
// Delete CZ88.NET (防止不相关的信息产生干扰)
if res.Area == " CZ88.NET" || res.Area == "" {
res.Area = ""
} else {
res.Area = " " + res.Area
}
res.Area = gostring.SpaceAndLineBreak(res.Area)
return
}
// 获取地址信息
func (q *Pointer) getAddr() ([]byte, []byte) {
mode := q.readData(1)[0]
if mode == 0x01 {
// [IP][0x01][国家和地区信息的绝对偏移地址]
q.Offset = byteToUInt32(q.readData(3))
return q.getAddr()
}
// [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
_offset := q.Offset - 1
c1 := q.readArea(_offset)
if mode == 0x02 {
q.Offset = 4 + _offset
} else {
q.Offset = _offset + uint32(1+len(c1))
}
c2 := q.readArea(q.Offset)
return c1, c2
}
// 读取区
func (q *Pointer) readArea(offset uint32) []byte {
q.Offset = offset
mode := q.readData(1)[0]
if mode == 0x01 || mode == 0x02 {
return q.readArea(byteToUInt32(q.readData(3)))
}
q.Offset = offset
return q.readString()
}
// 读取字符串
func (q *Pointer) readString() []byte {
data := make([]byte, 0)
for {
buf := q.readData(1)
if buf[0] == 0 {
break
}
data = append(data, buf[0])
}
return data
}
// 搜索索引
func (q *Pointer) searchIndex(ip uint32) uint32 {
q.ItemLen = 4
q.IndexLen = 7
header = dat[0:8]
start = binary.LittleEndian.Uint32(header[:4])
end = binary.LittleEndian.Uint32(header[4:])
buf := make([]byte, q.IndexLen)
for {
mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
buf = dat[mid : mid+q.IndexLen]
_ip := binary.LittleEndian.Uint32(buf[:q.ItemLen])
if end-start == q.IndexLen {
if ip >= binary.LittleEndian.Uint32(dat[end:end+q.ItemLen]) {
buf = dat[end : end+q.IndexLen]
}
return byteToUInt32(buf[q.ItemLen:])
}
if _ip > ip {
end = mid
} else if _ip < ip {
start = mid
} else if _ip == ip {
return byteToUInt32(buf[q.ItemLen:])
}
}
}
// 字节转UInt32
func byteToUInt32(data []byte) uint32 {
i := uint32(data[0]) & 0xff
i |= (uint32(data[1]) << 8) & 0xff00
i |= (uint32(data[2]) << 16) & 0xff0000
return i
}
// OnlineDownload 在线下载
func (q *Pointer) OnlineDownload() (err error) {
tmpData, err := getOnline()
if err != nil {
return errors.New("下载失败")
}
if err := ioutil.WriteFile("./qqwry.dat", tmpData, 0644); err == nil {
log.Printf("已下载最新 纯真 IPv4数据库 %s ", "./qqwry.dat")
} else {
return errors.New("保存失败")
}
return nil
}

230
vendor/go.dtapp.net/goip/v6/ipv6.go generated vendored

@ -1,230 +0,0 @@
package v6
import (
_ "embed"
"encoding/binary"
"errors"
"go.dtapp.net/gostring"
"io/ioutil"
"log"
"math/big"
"net"
"strings"
)
var (
header []byte
country []byte
area []byte
v6ip uint64
offset uint32
start uint32
end uint32
)
type Result struct {
IP string `json:"ip,omitempty"` // 输入的ip地址
Country string `json:"country,omitempty"` // 国家
Province string `json:"province,omitempty"` // 省份
City string `json:"city,omitempty"` // 城市
Area string `json:"area,omitempty"` // 区域
Isp string `json:"isp,omitempty"` // 运营商
}
//go:embed ip.db
var dat []byte
type Pointer struct {
Offset uint32
ItemLen uint32
IndexLen uint32
}
// InitIPV4Data 加载
func (q *Pointer) InitIPV4Data() int64 {
buf := dat[0:8]
start := binary.LittleEndian.Uint32(buf[:4])
end := binary.LittleEndian.Uint32(buf[4:])
return int64((end-start)/7 + 1)
}
// ReadData 从文件中读取数据
func (q *Pointer) readData(length uint32) (rs []byte) {
end := q.Offset + length
dataNum := uint32(len(dat))
if q.Offset > dataNum {
return nil
}
if end > dataNum {
end = dataNum
}
rs = dat[q.Offset:end]
q.Offset = end
return rs
}
// Find ip地址查询对应归属地信息
func (q *Pointer) Find(ipStr string) (res Result) {
res = Result{}
res.IP = ipStr
if net.ParseIP(ipStr).To16() == nil {
return Result{}
}
q.Offset = 0
tp := big.NewInt(0)
op := big.NewInt(0)
tp.SetBytes(net.ParseIP(ipStr).To16())
op.SetString("18446744073709551616", 10)
op.Div(tp, op)
tp.SetString("FFFFFFFFFFFFFFFF", 16)
op.And(op, tp)
v6ip = op.Uint64()
offset = q.searchIndex(v6ip)
q.Offset = offset
country, area = q.getAddr()
// 解析地区数据
info := strings.Split(string(country), "\t")
if len(info) > 0 {
i := 1
for {
if i > len(info) {
break
}
switch i {
case 1:
res.Country = info[i-1]
res.Country = gostring.SpaceAndLineBreak(res.Country)
case 2:
res.Province = info[i-1]
res.Province = gostring.SpaceAndLineBreak(res.Province)
case 3:
res.City = info[i-1]
res.City = gostring.SpaceAndLineBreak(res.City)
case 4:
res.Area = info[i-1]
res.Area = gostring.SpaceAndLineBreak(res.Area)
}
i++ // 自增
}
} else {
res.Country = string(country)
res.Country = gostring.SpaceAndLineBreak(res.Country)
}
// 运营商
res.Isp = string(area)
// Delete ZX (防止不相关的信息产生干扰)
if res.Isp == "ZX" || res.Isp == "" {
res.Isp = ""
} else {
res.Isp = " " + res.Isp
}
res.Isp = gostring.SpaceAndLineBreak(res.Isp)
return
}
func (q *Pointer) getAddr() ([]byte, []byte) {
mode := q.readData(1)[0]
if mode == 0x01 {
// [IP][0x01][国家和地区信息的绝对偏移地址]
q.Offset = byteToUInt32(q.readData(3))
return q.getAddr()
}
// [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
_offset := q.Offset - 1
c1 := q.readArea(_offset)
if mode == 0x02 {
q.Offset = 4 + _offset
} else {
q.Offset = _offset + uint32(1+len(c1))
}
c2 := q.readArea(q.Offset)
return c1, c2
}
func (q *Pointer) readArea(offset uint32) []byte {
q.Offset = offset
mode := q.readData(1)[0]
if mode == 0x01 || mode == 0x02 {
return q.readArea(byteToUInt32(q.readData(3)))
}
q.Offset = offset
return q.readString()
}
func (q *Pointer) readString() []byte {
data := make([]byte, 0)
for {
buf := q.readData(1)
if buf[0] == 0 {
break
}
data = append(data, buf[0])
}
return data
}
func (q *Pointer) searchIndex(ip uint64) uint32 {
q.ItemLen = 8
q.IndexLen = 11
header = dat[8:24]
start = binary.LittleEndian.Uint32(header[8:])
counts := binary.LittleEndian.Uint32(header[:8])
end = start + counts*q.IndexLen
buf := make([]byte, q.IndexLen)
for {
mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
buf = dat[mid : mid+q.IndexLen]
_ip := binary.LittleEndian.Uint64(buf[:q.ItemLen])
if end-start == q.IndexLen {
if ip >= binary.LittleEndian.Uint64(dat[end:end+q.ItemLen]) {
buf = dat[end : end+q.IndexLen]
}
return byteToUInt32(buf[q.ItemLen:])
}
if _ip > ip {
end = mid
} else if _ip < ip {
start = mid
} else if _ip == ip {
return byteToUInt32(buf[q.ItemLen:])
}
}
}
func byteToUInt32(data []byte) uint32 {
i := uint32(data[0]) & 0xff
i |= (uint32(data[1]) << 8) & 0xff00
i |= (uint32(data[2]) << 16) & 0xff0000
return i
}
// OnlineDownload 在线下载
func (q *Pointer) OnlineDownload() (err error) {
tmpData, err := getOnline()
if err != nil {
return errors.New("下载失败")
}
if err := ioutil.WriteFile("./ipv6wry.db", tmpData, 0644); err == nil {
log.Printf("已下载最新 ZX IPv6数据库 %s ", "./ipv6wry.db")
} else {
return errors.New("保存失败")
}
return nil
}

@ -214,11 +214,6 @@ esac
if [ "$GOOSARCH" == "aix_ppc64" ]; then
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
elif [ "$GOOS" == "darwin" ]; then
# 1.12 and later, syscalls via libSystem
echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
# 1.13 and later, syscalls via libSystem (including syscallPtr)
echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
elif [ "$GOOS" == "illumos" ]; then
# illumos code generation requires a --illumos switch
echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";

@ -1,32 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && go1.12 && !go1.13
// +build darwin,go1.12,!go1.13
package unix
import (
"unsafe"
)
const _SYS_GETDIRENTRIES64 = 344
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// To implement this using libSystem we'd need syscall_syscallPtr for
// fdopendir. However, syscallPtr was only added in Go 1.13, so we fall
// back to raw syscalls for this func on Go 1.12.
var p unsafe.Pointer
if len(buf) > 0 {
p = unsafe.Pointer(&buf[0])
} else {
p = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
n = int(r0)
if e1 != 0 {
return n, errnoErr(e1)
}
return n, nil
}

@ -1,100 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build darwin && go1.13
// +build darwin,go1.13
package unix
import "unsafe"
//sys closedir(dir uintptr) (err error)
//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
func fdopendir(fd int) (dir uintptr, err error) {
r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
dir = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fdopendir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// Simulate Getdirentries using fdopendir/readdir_r/closedir.
// We store the number of entries to skip in the seek
// offset of fd. See issue #31368.
// It's not the full required semantics, but should handle the case
// of calling Getdirentries or ReadDirent repeatedly.
// It won't handle assigning the results of lseek to *basep, or handle
// the directory being edited underfoot.
skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
if err != nil {
return 0, err
}
// We need to duplicate the incoming file descriptor
// because the caller expects to retain control of it, but
// fdopendir expects to take control of its argument.
// Just Dup'ing the file descriptor is not enough, as the
// result shares underlying state. Use Openat to make a really
// new file descriptor referring to the same directory.
fd2, err := Openat(fd, ".", O_RDONLY, 0)
if err != nil {
return 0, err
}
d, err := fdopendir(fd2)
if err != nil {
Close(fd2)
return 0, err
}
defer closedir(d)
var cnt int64
for {
var entry Dirent
var entryp *Dirent
e := readdir_r(d, &entry, &entryp)
if e != 0 {
return n, errnoErr(e)
}
if entryp == nil {
break
}
if skip > 0 {
skip--
cnt++
continue
}
reclen := int(entry.Reclen)
if reclen > len(buf) {
// Not enough room. Return for now.
// The counter will let us know where we should start up again.
// Note: this strategy for suspending in the middle and
// restarting is O(n^2) in the length of the directory. Oh well.
break
}
// Copy entry into return buffer.
s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
copy(buf, s)
buf = buf[reclen:]
n += reclen
cnt++
}
// Set the seek offset of the input fd to record
// how many files we've already returned.
_, err = Seek(fd, cnt, 0 /* SEEK_SET */)
if err != nil {
return n, err
}
return n, nil
}

@ -19,6 +19,96 @@ import (
"unsafe"
)
//sys closedir(dir uintptr) (err error)
//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
func fdopendir(fd int) (dir uintptr, err error) {
r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
dir = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_fdopendir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
// Simulate Getdirentries using fdopendir/readdir_r/closedir.
// We store the number of entries to skip in the seek
// offset of fd. See issue #31368.
// It's not the full required semantics, but should handle the case
// of calling Getdirentries or ReadDirent repeatedly.
// It won't handle assigning the results of lseek to *basep, or handle
// the directory being edited underfoot.
skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
if err != nil {
return 0, err
}
// We need to duplicate the incoming file descriptor
// because the caller expects to retain control of it, but
// fdopendir expects to take control of its argument.
// Just Dup'ing the file descriptor is not enough, as the
// result shares underlying state. Use Openat to make a really
// new file descriptor referring to the same directory.
fd2, err := Openat(fd, ".", O_RDONLY, 0)
if err != nil {
return 0, err
}
d, err := fdopendir(fd2)
if err != nil {
Close(fd2)
return 0, err
}
defer closedir(d)
var cnt int64
for {
var entry Dirent
var entryp *Dirent
e := readdir_r(d, &entry, &entryp)
if e != 0 {
return n, errnoErr(e)
}
if entryp == nil {
break
}
if skip > 0 {
skip--
cnt++
continue
}
reclen := int(entry.Reclen)
if reclen > len(buf) {
// Not enough room. Return for now.
// The counter will let us know where we should start up again.
// Note: this strategy for suspending in the middle and
// restarting is O(n^2) in the length of the directory. Oh well.
break
}
// Copy entry into return buffer.
s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
copy(buf, s)
buf = buf[reclen:]
n += reclen
cnt++
}
// Set the seek offset of the input fd to record
// how many files we've already returned.
_, err = Seek(fd, cnt, 0 /* SEEK_SET */)
if err != nil {
return n, err
}
return n, nil
}
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8

@ -1,40 +0,0 @@
// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build darwin && amd64 && go1.13
// +build darwin,amd64,go1.13
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func closedir(dir uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_closedir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
res = Errno(r0)
return
}
var libc_readdir_r_trampoline_addr uintptr
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"

@ -1,25 +0,0 @@
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.13
// +build go1.13
#include "textflag.h"
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fdopendir(SB)
GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_closedir(SB)
GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readdir_r(SB)
GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)

@ -1,8 +1,8 @@
// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build darwin && amd64 && go1.12
// +build darwin,amd64,go1.12
//go:build darwin && amd64
// +build darwin,amd64
package unix
@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func closedir(dir uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_closedir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
res = Errno(r0)
return
}
var libc_readdir_r_trampoline_addr uintptr
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {

@ -1,11 +1,14 @@
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.12
// +build go1.12
#include "textflag.h"
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fdopendir(SB)
GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_closedir(SB)
GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readdir_r(SB)
GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)

@ -1,40 +0,0 @@
// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build darwin && arm64 && go1.13
// +build darwin,arm64,go1.13
package unix
import (
"syscall"
"unsafe"
)
var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func closedir(dir uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_closedir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
res = Errno(r0)
return
}
var libc_readdir_r_trampoline_addr uintptr
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"

@ -1,25 +0,0 @@
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.13
// +build go1.13
#include "textflag.h"
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fdopendir(SB)
GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_closedir(SB)
GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readdir_r(SB)
GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)

@ -1,8 +1,8 @@
// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build darwin && arm64 && go1.12
// +build darwin,arm64,go1.12
//go:build darwin && arm64
// +build darwin,arm64
package unix
@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func closedir(dir uintptr) (err error) {
_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_closedir_trampoline_addr uintptr
//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
res = Errno(r0)
return
}
var libc_readdir_r_trampoline_addr uintptr
//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {

@ -1,11 +1,14 @@
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.12
// +build go1.12
#include "textflag.h"
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fdopendir(SB)
GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_closedir(SB)
GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readdir_r(SB)
GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)

3
vendor/gorm.io/gorm/.gitignore generated vendored

@ -3,4 +3,5 @@ documents
coverage.txt
_book
.idea
vendor
vendor
.vscode

@ -507,7 +507,9 @@ func (association *Association) buildCondition() *DB {
joinStmt.AddClause(queryClause)
}
joinStmt.Build("WHERE")
tx.Clauses(clause.Expr{SQL: strings.Replace(joinStmt.SQL.String(), "WHERE ", "", 1), Vars: joinStmt.Vars})
if len(joinStmt.SQL.String()) > 0 {
tx.Clauses(clause.Expr{SQL: strings.Replace(joinStmt.SQL.String(), "WHERE ", "", 1), Vars: joinStmt.Vars})
}
}
tx = tx.Session(&Session{QueryFields: true}).Clauses(clause.From{Joins: []clause.Join{{

@ -206,7 +206,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
}
}
cacheKey := utils.ToStringKey(relPrimaryValues)
cacheKey := utils.ToStringKey(relPrimaryValues...)
if len(relPrimaryValues) != len(rel.FieldSchema.PrimaryFields) || !identityMap[cacheKey] {
identityMap[cacheKey] = true
if isPtr {
@ -292,7 +292,7 @@ func SaveAfterAssociations(create bool) func(db *gorm.DB) {
}
}
cacheKey := utils.ToStringKey(relPrimaryValues)
cacheKey := utils.ToStringKey(relPrimaryValues...)
if len(relPrimaryValues) != len(rel.FieldSchema.PrimaryFields) || !identityMap[cacheKey] {
identityMap[cacheKey] = true
distinctElems = reflect.Append(distinctElems, elem)
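The two hunks above change utils.ToStringKey(relPrimaryValues) to utils.ToStringKey(relPrimaryValues...). A standalone sketch of why the spread matters (toStringKey below is a stand-in, not gorm's actual utils implementation): with the spread, each primary-key value becomes its own key element; without it, the whole slice arrives as a single argument and is formatted as one element.

package main

import (
	"fmt"
	"strings"
)

// stand-in for utils.ToStringKey: one formatted element per value
func toStringKey(values ...interface{}) string {
	parts := make([]string, len(values))
	for i, v := range values {
		parts[i] = fmt.Sprint(v)
	}
	return strings.Join(parts, "_")
}

func main() {
	relPrimaryValues := []interface{}{1, "jinzhu"}
	fmt.Println(toStringKey(relPrimaryValues...)) // "1_jinzhu" — one element per primary-key value
	fmt.Println(toStringKey(relPrimaryValues))    // "[1 jinzhu]" — the slice formatted as a single element
}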

@ -70,10 +70,12 @@ func Update(config *Config) func(db *gorm.DB) {
if db.Statement.SQL.Len() == 0 {
db.Statement.SQL.Grow(180)
db.Statement.AddClauseIfNotExists(clause.Update{})
if set := ConvertToAssignments(db.Statement); len(set) != 0 {
db.Statement.AddClause(set)
} else if _, ok := db.Statement.Clauses["SET"]; !ok {
return
if _, ok := db.Statement.Clauses["SET"]; !ok {
if set := ConvertToAssignments(db.Statement); len(set) != 0 {
db.Statement.AddClause(set)
} else {
return
}
}
db.Statement.Build(db.Statement.BuildClauses...)
@ -158,21 +160,21 @@ func ConvertToAssignments(stmt *gorm.Statement) (set clause.Set) {
switch stmt.ReflectValue.Kind() {
case reflect.Slice, reflect.Array:
if size := stmt.ReflectValue.Len(); size > 0 {
var primaryKeyExprs []clause.Expression
var isZero bool
for i := 0; i < size; i++ {
exprs := make([]clause.Expression, len(stmt.Schema.PrimaryFields))
var notZero bool
for idx, field := range stmt.Schema.PrimaryFields {
value, isZero := field.ValueOf(stmt.Context, stmt.ReflectValue.Index(i))
exprs[idx] = clause.Eq{Column: field.DBName, Value: value}
notZero = notZero || !isZero
}
if notZero {
primaryKeyExprs = append(primaryKeyExprs, clause.And(exprs...))
for _, field := range stmt.Schema.PrimaryFields {
_, isZero = field.ValueOf(stmt.Context, stmt.ReflectValue.Index(i))
if !isZero {
break
}
}
}
stmt.AddClause(clause.Where{Exprs: []clause.Expression{clause.And(clause.Or(primaryKeyExprs...))}})
if !isZero {
_, primaryValues := schema.GetIdentityFieldValuesMap(stmt.Context, stmt.ReflectValue, stmt.Schema.PrimaryFields)
column, values := schema.ToQueryValues("", stmt.Schema.PrimaryFieldDBNames, primaryValues)
stmt.AddClause(clause.Where{Exprs: []clause.Expression{clause.IN{Column: column, Values: values}}})
}
}
case reflect.Struct:
for _, field := range stmt.Schema.PrimaryFields {
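At the usage level, the hunk above changes the condition built when updating a slice of records that carry non-zero primary keys: instead of OR-ing per-row equality expressions, a single IN over the primary key is emitted. A hedged sketch, assuming a User model with an ID primary key and an open *gorm.DB named db:

users := []User{{ID: 1}, {ID: 2}, {ID: 3}}
db.Model(&users).Update("role", "admin")
// previously (roughly): UPDATE `users` SET `role`='admin' WHERE (`id` = 1) OR (`id` = 2) OR (`id` = 3)
// now (roughly):        UPDATE `users` SET `role`='admin' WHERE `id` IN (1,2,3)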

@ -13,7 +13,7 @@ import (
"gorm.io/gorm/utils"
)
// Create insert the value into database
// Create inserts value, returning the inserted data's primary key in value's id
func (db *DB) Create(value interface{}) (tx *DB) {
if db.CreateBatchSize > 0 {
return db.CreateInBatches(value, db.CreateBatchSize)
@ -24,7 +24,7 @@ func (db *DB) Create(value interface{}) (tx *DB) {
return tx.callbacks.Create().Execute(tx)
}
// CreateInBatches insert the value in batches into database
// CreateInBatches inserts value in batches of batchSize
func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
reflectValue := reflect.Indirect(reflect.ValueOf(value))
@ -68,7 +68,7 @@ func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
return
}
// Save update value in database, if the value doesn't have primary key, will insert it
// Save updates value in database. If value doesn't contain a matching primary key, value is inserted.
func (db *DB) Save(value interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = value
@ -114,7 +114,7 @@ func (db *DB) Save(value interface{}) (tx *DB) {
return
}
// First find first record that match given conditions, order by primary key
// First finds the first record ordered by primary key, matching given conditions conds
func (db *DB) First(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@ -129,7 +129,7 @@ func (db *DB) First(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
// Take return a record that match given conditions, the order will depend on the database implementation
// Take finds the first record returned by the database in no specified order, matching given conditions conds
func (db *DB) Take(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1)
if len(conds) > 0 {
@ -142,7 +142,7 @@ func (db *DB) Take(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
// Last find last record that match given conditions, order by primary key
// Last finds the last record ordered by primary key, matching given conditions conds
func (db *DB) Last(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@ -158,7 +158,7 @@ func (db *DB) Last(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
// Find find records that match given conditions
// Find finds all records matching given conditions conds
func (db *DB) Find(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
if len(conds) > 0 {
@ -170,7 +170,7 @@ func (db *DB) Find(dest interface{}, conds ...interface{}) (tx *DB) {
return tx.callbacks.Query().Execute(tx)
}
// FindInBatches find records in batches
// FindInBatches finds all records in batches of batchSize
func (db *DB) FindInBatches(dest interface{}, batchSize int, fc func(tx *DB, batch int) error) *DB {
var (
tx = db.Order(clause.OrderByColumn{
@ -202,7 +202,9 @@ func (db *DB) FindInBatches(dest interface{}, batchSize int, fc func(tx *DB, bat
batch++
if result.Error == nil && result.RowsAffected != 0 {
tx.AddError(fc(result, batch))
fcTx := result.Session(&Session{NewDB: true})
fcTx.RowsAffected = result.RowsAffected
tx.AddError(fc(fcTx, batch))
} else if result.Error != nil {
tx.AddError(result.Error)
}
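A usage sketch of FindInBatches (assuming a User model and an open *gorm.DB named db); the change above hands the callback a fresh session whose RowsAffected is the size of the current batch, so work done inside fc no longer leaks into the outer batching query:

var users []User
result := db.Where("active = ?", true).FindInBatches(&users, 100, func(tx *gorm.DB, batch int) error {
	for _, user := range users {
		_ = user // process each record of this batch
	}
	// tx.RowsAffected holds the number of records found in the current batch
	return nil // returning a non-nil error stops further batches
})
_ = result.Error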
@ -284,7 +286,8 @@ func (db *DB) assignInterfacesToValue(values ...interface{}) {
}
}
// FirstOrInit gets the first matched record or initialize a new instance with given conditions (only works with struct or map conditions)
// FirstOrInit finds the first matching record, otherwise if not found initializes a new instance with given conds.
// Each conds must be a struct or map.
func (db *DB) FirstOrInit(dest interface{}, conds ...interface{}) (tx *DB) {
queryTx := db.Limit(1).Order(clause.OrderByColumn{
Column: clause.Column{Table: clause.CurrentTable, Name: clause.PrimaryKey},
@ -310,7 +313,8 @@ func (db *DB) FirstOrInit(dest interface{}, conds ...interface{}) (tx *DB) {
return
}
// FirstOrCreate gets the first matched record or create a new one with given conditions (only works with struct, map conditions)
// FirstOrCreate finds the first matching record, otherwise if not found creates a new instance with given conds.
// Each conds must be a struct or map.
func (db *DB) FirstOrCreate(dest interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
queryTx := db.Session(&Session{}).Limit(1).Order(clause.OrderByColumn{
@ -358,14 +362,14 @@ func (db *DB) FirstOrCreate(dest interface{}, conds ...interface{}) (tx *DB) {
return tx
}
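A usage sketch for the two reworded helpers (assuming a User model and an open *gorm.DB named db):

var user User

// FirstOrInit: initialize user in memory if no record matches; nothing is written
db.Where(User{Name: "non_existing"}).Attrs(User{Age: 20}).FirstOrInit(&user)

// FirstOrCreate: create the record if no match is found
db.Where(User{Name: "non_existing"}).Attrs(User{Age: 20}).FirstOrCreate(&user)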
// Update update attributes with callbacks, refer: https://gorm.io/docs/update.html#Update-Changed-Fields
// Update updates column with value using callbacks. Reference: https://gorm.io/docs/update.html#Update-Changed-Fields
func (db *DB) Update(column string, value interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = map[string]interface{}{column: value}
return tx.callbacks.Update().Execute(tx)
}
// Updates update attributes with callbacks, refer: https://gorm.io/docs/update.html#Update-Changed-Fields
// Updates updates attributes using callbacks. values must be a struct or map. Reference: https://gorm.io/docs/update.html#Update-Changed-Fields
func (db *DB) Updates(values interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.Dest = values
@ -386,7 +390,9 @@ func (db *DB) UpdateColumns(values interface{}) (tx *DB) {
return tx.callbacks.Update().Execute(tx)
}
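A usage sketch for Update and Updates (assuming a User model and an open *gorm.DB named db); Updates accepts either a struct, whose zero-value fields are skipped, or a map:

// single column
db.Model(&user).Update("name", "hello")

// multiple columns via struct: only non-zero fields are updated
db.Model(&user).Updates(User{Name: "hello", Age: 18})

// multiple columns via map: zero values are written as well
db.Model(&user).Updates(map[string]interface{}{"name": "hello", "age": 0})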
// Delete delete value match given conditions, if the value has primary key, then will including the primary key as condition
// Delete deletes value matching given conditions. If value contains primary key it is included in the conditions. If
// value includes a deleted_at field, then Delete performs a soft delete instead by setting deleted_at with the current
// time if null.
func (db *DB) Delete(value interface{}, conds ...interface{}) (tx *DB) {
tx = db.getInstance()
if len(conds) > 0 {
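A usage sketch for the Delete behaviour described in the new comment above (assuming a User model with a gorm.DeletedAt field and an open *gorm.DB named db); with a deleted_at field present, the statement becomes a soft delete:

// primary key taken from the value itself
db.Delete(&User{ID: 10})
// roughly: UPDATE `users` SET `deleted_at` = <now> WHERE `id` = 10

// extra conditions
db.Where("age < ?", 18).Delete(&User{})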
@ -480,7 +486,7 @@ func (db *DB) Rows() (*sql.Rows, error) {
return rows, tx.Error
}
// Scan scan value to a struct
// Scan scans selected value to the struct dest
func (db *DB) Scan(dest interface{}) (tx *DB) {
config := *db.Config
currentLogger, newLogger := config.Logger, logger.Recorder.New()
@ -505,7 +511,7 @@ func (db *DB) Scan(dest interface{}) (tx *DB) {
return
}
// Pluck used to query single column from a model as a map
// Pluck queries a single column from a model, returning in the slice dest. E.g.:
// var ages []int64
// db.Model(&users).Pluck("age", &ages)
func (db *DB) Pluck(column string, dest interface{}) (tx *DB) {
@ -548,7 +554,8 @@ func (db *DB) ScanRows(rows *sql.Rows, dest interface{}) error {
return tx.Error
}
// Connection use a db conn to execute Multiple commands,this conn will put conn pool after it is executed.
// Connection uses a db connection to execute an arbitrary number of commands in fc. When finished, the connection is
// returned to the connection pool.
func (db *DB) Connection(fc func(tx *DB) error) (err error) {
if db.Error != nil {
return db.Error
@ -570,7 +577,9 @@ func (db *DB) Connection(fc func(tx *DB) error) (err error) {
return fc(tx)
}
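A usage sketch for Connection (assuming an open *gorm.DB named db): every command in fc runs on the same pooled connection, which is useful for connection-scoped state such as session variables, and the connection is returned to the pool afterwards.

err := db.Connection(func(tx *gorm.DB) error {
	// both statements are guaranteed to run on the same pooled connection
	if err := tx.Exec("SET @active_only = ?", 1).Error; err != nil {
		return err
	}
	var user User
	return tx.First(&user).Error
})
_ = err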
// Transaction start a transaction as a block, return error will rollback, otherwise to commit.
// Transaction start a transaction as a block, return error will rollback, otherwise to commit. Transaction executes an
// arbitrary number of commands in fc within a transaction. On success the changes are committed; if an error occurs
// they are rolled back.
func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err error) {
panicked := true
@ -613,7 +622,7 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
return
}
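A usage sketch for Transaction (assuming a User model and an open *gorm.DB named db): a nil return from fc commits, any error or panic rolls the whole block back.

err := db.Transaction(func(tx *gorm.DB) error {
	if err := tx.Create(&User{Name: "giraffe"}).Error; err != nil {
		return err // rolls back
	}
	if err := tx.Create(&User{Name: "lion"}).Error; err != nil {
		return err // rolls back
	}
	return nil // commits
})
_ = err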
// Begin begins a transaction
// Begin begins a transaction with any transaction options opts
func (db *DB) Begin(opts ...*sql.TxOptions) *DB {
var (
// clone statement
@ -642,7 +651,7 @@ func (db *DB) Begin(opts ...*sql.TxOptions) *DB {
return tx
}
// Commit commit a transaction
// Commit commits the changes in a transaction
func (db *DB) Commit() *DB {
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil && !reflect.ValueOf(committer).IsNil() {
db.AddError(committer.Commit())
@ -652,7 +661,7 @@ func (db *DB) Commit() *DB {
return db
}
// Rollback rollback a transaction
// Rollback rollbacks the changes in a transaction
func (db *DB) Rollback() *DB {
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil {
if !reflect.ValueOf(committer).IsNil() {
@ -682,7 +691,7 @@ func (db *DB) RollbackTo(name string) *DB {
return db
}
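The same flow managed by hand with the reworded Begin/Commit/Rollback helpers (a sketch under the same User and db assumptions):

func createUser(db *gorm.DB) error {
	tx := db.Begin()
	defer func() {
		if r := recover(); r != nil {
			tx.Rollback()
		}
	}()
	if err := tx.Error; err != nil {
		return err
	}
	if err := tx.Create(&User{Name: "giraffe"}).Error; err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit().Error
}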
// Exec execute raw sql
// Exec executes raw sql
func (db *DB) Exec(sql string, values ...interface{}) (tx *DB) {
tx = db.getInstance()
tx.Statement.SQL = strings.Builder{}

5 vendor/gorm.io/gorm/gorm.go generated vendored

@ -300,7 +300,8 @@ func (db *DB) WithContext(ctx context.Context) *DB {
// Debug start debug mode
func (db *DB) Debug() (tx *DB) {
return db.Session(&Session{
tx = db.getInstance()
return tx.Session(&Session{
Logger: db.Logger.LogMode(logger.Info),
})
}
@ -412,7 +413,7 @@ func (db *DB) SetupJoinTable(model interface{}, field string, joinTable interfac
relation, ok := modelSchema.Relationships.Relations[field]
isRelation := ok && relation.JoinTable != nil
if !isRelation {
return fmt.Errorf("failed to found relation: %s", field)
return fmt.Errorf("failed to find relation: %s", field)
}
for _, ref := range relation.References {

@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"log"
"os"
"time"
@ -68,8 +68,8 @@ type Interface interface {
}
var (
// Discard Discard logger will print any log to ioutil.Discard
Discard = New(log.New(ioutil.Discard, "", log.LstdFlags), Config{})
// Discard Discard logger will print any log to io.Discard
Discard = New(log.New(io.Discard, "", log.LstdFlags), Config{})
// Default Default logger
Default = New(log.New(os.Stdout, "\r\n", log.LstdFlags), Config{
SlowThreshold: 200 * time.Millisecond,
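The Discard logger is what callers pass to silence gorm's SQL logging entirely; a configuration sketch (the mysql driver and the dsn variable are assumptions, not part of this diff):

db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
	Logger: logger.Discard, // every log line now goes to io.Discard
})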

17 vendor/gorm.io/gorm/logger/sql.go generated vendored

@ -30,6 +30,8 @@ func isPrintable(s string) bool {
var convertibleTypes = []reflect.Type{reflect.TypeOf(time.Time{}), reflect.TypeOf(false), reflect.TypeOf([]byte{})}
var numericPlaceholderRe = regexp.MustCompile(`\$\d+\$`)
// ExplainSQL generate SQL string with given parameters, the generated SQL is expected to be used in logger, execute it might introduce a SQL injection vulnerability
func ExplainSQL(sql string, numericPlaceholder *regexp.Regexp, escaper string, avars ...interface{}) string {
var (
@ -138,9 +140,18 @@ func ExplainSQL(sql string, numericPlaceholder *regexp.Regexp, escaper string, a
sql = newSQL.String()
} else {
sql = numericPlaceholder.ReplaceAllString(sql, "$$$1$$")
for idx, v := range vars {
sql = strings.Replace(sql, "$"+strconv.Itoa(idx+1)+"$", v, 1)
}
sql = numericPlaceholderRe.ReplaceAllStringFunc(sql, func(v string) string {
num := v[1 : len(v)-1]
n, _ := strconv.Atoi(num)
// position var start from 1 ($1, $2)
n -= 1
if n >= 0 && n <= len(vars)-1 {
return vars[n]
}
return v
})
}
return sql
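ExplainSQL is exported from gorm.io/gorm/logger, so the placeholder handling above can be exercised directly; a sketch assuming a postgres-style numeric placeholder pattern:

package main

import (
	"fmt"
	"regexp"

	"gorm.io/gorm/logger"
)

func main() {
	numericPlaceholder := regexp.MustCompile(`\$(\d+)`) // matches $1, $2, ...
	out := logger.ExplainSQL(
		"SELECT * FROM users WHERE id = $1 AND name = $2",
		numericPlaceholder, `'`,
		42, "jinzhu",
	)
	fmt.Println(out) // SELECT * FROM users WHERE id = 42 AND name = 'jinzhu'
}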

@ -15,7 +15,7 @@ import (
)
var (
regFullDataType = regexp.MustCompile(`[^\d]*(\d+)[^\d]?`)
regFullDataType = regexp.MustCompile(`\D*(\d+)\D?`)
)
// Migrator m struct
@ -135,12 +135,12 @@ func (m Migrator) AutoMigrate(values ...interface{}) error {
}
}
}
}
for _, chk := range stmt.Schema.ParseCheckConstraints() {
if !tx.Migrator().HasConstraint(value, chk.Name) {
if err := tx.Migrator().CreateConstraint(value, chk.Name); err != nil {
return err
}
for _, chk := range stmt.Schema.ParseCheckConstraints() {
if !tx.Migrator().HasConstraint(value, chk.Name) {
if err := tx.Migrator().CreateConstraint(value, chk.Name); err != nil {
return err
}
}
}

38 vendor/gorm.io/gorm/scan.go generated vendored

@ -66,30 +66,32 @@ func (db *DB) scanIntoStruct(rows Rows, reflectValue reflect.Value, values []int
db.RowsAffected++
db.AddError(rows.Scan(values...))
joinedSchemaMap := make(map[*schema.Field]interface{}, 0)
joinedSchemaMap := make(map[*schema.Field]interface{})
for idx, field := range fields {
if field != nil {
if len(joinFields) == 0 || joinFields[idx][0] == nil {
db.AddError(field.Set(db.Statement.Context, reflectValue, values[idx]))
} else {
joinSchema := joinFields[idx][0]
relValue := joinSchema.ReflectValueOf(db.Statement.Context, reflectValue)
if relValue.Kind() == reflect.Ptr {
if _, ok := joinedSchemaMap[joinSchema]; !ok {
if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
continue
}
if field == nil {
continue
}
relValue.Set(reflect.New(relValue.Type().Elem()))
joinedSchemaMap[joinSchema] = nil
if len(joinFields) == 0 || joinFields[idx][0] == nil {
db.AddError(field.Set(db.Statement.Context, reflectValue, values[idx]))
} else {
joinSchema := joinFields[idx][0]
relValue := joinSchema.ReflectValueOf(db.Statement.Context, reflectValue)
if relValue.Kind() == reflect.Ptr {
if _, ok := joinedSchemaMap[joinSchema]; !ok {
if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
continue
}
relValue.Set(reflect.New(relValue.Type().Elem()))
joinedSchemaMap[joinSchema] = nil
}
db.AddError(joinFields[idx][1].Set(db.Statement.Context, relValue, values[idx]))
}
// release data to pool
field.NewValuePool.Put(values[idx])
db.AddError(joinFields[idx][1].Set(db.Statement.Context, relValue, values[idx]))
}
// release data to pool
field.NewValuePool.Put(values[idx])
}
}

@ -403,18 +403,14 @@ func (schema *Schema) ParseField(fieldStruct reflect.StructField) *Field {
}
if ef.PrimaryKey {
if val, ok := ef.TagSettings["PRIMARYKEY"]; ok && utils.CheckTruth(val) {
ef.PrimaryKey = true
} else if val, ok := ef.TagSettings["PRIMARY_KEY"]; ok && utils.CheckTruth(val) {
ef.PrimaryKey = true
} else {
if !utils.CheckTruth(ef.TagSettings["PRIMARYKEY"], ef.TagSettings["PRIMARY_KEY"]) {
ef.PrimaryKey = false
if val, ok := ef.TagSettings["AUTOINCREMENT"]; !ok || !utils.CheckTruth(val) {
ef.AutoIncrement = false
}
if ef.DefaultValue == "" {
if !ef.AutoIncrement && ef.DefaultValue == "" {
ef.HasDefaultValue = false
}
}
@ -472,9 +468,6 @@ func (field *Field) setupValuerAndSetter() {
oldValuerOf := field.ValueOf
field.ValueOf = func(ctx context.Context, v reflect.Value) (interface{}, bool) {
value, zero := oldValuerOf(ctx, v)
if zero {
return value, zero
}
s, ok := value.(SerializerValuerInterface)
if !ok {
@ -487,7 +480,7 @@ func (field *Field) setupValuerAndSetter() {
Destination: v,
Context: ctx,
fieldValue: value,
}, false
}, zero
}
}

@ -112,7 +112,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
schemaCacheKey = modelType
}
// Load exist schmema cache, return if exists
// Load exist schema cache, return if exists
if v, ok := cacheStore.Load(schemaCacheKey); ok {
s := v.(*Schema)
// Wait for the initialization of other goroutines to complete
@ -146,7 +146,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
// When the schema initialization is completed, the channel will be closed
defer close(schema.initialized)
// Load exist schmema cache, return if exists
// Load exist schema cache, return if exists
if v, ok := cacheStore.Load(schemaCacheKey); ok {
s := v.(*Schema)
// Wait for the initialization of other goroutines to complete

@ -88,7 +88,9 @@ func (JSONSerializer) Scan(ctx context.Context, field *Field, dst reflect.Value,
return fmt.Errorf("failed to unmarshal JSONB value: %#v", dbValue)
}
err = json.Unmarshal(bytes, fieldValue.Interface())
if len(bytes) > 0 {
err = json.Unmarshal(bytes, fieldValue.Interface())
}
}
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
@ -117,9 +119,15 @@ func (UnixSecondSerializer) Scan(ctx context.Context, field *Field, dst reflect.
// Value implements serializer interface
func (UnixSecondSerializer) Value(ctx context.Context, field *Field, dst reflect.Value, fieldValue interface{}) (result interface{}, err error) {
rv := reflect.ValueOf(fieldValue)
switch v := fieldValue.(type) {
case int64, int, uint, uint64, int32, uint32, int16, uint16, *int64, *int, *uint, *uint64, *int32, *uint32, *int16, *uint16:
result = time.Unix(reflect.Indirect(reflect.ValueOf(v)).Int(), 0)
case int64, int, uint, uint64, int32, uint32, int16, uint16:
result = time.Unix(reflect.Indirect(rv).Int(), 0)
case *int64, *int, *uint, *uint64, *int32, *uint32, *int16, *uint16:
if rv.IsZero() {
return nil, nil
}
result = time.Unix(reflect.Indirect(rv).Int(), 0)
default:
err = fmt.Errorf("invalid field type %#v for UnixSecondSerializer, only int, uint supported", v)
}
@ -142,8 +150,10 @@ func (GobSerializer) Scan(ctx context.Context, field *Field, dst reflect.Value,
default:
return fmt.Errorf("failed to unmarshal gob value: %#v", dbValue)
}
decoder := gob.NewDecoder(bytes.NewBuffer(bytesValue))
err = decoder.Decode(fieldValue.Interface())
if len(bytesValue) > 0 {
decoder := gob.NewDecoder(bytes.NewBuffer(bytesValue))
err = decoder.Decode(fieldValue.Interface())
}
}
field.ReflectValueOf(ctx, dst).Set(fieldValue.Elem())
return
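At the model level, the new pointer branch means a nil field is stored as NULL instead of failing; a sketch of a field using the unixtime serializer (the field name and tag shape are assumptions for illustration):

type Event struct {
	ID uint
	// int64 seconds serialized into a time-typed column; a nil pointer now maps to NULL
	HappenedAt *int64 `gorm:"serializer:unixtime;type:datetime"`
}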

6 vendor/gorm.io/gorm/statement.go generated vendored

@ -650,7 +650,7 @@ func (stmt *Statement) Changed(fields ...string) bool {
return false
}
var nameMatcher = regexp.MustCompile(`^[\W]?(?:[a-z_0-9]+?)[\W]?\.[\W]?([a-z_0-9]+?)[\W]?$`)
var nameMatcher = regexp.MustCompile(`^(?:\W?(\w+?)\W?\.)?\W?(\w+?)\W?$`)
// SelectAndOmitColumns get select and omit columns, select -> true, omit -> false
func (stmt *Statement) SelectAndOmitColumns(requireCreate, requireUpdate bool) (map[string]bool, bool) {
@ -672,8 +672,8 @@ func (stmt *Statement) SelectAndOmitColumns(requireCreate, requireUpdate bool) (
}
} else if field := stmt.Schema.LookUpField(column); field != nil && field.DBName != "" {
results[field.DBName] = true
} else if matches := nameMatcher.FindStringSubmatch(column); len(matches) == 2 {
results[matches[1]] = true
} else if matches := nameMatcher.FindStringSubmatch(column); len(matches) == 3 && (matches[1] == stmt.Table || matches[1] == "") {
results[matches[2]] = true
} else {
results[column] = true
}
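A standalone sketch of what the new nameMatcher accepts (run outside gorm): the optional first group captures a table qualifier and the second group captures the column, so SelectAndOmitColumns above can require that any qualifier matches stmt.Table.

package main

import (
	"fmt"
	"regexp"
)

var nameMatcher = regexp.MustCompile(`^(?:\W?(\w+?)\W?\.)?\W?(\w+?)\W?$`)

func main() {
	for _, column := range []string{"name", "`name`", "users.name", `"users"."name"`} {
		m := nameMatcher.FindStringSubmatch(column)
		fmt.Printf("table=%q column=%q\n", m[1], m[2])
	}
	// output:
	// table="" column="name"
	// table="" column="name"
	// table="users" column="name"
	// table="users" column="name"
}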

20 vendor/modules.txt vendored

@ -135,6 +135,12 @@ github.com/montanaflynn/stats
# github.com/natefinch/lumberjack v2.0.0+incompatible
## explicit
github.com/natefinch/lumberjack
# github.com/oschwald/geoip2-golang v1.8.0
## explicit; go 1.18
github.com/oschwald/geoip2-golang
# github.com/oschwald/maxminddb-golang v1.10.0
## explicit; go 1.18
github.com/oschwald/maxminddb-golang
# github.com/pelletier/go-toml/v2 v2.0.5
## explicit; go 1.16
github.com/pelletier/go-toml/v2
@ -243,12 +249,14 @@ github.com/youmark/pkcs8
# go.dtapp.net/dorm v1.0.33
## explicit; go 1.19
go.dtapp.net/dorm
# go.dtapp.net/goip v1.0.30
# go.dtapp.net/goip v1.0.33
## explicit; go 1.19
go.dtapp.net/goip
go.dtapp.net/goip/geoip
go.dtapp.net/goip/ip2region
go.dtapp.net/goip/v4
go.dtapp.net/goip/v6
go.dtapp.net/goip/ip2region_v2
go.dtapp.net/goip/ipv6wry
go.dtapp.net/goip/qqwry
# go.dtapp.net/gorandom v1.0.1
## explicit; go 1.18
go.dtapp.net/gorandom
@ -341,7 +349,7 @@ golang.org/x/net/idna
# golang.org/x/sync v0.0.0-20220907140024-f12130a52804
## explicit
golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20220913175220-63ea55921009
# golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/internal/unsafeheader
@ -399,8 +407,8 @@ gorm.io/driver/mysql
# gorm.io/driver/postgres v1.3.9
## explicit; go 1.14
gorm.io/driver/postgres
# gorm.io/gorm v1.23.8
## explicit; go 1.14
# gorm.io/gorm v1.23.9
## explicit; go 1.16
gorm.io/gorm
gorm.io/gorm/callbacks
gorm.io/gorm/clause
