parent 96906f189e
commit e92e32634c
@@ -1,3 +1,3 @@
 package goip
 
-const Version = "1.0.30"
+const Version = "1.0.31"
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,46 @@
package geoip

import (
    "github.com/oschwald/geoip2-golang"
)

type Client struct {
    accountId  int64  // account ID
    licenseKey string // license key
    asnDb      *geoip2.Reader
    cityDb     *geoip2.Reader
    countryDb  *geoip2.Reader
}

func New(licenseKey string) (*Client, error) {

    var err error
    c := &Client{}

    c.licenseKey = licenseKey

    c.asnDb, err = geoip2.FromBytes(asnBuff)
    if err != nil {
        return nil, err
    }

    c.cityDb, err = geoip2.FromBytes(cityBuff)
    if err != nil {
        return nil, err
    }

    c.countryDb, err = geoip2.FromBytes(countryBuff)
    if err != nil {
        return nil, err
    }

    return c, nil
}

func (c *Client) Close() {
    c.asnDb.Close()
    c.cityDb.Close()
    c.countryDb.Close()
}
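A minimal lifecycle sketch for the client above (not part of this commit). The import path go.dtapp.net/goip/geoip is taken from this diff's own imports; the example package and function names are hypothetical:

package geoip_example // hypothetical example package

import "go.dtapp.net/goip/geoip"

func example() error {
    // New only needs the MaxMind license key; the mmdb data itself is embedded.
    client, err := geoip.New("YOUR_LICENSE_KEY")
    if err != nil {
        return err
    }
    defer client.Close() // releases the ASN, City and Country readers
    return nil
}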
@@ -0,0 +1,23 @@
package geoip

import (
    "io/ioutil"
    "log"
    "net/http"
)

// OnlineDownload fetches downloadUrl and writes the response body to ./downloadName.
func OnlineDownload(downloadUrl string, downloadName string) {
    resp, err := http.Get(downloadUrl)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }

    err = ioutil.WriteFile("./"+downloadName, body, 0644)
    if err != nil {
        panic(err)
    }
    log.Printf("downloaded the latest database to %s", "./"+downloadName)
}
@@ -0,0 +1,47 @@
package geoip

import (
    "go.dtapp.net/gostring"
)

func (c *Client) GetGeoLite2AsnDownloadUrl() string {
    if c.licenseKey == "" {
        return ""
    }
    return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", c.licenseKey)
}

//func (c *Client) GetGeoLite2AsnCsvDownloadUrl() string {
//	if c.licenseKey == "" {
//		return ""
//	}
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", c.licenseKey)
//}

func (c *Client) GetGeoLite2CityDownloadUrl() string {
    if c.licenseKey == "" {
        return ""
    }
    return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", c.licenseKey)
}

//func (c *Client) GetGeoLite2CityCsvDownloadUrl() string {
//	if c.licenseKey == "" {
//		return ""
//	}
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", c.licenseKey)
//}

func (c *Client) GetGeoLite2CountryDownloadUrl() string {
    if c.licenseKey == "" {
        return ""
    }
    return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", c.licenseKey)
}

//func (c *Client) GetGeoLite2CountryCsvDownloadUrl() string {
//	if c.licenseKey == "" {
//		return ""
//	}
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", c.licenseKey)
//}
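A hedged sketch of how the URL helpers above can be combined with OnlineDownload from this commit to refresh the GeoLite2-City archive. The output filename and the helper name refreshCityArchive are assumptions; unpacking the tar.gz is out of scope here:

package geoip_example // hypothetical example package

import "go.dtapp.net/goip/geoip"

// refreshCityArchive downloads the GeoLite2-City tar.gz next to the binary.
func refreshCityArchive(client *geoip.Client) {
    if url := client.GetGeoLite2CityDownloadUrl(); url != "" {
        geoip.OnlineDownload(url, "GeoLite2-City.tar.gz")
    }
}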
@@ -0,0 +1,75 @@
package geoip

import (
    _ "embed"
    "net"
)

//go:embed GeoLite2-ASN.mmdb
var asnBuff []byte

//go:embed GeoLite2-City.mmdb
var cityBuff []byte

//go:embed GeoLite2-Country.mmdb
var countryBuff []byte

// QueryCityResult is the query result
type QueryCityResult struct {
    Ip        string `json:"ip,omitempty"` // IP address
    Continent struct {
        Code string `json:"code,omitempty"` // continent code
        Name string `json:"name,omitempty"` // continent name
    } `json:"continent,omitempty"`
    Country struct {
        Code string `json:"code,omitempty"` // country code
        Name string `json:"name,omitempty"` // country name
    } `json:"country,omitempty"`
    Province struct {
        Code string `json:"code,omitempty"` // province code
        Name string `json:"name,omitempty"` // province name
    } `json:"province,omitempty"`
    City struct {
        Name string `json:"name,omitempty"` // city name
    } `json:"city,omitempty"`
    Location struct {
        TimeZone  string  `json:"time_zone,omitempty"` // time zone
        Latitude  float64 `json:"latitude,omitempty"`  // latitude
        Longitude float64 `json:"longitude,omitempty"` // longitude
    } `json:"location,omitempty"`
}

func (c *Client) QueryCity(ipAddress net.IP) (result QueryCityResult, err error) {

    record, err := c.cityDb.City(ipAddress)
    if err != nil {
        return QueryCityResult{}, err
    }

    // IP
    result.Ip = ipAddress.String()

    // continent
    result.Continent.Code = record.Continent.Code
    result.Continent.Name = record.Continent.Names["zh-CN"]

    // country
    result.Country.Code = record.Country.IsoCode
    result.Country.Name = record.Country.Names["zh-CN"]

    // province (first subdivision)
    if len(record.Subdivisions) > 0 {
        result.Province.Code = record.Subdivisions[0].IsoCode
        result.Province.Name = record.Subdivisions[0].Names["zh-CN"]
    }

    // city
    result.City.Name = record.City.Names["zh-CN"]

    // location
    result.Location.TimeZone = record.Location.TimeZone
    result.Location.Latitude = record.Location.Latitude
    result.Location.Longitude = record.Location.Longitude

    return result, err
}
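A short usage sketch for QueryCity above (not from this commit; the example package and function names are hypothetical). Names returned come from the zh-CN localisation of the embedded databases, as the code above selects:

package geoip_example // hypothetical example package

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/geoip"
)

func lookupCity(client *geoip.Client, ip string) {
    result, err := client.QueryCity(net.ParseIP(ip))
    if err != nil {
        fmt.Println("query failed:", err)
        return
    }
    fmt.Println(result.Country.Name, result.Province.Name, result.City.Name, result.Location.TimeZone)
}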
@@ -0,0 +1,23 @@
package ip2region_v2

import (
    "io/ioutil"
    "log"
    "net/http"
)

// OnlineDownload fetches the latest ip2region.xdb from the GitHub mirror and writes it to ./ip2region.xdb.
func OnlineDownload() {
    resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.xdb?raw=true")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }

    err = ioutil.WriteFile("./ip2region.xdb", body, 0644)
    if err != nil {
        panic(err)
    }
    log.Printf("downloaded the latest ip2region.xdb database %s", "./ip2region.xdb")
}
Binary file not shown.
@@ -0,0 +1,72 @@
package ip2region_v2

import (
    _ "embed"
    "go.dtapp.net/gostring"
    "net"
)

//go:embed ip2region.xdb
var cBuff []byte

type Client struct {
    db *Searcher
}

func New() (*Client, error) {

    var err error
    c := &Client{}

    // 1. Load the entire xdb into memory (here it is embedded as cBuff).

    // 2. Create a fully in-memory query object from the global cBuff.
    c.db, err = NewWithBuffer(cBuff)
    if err != nil {
        return nil, err
    }

    return c, nil
}

// Result is the query result
type Result struct {
    Ip       string `json:"ip,omitempty"`       // queried IP address
    Country  string `json:"country,omitempty"`  // country
    Province string `json:"province,omitempty"` // province
    City     string `json:"city,omitempty"`     // city
    Operator string `json:"operator,omitempty"` // carrier
}

func (c *Client) Query(ipAddress net.IP) (result Result, err error) {

    // Note: a searcher created from the whole cached xdb buffer is safe for concurrent use.

    str, err := c.db.SearchByStr(ipAddress.String())
    if err != nil {
        return Result{}, err
    }

    split := gostring.Split(str, "|")
    if len(split) < 5 {
        return Result{}, err
    }

    result.Ip = ipAddress.String()

    result.Country = split[0]
    result.Province = split[2]
    if result.Province == "0" {
        result.Province = ""
    }
    result.City = split[3]
    if result.City == "0" {
        result.City = ""
    }
    result.Operator = split[4]
    if result.Operator == "0" {
        result.Operator = ""
    }

    return result, err
}
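A brief usage sketch for the embedded-xdb client above (not part of this commit; the example package and function names are hypothetical). The region string parsed by Query follows the ip2region format country|area|province|city|ISP:

package ip2region_example // hypothetical example package

import (
    "fmt"
    "net"

    "go.dtapp.net/goip/ip2region_v2"
)

func lookupRegion(ip string) {
    client, err := ip2region_v2.New() // builds the in-memory searcher from the embedded xdb
    if err != nil {
        panic(err)
    }
    r, err := client.Query(net.ParseIP(ip))
    if err != nil {
        panic(err)
    }
    fmt.Println(r.Country, r.Province, r.City, r.Operator)
}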
@@ -0,0 +1,240 @@
package ip2region_v2

import (
    "encoding/binary"
    "fmt"
    "os"
)

const (
    HeaderInfoLength      = 256
    VectorIndexRows       = 256
    VectorIndexCols       = 256
    VectorIndexSize       = 8
    SegmentIndexBlockSize = 14
)

// --- Index policy define

type IndexPolicy int

const (
    VectorIndexPolicy IndexPolicy = 1
    BTreeIndexPolicy  IndexPolicy = 2
)

func (i IndexPolicy) String() string {
    switch i {
    case VectorIndexPolicy:
        return "VectorIndex"
    case BTreeIndexPolicy:
        return "BtreeIndex"
    default:
        return "unknown"
    }
}

// --- Header define

type Header struct {
    // data []byte
    Version       uint16
    IndexPolicy   IndexPolicy
    CreatedAt     uint32
    StartIndexPtr uint32
    EndIndexPtr   uint32
}

func NewHeader(input []byte) (*Header, error) {
    if len(input) < 16 {
        return nil, fmt.Errorf("invalid input buffer")
    }

    return &Header{
        Version:       binary.LittleEndian.Uint16(input),
        IndexPolicy:   IndexPolicy(binary.LittleEndian.Uint16(input[2:])),
        CreatedAt:     binary.LittleEndian.Uint32(input[4:]),
        StartIndexPtr: binary.LittleEndian.Uint32(input[8:]),
        EndIndexPtr:   binary.LittleEndian.Uint32(input[12:]),
    }, nil
}

// --- searcher implementation

type Searcher struct {
    handle *os.File

    // header info
    header  *Header
    ioCount int

    // used only when this feature is enabled.
    // Preloading the vector index reduces the number of IO operations
    // and thus speeds up the search process.
    vectorIndex []byte

    // content buffer.
    // running with the whole xdb file cached
    contentBuff []byte
}

func baseNew(dbFile string, vIndex []byte, cBuff []byte) (*Searcher, error) {
    var err error

    // content buff first
    if cBuff != nil {
        return &Searcher{
            vectorIndex: nil,
            contentBuff: cBuff,
        }, nil
    }

    // open the xdb binary file
    handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
    if err != nil {
        return nil, err
    }

    return &Searcher{
        handle:      handle,
        vectorIndex: vIndex,
    }, nil
}

func NewWithFileOnly(dbFile string) (*Searcher, error) {
    return baseNew(dbFile, nil, nil)
}

func NewWithVectorIndex(dbFile string, vIndex []byte) (*Searcher, error) {
    return baseNew(dbFile, vIndex, nil)
}

func NewWithBuffer(cBuff []byte) (*Searcher, error) {
    return baseNew("", nil, cBuff)
}

func (s *Searcher) Close() {
    if s.handle != nil {
        err := s.handle.Close()
        if err != nil {
            return
        }
    }
}

// GetIOCount returns the global io count for the last search
func (s *Searcher) GetIOCount() int {
    return s.ioCount
}

// SearchByStr finds the region for the specified ip string
func (s *Searcher) SearchByStr(str string) (string, error) {
    ip, err := CheckIP(str)
    if err != nil {
        return "", err
    }

    return s.Search(ip)
}

// Search finds the region for the specified long ip
func (s *Searcher) Search(ip uint32) (string, error) {
    // reset the global ioCount
    s.ioCount = 0

    // locate the segment index block based on the vector index
    var il0 = (ip >> 24) & 0xFF
    var il1 = (ip >> 16) & 0xFF
    var idx = il0*VectorIndexCols*VectorIndexSize + il1*VectorIndexSize
    var sPtr, ePtr = uint32(0), uint32(0)
    if s.vectorIndex != nil {
        sPtr = binary.LittleEndian.Uint32(s.vectorIndex[idx:])
        ePtr = binary.LittleEndian.Uint32(s.vectorIndex[idx+4:])
    } else if s.contentBuff != nil {
        sPtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx:])
        ePtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx+4:])
    } else {
        // read the vector index block
        var buff = make([]byte, VectorIndexSize)
        err := s.read(int64(HeaderInfoLength+idx), buff)
        if err != nil {
            return "", fmt.Errorf("read vector index block at %d: %w", HeaderInfoLength+idx, err)
        }

        sPtr = binary.LittleEndian.Uint32(buff)
        ePtr = binary.LittleEndian.Uint32(buff[4:])
    }

    // fmt.Printf("sPtr=%d, ePtr=%d", sPtr, ePtr)

    // binary search the segment index to get the region
    var dataLen, dataPtr = 0, uint32(0)
    var buff = make([]byte, SegmentIndexBlockSize)
    var l, h = 0, int((ePtr - sPtr) / SegmentIndexBlockSize)
    for l <= h {
        m := (l + h) >> 1
        p := sPtr + uint32(m*SegmentIndexBlockSize)
        err := s.read(int64(p), buff)
        if err != nil {
            return "", fmt.Errorf("read segment index at %d: %w", p, err)
        }

        // decode the data step by step to reduce the unnecessary operations
        sip := binary.LittleEndian.Uint32(buff)
        if ip < sip {
            h = m - 1
        } else {
            eip := binary.LittleEndian.Uint32(buff[4:])
            if ip > eip {
                l = m + 1
            } else {
                dataLen = int(binary.LittleEndian.Uint16(buff[8:]))
                dataPtr = binary.LittleEndian.Uint32(buff[10:])
                break
            }
        }
    }

    // fmt.Printf("dataLen: %d, dataPtr: %d", dataLen, dataPtr)
    if dataLen == 0 {
        return "", nil
    }

    // load and return the region data
    var regionBuff = make([]byte, dataLen)
    err := s.read(int64(dataPtr), regionBuff)
    if err != nil {
        return "", fmt.Errorf("read region at %d: %w", dataPtr, err)
    }

    return string(regionBuff), nil
}

// do the data read operation based on the setting.
// content buffer first or will read from the file.
// this operation will invoke the Seek for file based read.
func (s *Searcher) read(offset int64, buff []byte) error {
    if s.contentBuff != nil {
        cLen := copy(buff, s.contentBuff[offset:])
        if cLen != len(buff) {
            return fmt.Errorf("incomplete read: %d bytes expected", len(buff))
        }
    } else {
        _, err := s.handle.Seek(offset, 0)
        if err != nil {
            return fmt.Errorf("seek to %d: %w", offset, err)
        }

        s.ioCount++
        rLen, err := s.handle.Read(buff)
        if err != nil {
            return fmt.Errorf("handle read: %w", err)
        }

        if rLen != len(buff) {
            return fmt.Errorf("incomplete read: %d bytes expected", len(buff))
        }
    }

    return nil
}
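A hedged sketch of the file-only constructor above (not part of this commit; example package and function names are hypothetical). In file-only mode every lookup seeks and reads the xdb on disk, which GetIOCount reports; a buffer-based searcher such as the one the Client uses performs no file IO at all:

package ip2region_example // hypothetical example package

import (
    "fmt"

    "go.dtapp.net/goip/ip2region_v2"
)

func searchFromFile(dbPath string, ip string) {
    searcher, err := ip2region_v2.NewWithFileOnly(dbPath)
    if err != nil {
        panic(err)
    }
    defer searcher.Close()

    region, err := searcher.SearchByStr(ip)
    if err != nil {
        panic(err)
    }
    fmt.Println(region, "io ops:", searcher.GetIOCount())
}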
@@ -0,0 +1,165 @@
package ip2region_v2

import (
    "fmt"
    "os"
    "strconv"
    "strings"
)

var shiftIndex = []int{24, 16, 8, 0}

func CheckIP(ip string) (uint32, error) {
    var ps = strings.Split(ip, ".")
    if len(ps) != 4 {
        return 0, fmt.Errorf("invalid ip address `%s`", ip)
    }

    var val = uint32(0)
    for i, s := range ps {
        d, err := strconv.Atoi(s)
        if err != nil {
            return 0, fmt.Errorf("the %dth part `%s` is not an integer", i, s)
        }

        if d < 0 || d > 255 {
            return 0, fmt.Errorf("the %dth part `%s` should be an integer between 0 and 255", i, s)
        }

        val |= uint32(d) << shiftIndex[i]
    }

    // convert the ip to integer
    return val, nil
}

func Long2IP(ip uint32) string {
    return fmt.Sprintf("%d.%d.%d.%d", (ip>>24)&0xFF, (ip>>16)&0xFF, (ip>>8)&0xFF, ip&0xFF)
}

func MidIP(sip uint32, eip uint32) uint32 {
    return uint32((uint64(sip) + uint64(eip)) >> 1)
}

// LoadHeader loads the header info from the specified handle
func LoadHeader(handle *os.File) (*Header, error) {
    _, err := handle.Seek(0, 0)
    if err != nil {
        return nil, fmt.Errorf("seek to the header: %w", err)
    }

    var buff = make([]byte, HeaderInfoLength)
    rLen, err := handle.Read(buff)
    if err != nil {
        return nil, err
    }

    if rLen != len(buff) {
        return nil, fmt.Errorf("incomplete read: %d bytes expected", len(buff))
    }

    return NewHeader(buff)
}

// LoadHeaderFromFile loads the header info from the specified db file path
func LoadHeaderFromFile(dbFile string) (*Header, error) {
    handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
    if err != nil {
        return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
    }

    header, err := LoadHeader(handle)
    if err != nil {
        return nil, err
    }

    _ = handle.Close()
    return header, nil
}

// LoadHeaderFromBuff wraps the header info from the content buffer
func LoadHeaderFromBuff(cBuff []byte) (*Header, error) {
    return NewHeader(cBuff[0:256])
}

// LoadVectorIndex is a util function to load the vector index from the specified file handle
func LoadVectorIndex(handle *os.File) ([]byte, error) {
    // load all the vector index block
    _, err := handle.Seek(HeaderInfoLength, 0)
    if err != nil {
        return nil, fmt.Errorf("seek to vector index: %w", err)
    }

    var buff = make([]byte, VectorIndexRows*VectorIndexCols*VectorIndexSize)
    rLen, err := handle.Read(buff)
    if err != nil {
        return nil, err
    }

    if rLen != len(buff) {
        return nil, fmt.Errorf("incomplete read: %d bytes expected", len(buff))
    }

    return buff, nil
}

// LoadVectorIndexFromFile loads the vector index from a specified file path
func LoadVectorIndexFromFile(dbFile string) ([]byte, error) {
    handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
    if err != nil {
        return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
    }

    vIndex, err := LoadVectorIndex(handle)
    if err != nil {
        return nil, err
    }

    _ = handle.Close()
    return vIndex, nil
}

// LoadContent loads the whole xdb content from the specified file handle
func LoadContent(handle *os.File) ([]byte, error) {
    // get file size
    fi, err := handle.Stat()
    if err != nil {
        return nil, fmt.Errorf("stat: %w", err)
    }

    size := fi.Size()

    // seek to the head of the file
    _, err = handle.Seek(0, 0)
    if err != nil {
        return nil, fmt.Errorf("seek to get xdb file length: %w", err)
    }

    var buff = make([]byte, size)
    rLen, err := handle.Read(buff)
    if err != nil {
        return nil, err
    }

    if rLen != len(buff) {
        return nil, fmt.Errorf("incomplete read: %d bytes expected", len(buff))
    }

    return buff, nil
}

// LoadContentFromFile loads the whole xdb content from the specified db file path
func LoadContentFromFile(dbFile string) ([]byte, error) {
    handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
    if err != nil {
        return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
    }

    cBuff, err := LoadContent(handle)
    if err != nil {
        return nil, err
    }

    _ = handle.Close()
    return cBuff, nil
}
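A tiny worked example of the IPv4 packing helpers above (not part of this commit; example package and function names are hypothetical). "1.2.3.4" packs to (1<<24)|(2<<16)|(3<<8)|4 = 16909060, and Long2IP reverses it:

package ip2region_example // hypothetical example package

import (
    "fmt"

    "go.dtapp.net/goip/ip2region_v2"
)

func packRoundTrip() {
    v, err := ip2region_v2.CheckIP("1.2.3.4")
    if err != nil {
        panic(err)
    }
    fmt.Println(v)                       // 16909060
    fmt.Println(ip2region_v2.Long2IP(v)) // 1.2.3.4
}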
@@ -0,0 +1,77 @@
package goip

import (
    "errors"
    "go.dtapp.net/goip/geoip"
    "go.dtapp.net/goip/ip2region_v2"
    "net"
)

var (
    QueryIncorrect = errors.New("incorrect IP address")
)

// QueryQqWryResult is the query result
type QueryQqWryResult struct {
    Ip      string `json:"ip,omitempty"`      // queried IP address
    Country string `json:"country,omitempty"` // country or region
    Area    string `json:"area,omitempty"`    // area
}

// QueryQqWry Chunzhen (CZ88) IP database
// https://www.cz88.net/
func (c *Client) QueryQqWry(ipAddress net.IP) (result QueryQqWryResult, err error) {
    if ipAddress.To4() == nil {
        return result, QueryIncorrect
    }
    resp := c.V4db.Query(ipAddress)
    return QueryQqWryResult{
        Ip:      resp.IP,
        Country: resp.Country,
        Area:    resp.Area,
    }, nil
}

// QueryIp2Region ip2region
// https://github.com/lionsoul2014/ip2region
func (c *Client) QueryIp2Region(ipAddress net.IP) (result QueryQqWryResult, err error) {
    if ipAddress.To4() == nil {
        return result, QueryIncorrect
    }
    resp := c.V4db.Query(ipAddress)
    return QueryQqWryResult{
        Ip:      resp.IP,
        Country: resp.Country,
        Area:    resp.Area,
    }, nil
}

// QueryIp2RegionV2 ip2region v2 (xdb)
// https://github.com/lionsoul2014/ip2region
func (c *Client) QueryIp2RegionV2(ipAddress net.IP) (result ip2region_v2.Result, err error) {
    if ipAddress.To4() == nil {
        return result, QueryIncorrect
    }

    query, err := c.ip2regionV2Client.Query(ipAddress)
    if err != nil {
        return ip2region_v2.Result{}, err
    }

    return query, nil
}

// QueryGeoIp MaxMind GeoIP
// https://www.maxmind.com/
func (c *Client) QueryGeoIp(ipAddress net.IP) (result geoip.QueryCityResult, err error) {
    if ipAddress.String() == "<nil>" {
        return result, QueryIncorrect
    }

    query, err := c.geoIpClient.QueryCity(ipAddress)
    if err != nil {
        return geoip.QueryCityResult{}, err
    }

    return query, nil
}
@@ -0,0 +1,3 @@
.vscode
*.out
*.test
@@ -0,0 +1,3 @@
[submodule "test-data"]
	path = test-data
	url = https://github.com/maxmind/MaxMind-DB.git
@@ -0,0 +1,472 @@
[run]
  deadline = "10m"

  tests = true

[linters]
  disable-all = true
  enable = [
    "asciicheck",
    "bidichk",
    "bodyclose",
    "containedctx",
    "contextcheck",
    "deadcode",
    "depguard",
    "durationcheck",
    "errcheck",
    "errchkjson",
    "errname",
    "errorlint",
    "exportloopref",
    "forbidigo",
    #"forcetypeassert",
    "goconst",
    "gocyclo",
    "gocritic",
    "godot",
    "gofumpt",
    "gomodguard",
    "gosec",
    "gosimple",
    "govet",
    "grouper",
    "ineffassign",
    "lll",
    "makezero",
    "maintidx",
    "misspell",
    "nakedret",
    "nilerr",
    "noctx",
    "nolintlint",
    "nosprintfhostport",
    "predeclared",
    "revive",
    "rowserrcheck",
    "sqlclosecheck",
    "staticcheck",
    "structcheck",
    "stylecheck",
    "tenv",
    "tparallel",
    "typecheck",
    "unconvert",
    "unparam",
    "unused",
    "varcheck",
    "vetshadow",
    "wastedassign",
  ]

# Please note that we only use depguard for stdlib as gomodguard only
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
[linters-settings.depguard]
  list-type = "blacklist"
  include-go-root = true
  packages = [
    # ioutil is deprecated. The functions have been moved elsewhere:
    # https://golang.org/doc/go1.16#ioutil
    "io/ioutil",
  ]

[linters-settings.errcheck]
  # Don't allow setting of error to the blank identifier. If there is a legitimate
  # reason, there should be a nolint with an explanation.
  check-blank = true

  exclude-functions = [
    # If we are rolling back a transaction, we are often already in an error
    # state.
    '(*database/sql.Tx).Rollback',

    # It is reasonable to ignore errors if Cleanup fails in most cases.
    '(*github.com/google/renameio/v2.PendingFile).Cleanup',

    # We often don't care if removing a file failed (e.g., it doesn't exist)
    'os.Remove',
    'os.RemoveAll',
  ]

  # Ignoring Close so that we don't have to have a bunch of
  # `defer func() { _ = r.Close() }()` constructs when we
  # don't actually care about the error.
  ignore = "Close,fmt:.*"

[linters-settings.errorlint]
  errorf = true
  asserts = true
  comparison = true

[linters-settings.exhaustive]
  default-signifies-exhaustive = true

[linters-settings.forbidigo]
  # Forbid the following identifiers
  forbid = [
    "^minFraud*",
    "^maxMind*",
  ]

[linters-settings.gocritic]
  enabled-checks = [
    "appendAssign",
    "appendCombine",
    "argOrder",
    "assignOp",
    "badCall",
    "badCond",
    "badLock",
    "badRegexp",
    "badSorting",
    "boolExprSimplify",
    "builtinShadow",
    "builtinShadowDecl",
    "captLocal",
    "caseOrder",
    "codegenComment",
    "commentedOutCode",
    "commentedOutImport",
    "commentFormatting",
    "defaultCaseOrder",
    # Revive's defer rule already captures this. This caught no extra cases.
    # "deferInLoop",
    "deferUnlambda",
    "deprecatedComment",
    "docStub",
    "dupArg",
    "dupBranchBody",
    "dupCase",
    "dupImport",
    "dupSubExpr",
    "dynamicFmtString",
    "elseif",
    "emptyDecl",
    "emptyFallthrough",
    "emptyStringTest",
    "equalFold",
    "evalOrder",
    "exitAfterDefer",
    "exposedSyncMutex",
    "externalErrorReassign",
    # Given that all of our code runs on Linux and the / separator should
    # work fine, this seems less important.
    # "filepathJoin",
    "flagDeref",
    "flagName",
    "hexLiteral",
    "ifElseChain",
    "importShadow",
    "indexAlloc",
    "initClause",
    "ioutilDeprecated",
    "mapKey",
    "methodExprCall",
    "nestingReduce",
    "newDeref",
    "nilValReturn",
    "octalLiteral",
    "offBy1",
    "paramTypeCombine",
    "preferDecodeRune",
    "preferFilepathJoin",
    "preferFprint",
    "preferStringWriter",
    "preferWriteByte",
    "ptrToRefParam",
    "rangeExprCopy",
    "rangeValCopy",
    "redundantSprint",
    "regexpMust",
    "regexpPattern",
    # This might be good, but I don't think we want to encourage
    # significant changes to regexes as we port stuff from Perl.
    # "regexpSimplify",
    "ruleguard",
    "singleCaseSwitch",
    "sliceClear",
    "sloppyLen",
    # This seems like it might also be good, but a lot of existing code
    # fails.
    # "sloppyReassign",
    "returnAfterHttpError",
    "sloppyTypeAssert",
    "sortSlice",
    "sprintfQuotedString",
    "sqlQuery",
    "stringsCompare",
    "stringXbytes",
    "switchTrue",
    "syncMapLoadAndDelete",
    "timeExprSimplify",
    "todoCommentWithoutDetail",
    "tooManyResultsChecker",
    "truncateCmp",
    "typeAssertChain",
    "typeDefFirst",
    "typeSwitchVar",
    "typeUnparen",
    "underef",
    "unlabelStmt",
    "unlambda",
    # I am not sure we would want this linter and a lot of existing