parent
b664b74537
commit
c8a251ca62
@ -1,5 +1,5 @@
|
||||
package golog
|
||||
|
||||
const (
|
||||
Version = "1.0.73"
|
||||
Version = "1.0.74"
|
||||
)
|
||||
|
@ -0,0 +1,3 @@
|
||||
.vscode
|
||||
*.out
|
||||
*.test
|
@ -0,0 +1,3 @@
|
||||
[submodule "test-data"]
|
||||
path = test-data
|
||||
url = https://github.com/maxmind/MaxMind-DB.git
|
@ -0,0 +1,472 @@
|
||||
[run]
|
||||
deadline = "10m"
|
||||
|
||||
tests = true
|
||||
|
||||
[linters]
|
||||
disable-all = true
|
||||
enable = [
|
||||
"asciicheck",
|
||||
"bidichk",
|
||||
"bodyclose",
|
||||
"containedctx",
|
||||
"contextcheck",
|
||||
"deadcode",
|
||||
"depguard",
|
||||
"durationcheck",
|
||||
"errcheck",
|
||||
"errchkjson",
|
||||
"errname",
|
||||
"errorlint",
|
||||
"exportloopref",
|
||||
"forbidigo",
|
||||
#"forcetypeassert",
|
||||
"goconst",
|
||||
"gocyclo",
|
||||
"gocritic",
|
||||
"godot",
|
||||
"gofumpt",
|
||||
"gomodguard",
|
||||
"gosec",
|
||||
"gosimple",
|
||||
"govet",
|
||||
"grouper",
|
||||
"ineffassign",
|
||||
"lll",
|
||||
"makezero",
|
||||
"maintidx",
|
||||
"misspell",
|
||||
"nakedret",
|
||||
"nilerr",
|
||||
"noctx",
|
||||
"nolintlint",
|
||||
"nosprintfhostport",
|
||||
"predeclared",
|
||||
"revive",
|
||||
"rowserrcheck",
|
||||
"sqlclosecheck",
|
||||
"staticcheck",
|
||||
"structcheck",
|
||||
"stylecheck",
|
||||
"tenv",
|
||||
"tparallel",
|
||||
"typecheck",
|
||||
"unconvert",
|
||||
"unparam",
|
||||
"unused",
|
||||
"varcheck",
|
||||
"vetshadow",
|
||||
"wastedassign",
|
||||
]
|
||||
|
||||
# Please note that we only use depguard for stdlib as gomodguard only
|
||||
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
|
||||
[linters-settings.depguard]
|
||||
list-type = "blacklist"
|
||||
include-go-root = true
|
||||
packages = [
|
||||
# ioutil is deprecated. The functions have been moved elsewhere:
|
||||
# https://golang.org/doc/go1.16#ioutil
|
||||
"io/ioutil",
|
||||
]
|
||||
|
||||
[linters-settings.errcheck]
|
||||
# Don't allow setting of error to the blank identifier. If there is a legtimate
|
||||
# reason, there should be a nolint with an explanation.
|
||||
check-blank = true
|
||||
|
||||
exclude-functions = [
|
||||
# If we are rolling back a transaction, we are often already in an error
|
||||
# state.
|
||||
'(*database/sql.Tx).Rollback',
|
||||
|
||||
# It is reasonable to ignore errors if Cleanup fails in most cases.
|
||||
'(*github.com/google/renameio/v2.PendingFile).Cleanup',
|
||||
|
||||
# We often don't care if removing a file failed (e.g., it doesn't exist)
|
||||
'os.Remove',
|
||||
'os.RemoveAll',
|
||||
]
|
||||
|
||||
# Ignoring Close so that we don't have to have a bunch of
|
||||
# `defer func() { _ = r.Close() }()` constructs when we
|
||||
# don't actually care about the error.
|
||||
ignore = "Close,fmt:.*"
|
||||
|
||||
[linters-settings.errorlint]
|
||||
errorf = true
|
||||
asserts = true
|
||||
comparison = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters-settings.forbidigo]
|
||||
# Forbid the following identifiers
|
||||
forbid = [
|
||||
"^minFraud*",
|
||||
"^maxMind*",
|
||||
]
|
||||
|
||||
[linters-settings.gocritic]
|
||||
enabled-checks = [
|
||||
"appendAssign",
|
||||
"appendCombine",
|
||||
"argOrder",
|
||||
"assignOp",
|
||||
"badCall",
|
||||
"badCond",
|
||||
"badLock",
|
||||
"badRegexp",
|
||||
"badSorting",
|
||||
"boolExprSimplify",
|
||||
"builtinShadow",
|
||||
"builtinShadowDecl",
|
||||
"captLocal",
|
||||
"caseOrder",
|
||||
"codegenComment",
|
||||
"commentedOutCode",
|
||||
"commentedOutImport",
|
||||
"commentFormatting",
|
||||
"defaultCaseOrder",
|
||||
# Revive's defer rule already captures this. This caught no extra cases.
|
||||
# "deferInLoop",
|
||||
"deferUnlambda",
|
||||
"deprecatedComment",
|
||||
"docStub",
|
||||
"dupArg",
|
||||
"dupBranchBody",
|
||||
"dupCase",
|
||||
"dupImport",
|
||||
"dupSubExpr",
|
||||
"dynamicFmtString",
|
||||
"elseif",
|
||||
"emptyDecl",
|
||||
"emptyFallthrough",
|
||||
"emptyStringTest",
|
||||
"equalFold",
|
||||
"evalOrder",
|
||||
"exitAfterDefer",
|
||||
"exposedSyncMutex",
|
||||
"externalErrorReassign",
|
||||
# Given that all of our code runs on Linux and the / separate should
|
||||
# work fine, this seems less important.
|
||||
# "filepathJoin",
|
||||
"flagDeref",
|
||||
"flagName",
|
||||
"hexLiteral",
|
||||
"ifElseChain",
|
||||
"importShadow",
|
||||
"indexAlloc",
|
||||
"initClause",
|
||||
"ioutilDeprecated",
|
||||
"mapKey",
|
||||
"methodExprCall",
|
||||
"nestingReduce",
|
||||
"newDeref",
|
||||
"nilValReturn",
|
||||
"octalLiteral",
|
||||
"offBy1",
|
||||
"paramTypeCombine",
|
||||
"preferDecodeRune",
|
||||
"preferFilepathJoin",
|
||||
"preferFprint",
|
||||
"preferStringWriter",
|
||||
"preferWriteByte",
|
||||
"ptrToRefParam",
|
||||
"rangeExprCopy",
|
||||
"rangeValCopy",
|
||||
"redundantSprint",
|
||||
"regexpMust",
|
||||
"regexpPattern",
|
||||
# This might be good, but I don't think we want to encourage
|
||||
# significant changes to regexes as we port stuff from Perl.
|
||||
# "regexpSimplify",
|
||||
"ruleguard",
|
||||
"singleCaseSwitch",
|
||||
"sliceClear",
|
||||
"sloppyLen",
|
||||
# This seems like it might also be good, but a lot of existing code
|
||||
# fails.
|
||||
# "sloppyReassign",
|
||||
"returnAfterHttpError",
|
||||
"sloppyTypeAssert",
|
||||
"sortSlice",
|
||||
"sprintfQuotedString",
|
||||
"sqlQuery",
|
||||
"stringsCompare",
|
||||
"stringXbytes",
|
||||
"switchTrue",
|
||||
"syncMapLoadAndDelete",
|
||||
"timeExprSimplify",
|
||||
"todoCommentWithoutDetail",
|
||||
"tooManyResultsChecker",
|
||||
"truncateCmp",
|
||||
"typeAssertChain",
|
||||
"typeDefFirst",
|
||||
"typeSwitchVar",
|
||||
"typeUnparen",
|
||||
"underef",
|
||||
"unlabelStmt",
|
||||
"unlambda",
|
||||
# I am not sure we would want this linter and a lot of existing
|
||||
# code fails.
|
||||
# "unnamedResult",
|
||||
"unnecessaryBlock",
|
||||
"unnecessaryDefer",
|
||||
"unslice",
|
||||
"valSwap",
|
||||
"weakCond",
|
||||
"wrapperFunc",
|
||||
"yodaStyleExpr",
|
||||
# This requires explanations for "nolint" directives. This would be
|
||||
# nice for gosec ones, but I am not sure we want it generally unless
|
||||
# we can get the false positive rate lower.
|
||||
# "whyNoLint"
|
||||
]
|
||||
|
||||
[linters-settings.gofumpt]
|
||||
extra-rules = true
|
||||
lang-version = "1.18"
|
||||
|
||||
[linters-settings.govet]
|
||||
"enable-all" = true
|
||||
|
||||
[linters-settings.lll]
|
||||
line-length = 120
|
||||
tab-width = 4
|
||||
|
||||
[linters-settings.nolintlint]
|
||||
allow-leading-space = false
|
||||
allow-unused = false
|
||||
allow-no-explanation = ["lll", "misspell"]
|
||||
require-explanation = true
|
||||
require-specific = true
|
||||
|
||||
[linters-settings.revive]
|
||||
ignore-generated-header = true
|
||||
severity = "warning"
|
||||
|
||||
# This might be nice but it is so common that it is hard
|
||||
# to enable.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "add-constant"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "argument-limit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "atomic"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "bare-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "blank-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "bool-literal-in-expr"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "call-to-gc"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "cognitive-complexity"
|
||||
|
||||
# Probably a good rule, but we have a lot of names that
|
||||
# only have case differences.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "confusing-naming"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "confusing-results"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "constant-logical-expr"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "context-as-argument"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "context-keys-type"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "cyclomatic"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "deep-exit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "defer"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "dot-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "duplicated-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "early-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "empty-block"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "empty-lines"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "errorf"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-strings"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "exported"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "file-header"
|
||||
|
||||
# We have a lot of flag parameters. This linter probably makes
|
||||
# a good point, but we would need some cleanup or a lot of nolints.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "flag-parameter"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "function-result-limit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "get-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "identical-branches"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "if-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "imports-blacklist"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "import-shadowing"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "increment-decrement"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "indent-error-flow"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "line-length-limit"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "max-public-structs"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "modifies-parameter"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "modifies-value-receiver"
|
||||
|
||||
# We frequently use nested structs, particularly in tests.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "nested-structs"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "optimize-operands-order"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "package-comments"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range-val-address"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range-val-in-closure"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "receiver-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "redefines-builtin-id"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "string-of-int"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "struct-tag"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "superfluous-else"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "time-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unconditional-recursion"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unexported-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unexported-return"
|
||||
|
||||
# This is covered elsewhere and we want to ignore some
|
||||
# functions such as fmt.Fprintf.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "unhandled-error"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unnecessary-stmt"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unreachable-code"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unused-parameter"
|
||||
|
||||
# We generally have unused receivers in tests for meeting the
|
||||
# requirements of an interface.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "unused-receiver"
|
||||
|
||||
# This probably makes sense after we upgrade to 1.18
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "use-any"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "useless-break"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "var-declaration"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "var-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "waitgroup-by-value"
|
||||
|
||||
[linters-settings.unparam]
|
||||
check-exported = true
|
||||
|
||||
[[issues.exclude-rules]]
|
||||
linters = [
|
||||
"govet"
|
||||
]
|
||||
# we want to enable almost all govet rules. It is easier to just filter out
|
||||
# the ones we don't want:
|
||||
#
|
||||
# * fieldalignment - way too noisy. Although it is very useful in particular
|
||||
# cases where we are trying to use as little memory as possible, having
|
||||
# it go off on every struct isn't helpful.
|
||||
# * shadow - although often useful, it complains about _many_ err
|
||||
# shadowing assignments and some others where shadowing is clear.
|
||||
text = "^(fieldalignment|shadow)"
|
@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
@ -0,0 +1,93 @@
|
||||
# GeoIP2 Reader for Go #
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/oschwald/geoip2-golang)](https://pkg.go.dev/github.com/oschwald/geoip2-golang)
|
||||
|
||||
This library reads MaxMind [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/)
|
||||
and [GeoIP2](http://www.maxmind.com/en/geolocation_landing) databases.
|
||||
|
||||
This library is built using
|
||||
[the Go maxminddb reader](https://github.com/oschwald/maxminddb-golang).
|
||||
All data for the database record is decoded using this library. If you only
|
||||
need several fields, you may get superior performance by using maxminddb's
|
||||
`Lookup` directly with a result struct that only contains the required fields.
|
||||
(See [example_test.go](https://github.com/oschwald/maxminddb-golang/blob/main/example_test.go)
|
||||
in the maxminddb repository for an example of this.)
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/geoip2-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/geoip2-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Example ##
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := geoip2.Open("GeoIP2-City.mmdb")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
// If you are using strings that may be invalid, check that ip is not nil
|
||||
ip := net.ParseIP("81.2.69.142")
|
||||
record, err := db.City(ip)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Printf("Portuguese (BR) city name: %v\n", record.City.Names["pt-BR"])
|
||||
if len(record.Subdivisions) > 0 {
|
||||
fmt.Printf("English subdivision name: %v\n", record.Subdivisions[0].Names["en"])
|
||||
}
|
||||
fmt.Printf("Russian country name: %v\n", record.Country.Names["ru"])
|
||||
fmt.Printf("ISO country code: %v\n", record.Country.IsoCode)
|
||||
fmt.Printf("Time zone: %v\n", record.Location.TimeZone)
|
||||
fmt.Printf("Coordinates: %v, %v\n", record.Location.Latitude, record.Location.Longitude)
|
||||
// Output:
|
||||
// Portuguese (BR) city name: Londres
|
||||
// English subdivision name: England
|
||||
// Russian country name: Великобритания
|
||||
// ISO country code: GB
|
||||
// Time zone: Europe/London
|
||||
// Coordinates: 51.5142, -0.0931
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Testing ##
|
||||
|
||||
Make sure you checked out test data submodule:
|
||||
|
||||
```
|
||||
git submodule init
|
||||
git submodule update
|
||||
```
|
||||
|
||||
Execute test suite:
|
||||
|
||||
```
|
||||
go test
|
||||
```
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC license.
|
@ -0,0 +1,418 @@
|
||||
// Package geoip2 provides an easy-to-use API for the MaxMind GeoIP2 and
|
||||
// GeoLite2 databases; this package does not support GeoIP Legacy databases.
|
||||
//
|
||||
// The structs provided by this package match the internal structure of
|
||||
// the data in the MaxMind databases.
|
||||
//
|
||||
// See github.com/oschwald/maxminddb-golang for more advanced used cases.
|
||||
package geoip2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"github.com/oschwald/maxminddb-golang"
|
||||
)
|
||||
|
||||
// The Enterprise struct corresponds to the data in the GeoIP2 Enterprise
|
||||
// database.
|
||||
type Enterprise struct {
|
||||
City struct {
|
||||
Confidence uint8 `maxminddb:"confidence"`
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"city"`
|
||||
Continent struct {
|
||||
Code string `maxminddb:"code"`
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"continent"`
|
||||
Country struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
Confidence uint8 `maxminddb:"confidence"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
} `maxminddb:"country"`
|
||||
Location struct {
|
||||
AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
|
||||
Latitude float64 `maxminddb:"latitude"`
|
||||
Longitude float64 `maxminddb:"longitude"`
|
||||
MetroCode uint `maxminddb:"metro_code"`
|
||||
TimeZone string `maxminddb:"time_zone"`
|
||||
} `maxminddb:"location"`
|
||||
Postal struct {
|
||||
Code string `maxminddb:"code"`
|
||||
Confidence uint8 `maxminddb:"confidence"`
|
||||
} `maxminddb:"postal"`
|
||||
RegisteredCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
Confidence uint8 `maxminddb:"confidence"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
} `maxminddb:"registered_country"`
|
||||
RepresentedCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
Type string `maxminddb:"type"`
|
||||
} `maxminddb:"represented_country"`
|
||||
Subdivisions []struct {
|
||||
Confidence uint8 `maxminddb:"confidence"`
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"subdivisions"`
|
||||
Traits struct {
|
||||
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
|
||||
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
|
||||
ConnectionType string `maxminddb:"connection_type"`
|
||||
Domain string `maxminddb:"domain"`
|
||||
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
|
||||
IsLegitimateProxy bool `maxminddb:"is_legitimate_proxy"`
|
||||
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
|
||||
ISP string `maxminddb:"isp"`
|
||||
MobileCountryCode string `maxminddb:"mobile_country_code"`
|
||||
MobileNetworkCode string `maxminddb:"mobile_network_code"`
|
||||
Organization string `maxminddb:"organization"`
|
||||
StaticIPScore float64 `maxminddb:"static_ip_score"`
|
||||
UserType string `maxminddb:"user_type"`
|
||||
} `maxminddb:"traits"`
|
||||
}
|
||||
|
||||
// The City struct corresponds to the data in the GeoIP2/GeoLite2 City
|
||||
// databases.
|
||||
type City struct {
|
||||
City struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"city"`
|
||||
Continent struct {
|
||||
Code string `maxminddb:"code"`
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"continent"`
|
||||
Country struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"country"`
|
||||
Location struct {
|
||||
AccuracyRadius uint16 `maxminddb:"accuracy_radius"`
|
||||
Latitude float64 `maxminddb:"latitude"`
|
||||
Longitude float64 `maxminddb:"longitude"`
|
||||
MetroCode uint `maxminddb:"metro_code"`
|
||||
TimeZone string `maxminddb:"time_zone"`
|
||||
} `maxminddb:"location"`
|
||||
Postal struct {
|
||||
Code string `maxminddb:"code"`
|
||||
} `maxminddb:"postal"`
|
||||
RegisteredCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"registered_country"`
|
||||
RepresentedCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
Type string `maxminddb:"type"`
|
||||
} `maxminddb:"represented_country"`
|
||||
Subdivisions []struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"subdivisions"`
|
||||
Traits struct {
|
||||
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
|
||||
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
|
||||
} `maxminddb:"traits"`
|
||||
}
|
||||
|
||||
// The Country struct corresponds to the data in the GeoIP2/GeoLite2
|
||||
// Country databases.
|
||||
type Country struct {
|
||||
Continent struct {
|
||||
Code string `maxminddb:"code"`
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"continent"`
|
||||
Country struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"country"`
|
||||
RegisteredCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
} `maxminddb:"registered_country"`
|
||||
RepresentedCountry struct {
|
||||
GeoNameID uint `maxminddb:"geoname_id"`
|
||||
IsInEuropeanUnion bool `maxminddb:"is_in_european_union"`
|
||||
IsoCode string `maxminddb:"iso_code"`
|
||||
Names map[string]string `maxminddb:"names"`
|
||||
Type string `maxminddb:"type"`
|
||||
} `maxminddb:"represented_country"`
|
||||
Traits struct {
|
||||
IsAnonymousProxy bool `maxminddb:"is_anonymous_proxy"`
|
||||
IsSatelliteProvider bool `maxminddb:"is_satellite_provider"`
|
||||
} `maxminddb:"traits"`
|
||||
}
|
||||
|
||||
// The AnonymousIP struct corresponds to the data in the GeoIP2
|
||||
// Anonymous IP database.
|
||||
type AnonymousIP struct {
|
||||
IsAnonymous bool `maxminddb:"is_anonymous"`
|
||||
IsAnonymousVPN bool `maxminddb:"is_anonymous_vpn"`
|
||||
IsHostingProvider bool `maxminddb:"is_hosting_provider"`
|
||||
IsPublicProxy bool `maxminddb:"is_public_proxy"`
|
||||
IsResidentialProxy bool `maxminddb:"is_residential_proxy"`
|
||||
IsTorExitNode bool `maxminddb:"is_tor_exit_node"`
|
||||
}
|
||||
|
||||
// The ASN struct corresponds to the data in the GeoLite2 ASN database.
|
||||
type ASN struct {
|
||||
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
|
||||
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
|
||||
}
|
||||
|
||||
// The ConnectionType struct corresponds to the data in the GeoIP2
|
||||
// Connection-Type database.
|
||||
type ConnectionType struct {
|
||||
ConnectionType string `maxminddb:"connection_type"`
|
||||
}
|
||||
|
||||
// The Domain struct corresponds to the data in the GeoIP2 Domain database.
|
||||
type Domain struct {
|
||||
Domain string `maxminddb:"domain"`
|
||||
}
|
||||
|
||||
// The ISP struct corresponds to the data in the GeoIP2 ISP database.
|
||||
type ISP struct {
|
||||
AutonomousSystemNumber uint `maxminddb:"autonomous_system_number"`
|
||||
AutonomousSystemOrganization string `maxminddb:"autonomous_system_organization"`
|
||||
ISP string `maxminddb:"isp"`
|
||||
MobileCountryCode string `maxminddb:"mobile_country_code"`
|
||||
MobileNetworkCode string `maxminddb:"mobile_network_code"`
|
||||
Organization string `maxminddb:"organization"`
|
||||
}
|
||||
|
||||
type databaseType int
|
||||
|
||||
const (
|
||||
isAnonymousIP = 1 << iota
|
||||
isASN
|
||||
isCity
|
||||
isConnectionType
|
||||
isCountry
|
||||
isDomain
|
||||
isEnterprise
|
||||
isISP
|
||||
)
|
||||
|
||||
// Reader holds the maxminddb.Reader struct. It can be created using the
|
||||
// Open and FromBytes functions.
|
||||
type Reader struct {
|
||||
mmdbReader *maxminddb.Reader
|
||||
databaseType databaseType
|
||||
}
|
||||
|
||||
// InvalidMethodError is returned when a lookup method is called on a
|
||||
// database that it does not support. For instance, calling the ISP method
|
||||
// on a City database.
|
||||
type InvalidMethodError struct {
|
||||
Method string
|
||||
DatabaseType string
|
||||
}
|
||||
|
||||
func (e InvalidMethodError) Error() string {
|
||||
return fmt.Sprintf(`geoip2: the %s method does not support the %s database`,
|
||||
e.Method, e.DatabaseType)
|
||||
}
|
||||
|
||||
// UnknownDatabaseTypeError is returned when an unknown database type is
|
||||
// opened.
|
||||
type UnknownDatabaseTypeError struct {
|
||||
DatabaseType string
|
||||
}
|
||||
|
||||
func (e UnknownDatabaseTypeError) Error() string {
|
||||
return fmt.Sprintf(`geoip2: reader does not support the %q database type`,
|
||||
e.DatabaseType)
|
||||
}
|
||||
|
||||
// Open takes a string path to a file and returns a Reader struct or an error.
|
||||
// The database file is opened using a memory map. Use the Close method on the
|
||||
// Reader object to return the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
reader, err := maxminddb.Open(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbType, err := getDBType(reader)
|
||||
return &Reader{reader, dbType}, err
|
||||
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a GeoIP2/GeoLite2 database
|
||||
// file and returns a Reader struct or an error. Note that the byte slice is
|
||||
// used directly; any modification of it after opening the database will result
|
||||
// in errors while reading from the database.
|
||||
func FromBytes(bytes []byte) (*Reader, error) {
|
||||
reader, err := maxminddb.FromBytes(bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dbType, err := getDBType(reader)
|
||||
return &Reader{reader, dbType}, err
|
||||
}
|
||||
|
||||
func getDBType(reader *maxminddb.Reader) (databaseType, error) {
|
||||
switch reader.Metadata.DatabaseType {
|
||||
case "GeoIP2-Anonymous-IP":
|
||||
return isAnonymousIP, nil
|
||||
case "DBIP-ASN-Lite (compat=GeoLite2-ASN)",
|
||||
"GeoLite2-ASN":
|
||||
return isASN, nil
|
||||
// We allow City lookups on Country for back compat
|
||||
case "DBIP-City-Lite",
|
||||
"DBIP-Country-Lite",
|
||||
"DBIP-Country",
|
||||
"DBIP-Location (compat=City)",
|
||||
"GeoLite2-City",
|
||||
"GeoIP2-City",
|
||||
"GeoIP2-City-Africa",
|
||||
"GeoIP2-City-Asia-Pacific",
|
||||
"GeoIP2-City-Europe",
|
||||
"GeoIP2-City-North-America",
|
||||
"GeoIP2-City-South-America",
|
||||
"GeoIP2-Precision-City",
|
||||
"GeoLite2-Country",
|
||||
"GeoIP2-Country":
|
||||
return isCity | isCountry, nil
|
||||
case "GeoIP2-Connection-Type":
|
||||
return isConnectionType, nil
|
||||
case "GeoIP2-Domain":
|
||||
return isDomain, nil
|
||||
case "DBIP-ISP (compat=Enterprise)",
|
||||
"DBIP-Location-ISP (compat=Enterprise)",
|
||||
"GeoIP2-Enterprise":
|
||||
return isEnterprise | isCity | isCountry, nil
|
||||
case "GeoIP2-ISP",
|
||||
"GeoIP2-Precision-ISP":
|
||||
return isISP | isASN, nil
|
||||
default:
|
||||
return 0, UnknownDatabaseTypeError{reader.Metadata.DatabaseType}
|
||||
}
|
||||
}
|
||||
|
||||
// Enterprise takes an IP address as a net.IP struct and returns an Enterprise
|
||||
// struct and/or an error. This is intended to be used with the GeoIP2
|
||||
// Enterprise database.
|
||||
func (r *Reader) Enterprise(ipAddress net.IP) (*Enterprise, error) {
|
||||
if isEnterprise&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"Enterprise", r.Metadata().DatabaseType}
|
||||
}
|
||||
var enterprise Enterprise
|
||||
err := r.mmdbReader.Lookup(ipAddress, &enterprise)
|
||||
return &enterprise, err
|
||||
}
|
||||
|
||||
// City takes an IP address as a net.IP struct and returns a City struct
|
||||
// and/or an error. Although this can be used with other databases, this
|
||||
// method generally should be used with the GeoIP2 or GeoLite2 City databases.
|
||||
func (r *Reader) City(ipAddress net.IP) (*City, error) {
|
||||
if isCity&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"City", r.Metadata().DatabaseType}
|
||||
}
|
||||
var city City
|
||||
err := r.mmdbReader.Lookup(ipAddress, &city)
|
||||
return &city, err
|
||||
}
|
||||
|
||||
// Country takes an IP address as a net.IP struct and returns a Country struct
|
||||
// and/or an error. Although this can be used with other databases, this
|
||||
// method generally should be used with the GeoIP2 or GeoLite2 Country
|
||||
// databases.
|
||||
func (r *Reader) Country(ipAddress net.IP) (*Country, error) {
|
||||
if isCountry&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"Country", r.Metadata().DatabaseType}
|
||||
}
|
||||
var country Country
|
||||
err := r.mmdbReader.Lookup(ipAddress, &country)
|
||||
return &country, err
|
||||
}
|
||||
|
||||
// AnonymousIP takes an IP address as a net.IP struct and returns a
|
||||
// AnonymousIP struct and/or an error.
|
||||
func (r *Reader) AnonymousIP(ipAddress net.IP) (*AnonymousIP, error) {
|
||||
if isAnonymousIP&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"AnonymousIP", r.Metadata().DatabaseType}
|
||||
}
|
||||
var anonIP AnonymousIP
|
||||
err := r.mmdbReader.Lookup(ipAddress, &anonIP)
|
||||
return &anonIP, err
|
||||
}
|
||||
|
||||
// ASN takes an IP address as a net.IP struct and returns a ASN struct and/or
|
||||
// an error.
|
||||
func (r *Reader) ASN(ipAddress net.IP) (*ASN, error) {
|
||||
if isASN&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"ASN", r.Metadata().DatabaseType}
|
||||
}
|
||||
var val ASN
|
||||
err := r.mmdbReader.Lookup(ipAddress, &val)
|
||||
return &val, err
|
||||
}
|
||||
|
||||
// ConnectionType takes an IP address as a net.IP struct and returns a
|
||||
// ConnectionType struct and/or an error.
|
||||
func (r *Reader) ConnectionType(ipAddress net.IP) (*ConnectionType, error) {
|
||||
if isConnectionType&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"ConnectionType", r.Metadata().DatabaseType}
|
||||
}
|
||||
var val ConnectionType
|
||||
err := r.mmdbReader.Lookup(ipAddress, &val)
|
||||
return &val, err
|
||||
}
|
||||
|
||||
// Domain takes an IP address as a net.IP struct and returns a
|
||||
// Domain struct and/or an error.
|
||||
func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) {
|
||||
if isDomain&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"Domain", r.Metadata().DatabaseType}
|
||||
}
|
||||
var val Domain
|
||||
err := r.mmdbReader.Lookup(ipAddress, &val)
|
||||
return &val, err
|
||||
}
|
||||
|
||||
// ISP takes an IP address as a net.IP struct and returns a ISP struct and/or
|
||||
// an error.
|
||||
func (r *Reader) ISP(ipAddress net.IP) (*ISP, error) {
|
||||
if isISP&r.databaseType == 0 {
|
||||
return nil, InvalidMethodError{"ISP", r.Metadata().DatabaseType}
|
||||
}
|
||||
var val ISP
|
||||
err := r.mmdbReader.Lookup(ipAddress, &val)
|
||||
return &val, err
|
||||
}
|
||||
|
||||
// Metadata takes no arguments and returns a struct containing metadata about
|
||||
// the MaxMind database in use by the Reader.
|
||||
func (r *Reader) Metadata() maxminddb.Metadata {
|
||||
return r.mmdbReader.Metadata
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
|
||||
// resources to the system.
|
||||
func (r *Reader) Close() error {
|
||||
return r.mmdbReader.Close()
|
||||
}
|
@ -0,0 +1,4 @@
|
||||
.vscode
|
||||
*.out
|
||||
*.sw?
|
||||
*.test
|
@ -0,0 +1,3 @@
|
||||
[submodule "test-data"]
|
||||
path = test-data
|
||||
url = https://github.com/maxmind/MaxMind-DB.git
|
@ -0,0 +1,472 @@
|
||||
[run]
|
||||
deadline = "10m"
|
||||
|
||||
tests = true
|
||||
|
||||
[linters]
|
||||
disable-all = true
|
||||
enable = [
|
||||
"asciicheck",
|
||||
"bidichk",
|
||||
"bodyclose",
|
||||
"containedctx",
|
||||
"contextcheck",
|
||||
"deadcode",
|
||||
"depguard",
|
||||
"durationcheck",
|
||||
"errcheck",
|
||||
"errchkjson",
|
||||
"errname",
|
||||
"errorlint",
|
||||
"exportloopref",
|
||||
"forbidigo",
|
||||
#"forcetypeassert",
|
||||
"goconst",
|
||||
"gocyclo",
|
||||
"gocritic",
|
||||
"godot",
|
||||
"gofumpt",
|
||||
"gomodguard",
|
||||
"gosec",
|
||||
"gosimple",
|
||||
"govet",
|
||||
"grouper",
|
||||
"ineffassign",
|
||||
"lll",
|
||||
"makezero",
|
||||
"maintidx",
|
||||
"misspell",
|
||||
"nakedret",
|
||||
"nilerr",
|
||||
"noctx",
|
||||
"nolintlint",
|
||||
"nosprintfhostport",
|
||||
"predeclared",
|
||||
"revive",
|
||||
"rowserrcheck",
|
||||
"sqlclosecheck",
|
||||
"staticcheck",
|
||||
"structcheck",
|
||||
"stylecheck",
|
||||
"tenv",
|
||||
"tparallel",
|
||||
"typecheck",
|
||||
"unconvert",
|
||||
"unparam",
|
||||
"unused",
|
||||
"varcheck",
|
||||
"vetshadow",
|
||||
"wastedassign",
|
||||
]
|
||||
|
||||
# Please note that we only use depguard for stdlib as gomodguard only
|
||||
# supports modules currently. See https://github.com/ryancurrah/gomodguard/issues/12
|
||||
[linters-settings.depguard]
|
||||
list-type = "blacklist"
|
||||
include-go-root = true
|
||||
packages = [
|
||||
# ioutil is deprecated. The functions have been moved elsewhere:
|
||||
# https://golang.org/doc/go1.16#ioutil
|
||||
"io/ioutil",
|
||||
]
|
||||
|
||||
[linters-settings.errcheck]
|
||||
    # Don't allow setting of error to the blank identifier. If there is a legitimate
|
||||
# reason, there should be a nolint with an explanation.
|
||||
check-blank = true
|
||||
|
||||
exclude-functions = [
|
||||
# If we are rolling back a transaction, we are often already in an error
|
||||
# state.
|
||||
'(*database/sql.Tx).Rollback',
|
||||
|
||||
# It is reasonable to ignore errors if Cleanup fails in most cases.
|
||||
'(*github.com/google/renameio/v2.PendingFile).Cleanup',
|
||||
|
||||
# We often don't care if removing a file failed (e.g., it doesn't exist)
|
||||
'os.Remove',
|
||||
'os.RemoveAll',
|
||||
]
|
||||
|
||||
# Ignoring Close so that we don't have to have a bunch of
|
||||
# `defer func() { _ = r.Close() }()` constructs when we
|
||||
# don't actually care about the error.
|
||||
ignore = "Close,fmt:.*"
|
||||
|
||||
[linters-settings.errorlint]
|
||||
errorf = true
|
||||
asserts = true
|
||||
comparison = true
|
||||
|
||||
[linters-settings.exhaustive]
|
||||
default-signifies-exhaustive = true
|
||||
|
||||
[linters-settings.forbidigo]
|
||||
# Forbid the following identifiers
|
||||
forbid = [
|
||||
"^minFraud*",
|
||||
"^maxMind*",
|
||||
]
|
||||
|
||||
[linters-settings.gocritic]
|
||||
enabled-checks = [
|
||||
"appendAssign",
|
||||
"appendCombine",
|
||||
"argOrder",
|
||||
"assignOp",
|
||||
"badCall",
|
||||
"badCond",
|
||||
"badLock",
|
||||
"badRegexp",
|
||||
"badSorting",
|
||||
"boolExprSimplify",
|
||||
"builtinShadow",
|
||||
"builtinShadowDecl",
|
||||
"captLocal",
|
||||
"caseOrder",
|
||||
"codegenComment",
|
||||
"commentedOutCode",
|
||||
"commentedOutImport",
|
||||
"commentFormatting",
|
||||
"defaultCaseOrder",
|
||||
# Revive's defer rule already captures this. This caught no extra cases.
|
||||
# "deferInLoop",
|
||||
"deferUnlambda",
|
||||
"deprecatedComment",
|
||||
"docStub",
|
||||
"dupArg",
|
||||
"dupBranchBody",
|
||||
"dupCase",
|
||||
"dupImport",
|
||||
"dupSubExpr",
|
||||
"dynamicFmtString",
|
||||
"elseif",
|
||||
"emptyDecl",
|
||||
"emptyFallthrough",
|
||||
"emptyStringTest",
|
||||
"equalFold",
|
||||
"evalOrder",
|
||||
"exitAfterDefer",
|
||||
"exposedSyncMutex",
|
||||
"externalErrorReassign",
|
||||
# Given that all of our code runs on Linux and the / separate should
|
||||
# work fine, this seems less important.
|
||||
# "filepathJoin",
|
||||
"flagDeref",
|
||||
"flagName",
|
||||
"hexLiteral",
|
||||
"ifElseChain",
|
||||
"importShadow",
|
||||
"indexAlloc",
|
||||
"initClause",
|
||||
"ioutilDeprecated",
|
||||
"mapKey",
|
||||
"methodExprCall",
|
||||
"nestingReduce",
|
||||
"newDeref",
|
||||
"nilValReturn",
|
||||
"octalLiteral",
|
||||
"offBy1",
|
||||
"paramTypeCombine",
|
||||
"preferDecodeRune",
|
||||
"preferFilepathJoin",
|
||||
"preferFprint",
|
||||
"preferStringWriter",
|
||||
"preferWriteByte",
|
||||
"ptrToRefParam",
|
||||
"rangeExprCopy",
|
||||
"rangeValCopy",
|
||||
"redundantSprint",
|
||||
"regexpMust",
|
||||
"regexpPattern",
|
||||
# This might be good, but I don't think we want to encourage
|
||||
# significant changes to regexes as we port stuff from Perl.
|
||||
# "regexpSimplify",
|
||||
"ruleguard",
|
||||
"singleCaseSwitch",
|
||||
"sliceClear",
|
||||
"sloppyLen",
|
||||
# This seems like it might also be good, but a lot of existing code
|
||||
# fails.
|
||||
# "sloppyReassign",
|
||||
"returnAfterHttpError",
|
||||
"sloppyTypeAssert",
|
||||
"sortSlice",
|
||||
"sprintfQuotedString",
|
||||
"sqlQuery",
|
||||
"stringsCompare",
|
||||
"stringXbytes",
|
||||
"switchTrue",
|
||||
"syncMapLoadAndDelete",
|
||||
"timeExprSimplify",
|
||||
"todoCommentWithoutDetail",
|
||||
"tooManyResultsChecker",
|
||||
"truncateCmp",
|
||||
"typeAssertChain",
|
||||
"typeDefFirst",
|
||||
"typeSwitchVar",
|
||||
"typeUnparen",
|
||||
"underef",
|
||||
"unlabelStmt",
|
||||
"unlambda",
|
||||
# I am not sure we would want this linter and a lot of existing
|
||||
# code fails.
|
||||
# "unnamedResult",
|
||||
"unnecessaryBlock",
|
||||
"unnecessaryDefer",
|
||||
"unslice",
|
||||
"valSwap",
|
||||
"weakCond",
|
||||
"wrapperFunc",
|
||||
"yodaStyleExpr",
|
||||
# This requires explanations for "nolint" directives. This would be
|
||||
# nice for gosec ones, but I am not sure we want it generally unless
|
||||
# we can get the false positive rate lower.
|
||||
# "whyNoLint"
|
||||
]
|
||||
|
||||
[linters-settings.gofumpt]
|
||||
extra-rules = true
|
||||
lang-version = "1.18"
|
||||
|
||||
[linters-settings.govet]
|
||||
"enable-all" = true
|
||||
|
||||
[linters-settings.lll]
|
||||
line-length = 120
|
||||
tab-width = 4
|
||||
|
||||
[linters-settings.nolintlint]
|
||||
allow-leading-space = false
|
||||
allow-unused = false
|
||||
allow-no-explanation = ["lll", "misspell"]
|
||||
require-explanation = true
|
||||
require-specific = true
|
||||
|
||||
[linters-settings.revive]
|
||||
ignore-generated-header = true
|
||||
severity = "warning"
|
||||
|
||||
# This might be nice but it is so common that it is hard
|
||||
# to enable.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "add-constant"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "argument-limit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "atomic"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "bare-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "blank-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "bool-literal-in-expr"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "call-to-gc"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "cognitive-complexity"
|
||||
|
||||
# Probably a good rule, but we have a lot of names that
|
||||
# only have case differences.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "confusing-naming"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "confusing-results"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "constant-logical-expr"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "context-as-argument"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "context-keys-type"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "cyclomatic"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "deep-exit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "defer"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "dot-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "duplicated-imports"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "early-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "empty-block"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "empty-lines"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "errorf"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "error-strings"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "exported"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "file-header"
|
||||
|
||||
# We have a lot of flag parameters. This linter probably makes
|
||||
# a good point, but we would need some cleanup or a lot of nolints.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "flag-parameter"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "function-result-limit"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "get-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "identical-branches"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "if-return"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "imports-blacklist"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "import-shadowing"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "increment-decrement"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "indent-error-flow"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "line-length-limit"
|
||||
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "max-public-structs"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "modifies-parameter"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "modifies-value-receiver"
|
||||
|
||||
# We frequently use nested structs, particularly in tests.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "nested-structs"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "optimize-operands-order"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "package-comments"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range-val-address"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "range-val-in-closure"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "receiver-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "redefines-builtin-id"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "string-of-int"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "struct-tag"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "superfluous-else"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "time-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unconditional-recursion"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unexported-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unexported-return"
|
||||
|
||||
# This is covered elsewhere and we want to ignore some
|
||||
# functions such as fmt.Fprintf.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "unhandled-error"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unnecessary-stmt"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unreachable-code"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "unused-parameter"
|
||||
|
||||
# We generally have unused receivers in tests for meeting the
|
||||
# requirements of an interface.
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "unused-receiver"
|
||||
|
||||
# This probably makes sense after we upgrade to 1.18
|
||||
# [[linters-settings.revive.rules]]
|
||||
# name = "use-any"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "useless-break"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "var-declaration"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "var-naming"
|
||||
|
||||
[[linters-settings.revive.rules]]
|
||||
name = "waitgroup-by-value"
|
||||
|
||||
[linters-settings.unparam]
|
||||
check-exported = true
|
||||
|
||||
[[issues.exclude-rules]]
|
||||
linters = [
|
||||
"govet"
|
||||
]
|
||||
# we want to enable almost all govet rules. It is easier to just filter out
|
||||
# the ones we don't want:
|
||||
#
|
||||
# * fieldalignment - way too noisy. Although it is very useful in particular
|
||||
# cases where we are trying to use as little memory as possible, having
|
||||
# it go off on every struct isn't helpful.
|
||||
# * shadow - although often useful, it complains about _many_ err
|
||||
# shadowing assignments and some others where shadowing is clear.
|
||||
text = "^(fieldalignment|shadow)"
|
@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2015, Gregory J. Oschwald <oschwald@gmail.com>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
|
||||
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
|
||||
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
|
||||
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
|
||||
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
|
||||
PERFORMANCE OF THIS SOFTWARE.
|
@ -0,0 +1,36 @@
|
||||
# MaxMind DB Reader for Go #
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/oschwald/maxminddb-golang?status.svg)](https://godoc.org/github.com/oschwald/maxminddb-golang)
|
||||
|
||||
This is a Go reader for the MaxMind DB format. Although this can be used to
|
||||
read [GeoLite2](http://dev.maxmind.com/geoip/geoip2/geolite2/) and
|
||||
[GeoIP2](https://www.maxmind.com/en/geoip2-databases) databases,
|
||||
[geoip2](https://github.com/oschwald/geoip2-golang) provides a higher-level
|
||||
API for doing so.
|
||||
|
||||
This is not an official MaxMind API.
|
||||
|
||||
## Installation ##
|
||||
|
||||
```
|
||||
go get github.com/oschwald/maxminddb-golang
|
||||
```
|
||||
|
||||
## Usage ##
|
||||
|
||||
[See GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) for
|
||||
documentation and examples.
|
||||
|
||||
## Examples ##
|
||||
|
||||
See [GoDoc](http://godoc.org/github.com/oschwald/maxminddb-golang) or
|
||||
`example_test.go` for examples.
|
||||
|
||||
## Contributing ##
|
||||
|
||||
Contributions welcome! Please fork the repository and open a pull request
|
||||
with your changes.
|
||||
|
||||
## License ##
|
||||
|
||||
This is free software, licensed under the ISC License.
|
@ -0,0 +1,897 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// decoder decodes values from the data section of a MaxMind DB file.
type decoder struct {
	// buffer holds the raw bytes of the data section.
	buffer []byte
}
|
||||
|
||||
// dataType identifies the on-disk type of a value in the data section, as
// defined by the MaxMind DB file format specification. The numeric values
// must match the specification exactly, so the order below is significant.
type dataType int

const (
	_Extended dataType = iota
	_Pointer
	_String
	_Float64
	_Bytes
	_Uint16
	_Uint32
	_Map
	_Int32
	_Uint64
	_Uint128
	_Slice
	// We don't use the next two. They are placeholders. See the spec
	// for more details.
	_Container //nolint: deadcode, varcheck // above
	_Marker    //nolint: deadcode, varcheck // above
	_Bool
	_Float32
)
|
||||
|
||||
const (
	// maximumDataStructureDepth bounds recursion while decoding, protecting
	// against corrupt or malicious databases with cyclic/deeply nested data.
	// This is the value used in libmaxminddb.
	maximumDataStructureDepth = 512
)
|
||||
|
||||
// decode decodes the value starting at offset into result and returns the
// offset of the next value. depth tracks nesting so corrupt databases cannot
// cause unbounded recursion.
func (d *decoder) decode(offset uint, result reflect.Value, depth int) (uint, error) {
	if depth > maximumDataStructureDepth {
		return 0, newInvalidDatabaseError(
			"exceeded maximum data structure depth; database is likely corrupt",
		)
	}
	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}

	// A uintptr-kinded result captures the value's start offset instead of
	// the decoded value itself; the value is skipped rather than decoded.
	if typeNum != _Pointer && result.Kind() == reflect.Uintptr {
		result.Set(reflect.ValueOf(uintptr(offset)))
		return d.nextValueOffset(offset, 1)
	}
	return d.decodeFromType(typeNum, size, newOffset, result, depth+1)
}
|
||||
|
||||
// decodeToDeserializer decodes the value at offset by streaming it into dser
// rather than into a reflect.Value. When the deserializer elects to skip the
// value and getNext is true, the offset of the following value is still
// computed and returned; otherwise a zero offset is returned for skips.
func (d *decoder) decodeToDeserializer(
	offset uint,
	dser deserializer,
	depth int,
	getNext bool,
) (uint, error) {
	if depth > maximumDataStructureDepth {
		return 0, newInvalidDatabaseError(
			"exceeded maximum data structure depth; database is likely corrupt",
		)
	}
	// Give the deserializer a chance to skip this value entirely.
	skip, err := dser.ShouldSkip(uintptr(offset))
	if err != nil {
		return 0, err
	}
	if skip {
		if getNext {
			return d.nextValueOffset(offset, 1)
		}
		return 0, nil
	}

	typeNum, size, newOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}

	return d.decodeFromTypeToDeserializer(typeNum, size, newOffset, dser, depth+1)
}
|
||||
|
||||
// decodeCtrlData reads the control byte at offset and returns the value's
// type, its size, and the offset at which the value's payload begins.
func (d *decoder) decodeCtrlData(offset uint) (dataType, uint, uint, error) {
	newOffset := offset + 1
	if offset >= uint(len(d.buffer)) {
		return 0, 0, 0, newOffsetError()
	}
	ctrlByte := d.buffer[offset]

	// The top three bits of the control byte encode the type.
	typeNum := dataType(ctrlByte >> 5)
	if typeNum == _Extended {
		// Extended types store the actual type (offset by 7) in the
		// byte following the control byte.
		if newOffset >= uint(len(d.buffer)) {
			return 0, 0, 0, newOffsetError()
		}
		typeNum = dataType(d.buffer[newOffset] + 7)
		newOffset++
	}

	var size uint
	size, newOffset, err := d.sizeFromCtrlByte(ctrlByte, newOffset, typeNum)
	return typeNum, size, newOffset, err
}
|
||||
|
||||
func (d *decoder) sizeFromCtrlByte(
|
||||
ctrlByte byte,
|
||||
offset uint,
|
||||
typeNum dataType,
|
||||
) (uint, uint, error) {
|
||||
size := uint(ctrlByte & 0x1f)
|
||||
if typeNum == _Extended {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
var bytesToRead uint
|
||||
if size < 29 {
|
||||
return size, offset, nil
|
||||
}
|
||||
|
||||
bytesToRead = size - 28
|
||||
newOffset := offset + bytesToRead
|
||||
if newOffset > uint(len(d.buffer)) {
|
||||
return 0, 0, newOffsetError()
|
||||
}
|
||||
if size == 29 {
|
||||
return 29 + uint(d.buffer[offset]), offset + 1, nil
|
||||
}
|
||||
|
||||
sizeBytes := d.buffer[offset:newOffset]
|
||||
|
||||
switch {
|
||||
case size == 30:
|
||||
size = 285 + uintFromBytes(0, sizeBytes)
|
||||
case size > 30:
|
||||
size = uintFromBytes(0, sizeBytes) + 65821
|
||||
}
|
||||
return size, newOffset, nil
|
||||
}
|
||||
|
||||
// decodeFromType dispatches decoding of a value of the given type at offset
// into result, returning the offset immediately after the value.
func (d *decoder) decodeFromType(
	dtype dataType,
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	// Follow pointers/interfaces so we write into the concrete value.
	result = d.indirect(result)

	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		return d.unmarshalBool(size, offset, result)
	case _Map:
		return d.unmarshalMap(size, offset, result, depth)
	case _Pointer:
		return d.unmarshalPointer(size, offset, result, depth)
	case _Slice:
		return d.unmarshalSlice(size, offset, result, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		return d.unmarshalBytes(size, offset, result)
	case _Float32:
		return d.unmarshalFloat32(size, offset, result)
	case _Float64:
		return d.unmarshalFloat64(size, offset, result)
	case _Int32:
		return d.unmarshalInt32(size, offset, result)
	case _String:
		return d.unmarshalString(size, offset, result)
	case _Uint16:
		return d.unmarshalUint(size, offset, result, 16)
	case _Uint32:
		return d.unmarshalUint(size, offset, result, 32)
	case _Uint64:
		return d.unmarshalUint(size, offset, result, 64)
	case _Uint128:
		return d.unmarshalUint128(size, offset, result)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
|
||||
|
||||
// decodeFromTypeToDeserializer dispatches decoding of a value of the given
// type at offset, streaming the decoded value into dser. It returns the
// offset immediately after the value.
func (d *decoder) decodeFromTypeToDeserializer(
	dtype dataType,
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	// For these types, size has a special meaning
	switch dtype {
	case _Bool:
		v, offset := d.decodeBool(size, offset)
		return offset, dser.Bool(v)
	case _Map:
		return d.decodeMapToDeserializer(size, offset, dser, depth)
	case _Pointer:
		// Follow the pointer for the value, but resume after the
		// pointer itself (newOffset), not after the pointed-to value.
		pointer, newOffset, err := d.decodePointer(size, offset)
		if err != nil {
			return 0, err
		}
		_, err = d.decodeToDeserializer(pointer, dser, depth, false)
		return newOffset, err
	case _Slice:
		return d.decodeSliceToDeserializer(size, offset, dser, depth)
	}

	// For the remaining types, size is the byte size
	if offset+size > uint(len(d.buffer)) {
		return 0, newOffsetError()
	}
	switch dtype {
	case _Bytes:
		v, offset := d.decodeBytes(size, offset)
		return offset, dser.Bytes(v)
	case _Float32:
		v, offset := d.decodeFloat32(size, offset)
		return offset, dser.Float32(v)
	case _Float64:
		v, offset := d.decodeFloat64(size, offset)
		return offset, dser.Float64(v)
	case _Int32:
		v, offset := d.decodeInt(size, offset)
		return offset, dser.Int32(int32(v))
	case _String:
		v, offset := d.decodeString(size, offset)
		return offset, dser.String(v)
	case _Uint16:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint16(uint16(v))
	case _Uint32:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint32(uint32(v))
	case _Uint64:
		v, offset := d.decodeUint(size, offset)
		return offset, dser.Uint64(v)
	case _Uint128:
		v, offset := d.decodeUint128(size, offset)
		return offset, dser.Uint128(v)
	default:
		return 0, newInvalidDatabaseError("unknown type: %d", dtype)
	}
}
|
||||
|
||||
// unmarshalBool decodes a bool at offset into result, which must be a bool
// or an empty interface. The encoded size must be at most 1.
func (d *decoder) unmarshalBool(size, offset uint, result reflect.Value) (uint, error) {
	if size > 1 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (bool size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeBool(size, offset)

	switch result.Kind() {
	case reflect.Bool:
		result.SetBool(value)
		return newOffset, nil
	case reflect.Interface:
		// Only an empty interface can hold an arbitrary value.
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// indirect follows pointers and create values as necessary. This is
// heavily based on encoding/json as my original version had a subtle
// bug. This method should be considered to be licensed under
// https://golang.org/LICENSE
func (d *decoder) indirect(result reflect.Value) reflect.Value {
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if result.Kind() == reflect.Interface && !result.IsNil() {
			e := result.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() {
				result = e
				continue
			}
		}

		if result.Kind() != reflect.Ptr {
			break
		}

		// Allocate through nil pointers so the final value is settable.
		if result.IsNil() {
			result.Set(reflect.New(result.Type().Elem()))
		}

		result = result.Elem()
	}
	return result
}
|
||||
|
||||
// sliceType caches the reflect.Type of []byte for byte-slice target checks.
var sliceType = reflect.TypeOf([]byte{})
|
||||
|
||||
// unmarshalBytes decodes a byte string at offset into result, which must be
// a []byte or an empty interface.
func (d *decoder) unmarshalBytes(size, offset uint, result reflect.Value) (uint, error) {
	value, newOffset := d.decodeBytes(size, offset)

	switch result.Kind() {
	case reflect.Slice:
		// Only exactly []byte is settable; other slice types fall
		// through to the type error below.
		if result.Type() == sliceType {
			result.SetBytes(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalFloat32 decodes a 32-bit float at offset into result, which must
// be a float32/float64 or an empty interface. The encoded size must be 4.
func (d *decoder) unmarshalFloat32(size, offset uint, result reflect.Value) (uint, error) {
	if size != 4 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (float32 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeFloat32(size, offset)

	switch result.Kind() {
	case reflect.Float32, reflect.Float64:
		result.SetFloat(float64(value))
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalFloat64 decodes a 64-bit float at offset into result, which must
// be a float32/float64 or an empty interface. The encoded size must be 8.
func (d *decoder) unmarshalFloat64(size, offset uint, result reflect.Value) (uint, error) {
	if size != 8 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (float 64 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeFloat64(size, offset)

	switch result.Kind() {
	case reflect.Float32, reflect.Float64:
		// A float32 target may not be able to represent the value.
		if result.OverflowFloat(value) {
			return 0, newUnmarshalTypeError(value, result.Type())
		}
		result.SetFloat(value)
		return newOffset, nil
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalInt32 decodes a signed 32-bit integer at offset into result,
// which may be any Go integer kind (with overflow checking) or an empty
// interface. The encoded size must be at most 4 bytes.
func (d *decoder) unmarshalInt32(size, offset uint, result reflect.Value) (uint, error) {
	if size > 4 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (int32 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeInt(size, offset)

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint,
		reflect.Uint8,
		reflect.Uint16,
		reflect.Uint32,
		reflect.Uint64,
		reflect.Uintptr:
		n := uint64(value)
		if !result.OverflowUint(n) {
			result.SetUint(n)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	// Overflow and unsupported kinds both report a type error.
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// unmarshalMap decodes a map of size entries at offset into result, which
// must be a struct, a map, or an empty interface.
func (d *decoder) unmarshalMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	result = d.indirect(result)
	switch result.Kind() {
	default:
		return 0, newUnmarshalTypeError("map", result.Type())
	case reflect.Struct:
		return d.decodeStruct(size, offset, result, depth)
	case reflect.Map:
		return d.decodeMap(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			// Empty interface: decode into a freshly made
			// map[string]interface{} pre-sized to the entry count.
			rv := reflect.ValueOf(make(map[string]interface{}, size))
			newOffset, err := d.decodeMap(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
		return 0, newUnmarshalTypeError("map", result.Type())
	}
}
|
||||
|
||||
// unmarshalPointer resolves a pointer value at offset and decodes the value
// it refers to into result. The returned offset is the position after the
// pointer itself, not after the pointed-to value.
func (d *decoder) unmarshalPointer(
	size, offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	pointer, newOffset, err := d.decodePointer(size, offset)
	if err != nil {
		return 0, err
	}
	_, err = d.decode(pointer, result, depth)
	return newOffset, err
}
|
||||
|
||||
// unmarshalSlice decodes an array of size elements at offset into result,
// which must be a slice or an empty interface.
func (d *decoder) unmarshalSlice(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	switch result.Kind() {
	case reflect.Slice:
		return d.decodeSlice(size, offset, result, depth)
	case reflect.Interface:
		if result.NumMethod() == 0 {
			// Empty interface: decode into a fresh []interface{}.
			a := []interface{}{}
			rv := reflect.ValueOf(&a).Elem()
			newOffset, err := d.decodeSlice(size, offset, rv, depth)
			result.Set(rv)
			return newOffset, err
		}
	}
	return 0, newUnmarshalTypeError("array", result.Type())
}
|
||||
|
||||
func (d *decoder) unmarshalString(size, offset uint, result reflect.Value) (uint, error) {
|
||||
value, newOffset := d.decodeString(size, offset)
|
||||
|
||||
switch result.Kind() {
|
||||
case reflect.String:
|
||||
result.SetString(value)
|
||||
return newOffset, nil
|
||||
case reflect.Interface:
|
||||
if result.NumMethod() == 0 {
|
||||
result.Set(reflect.ValueOf(value))
|
||||
return newOffset, nil
|
||||
}
|
||||
}
|
||||
return newOffset, newUnmarshalTypeError(value, result.Type())
|
||||
}
|
||||
|
||||
// unmarshalUint decodes an unsigned integer record into result.
// uintType is the nominal bit width (16, 32, or 64) and bounds the
// number of stored bytes; the stored size may be smaller (leading
// zeros are omitted in the format).
func (d *decoder) unmarshalUint(
	size, offset uint,
	result reflect.Value,
	uintType uint,
) (uint, error) {
	if size > uintType/8 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (uint%v size of %v)",
			uintType,
			size,
		)
	}

	value, newOffset := d.decodeUint(size, offset)

	switch result.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// Allow storing into signed types as long as the value fits.
		n := int64(value)
		if !result.OverflowInt(n) {
			result.SetInt(n)
			return newOffset, nil
		}
	case reflect.Uint,
		reflect.Uint8,
		reflect.Uint16,
		reflect.Uint32,
		reflect.Uint64,
		reflect.Uintptr:
		if !result.OverflowUint(value) {
			result.SetUint(value)
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			// Empty interface receives the raw uint64.
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	// Overflow or incompatible kind.
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
// bigIntType is cached so unmarshalUint128 can cheaply recognize a
// *big.Int destination.
var bigIntType = reflect.TypeOf(big.Int{})

// unmarshalUint128 decodes a uint128 record into result, which must be
// a big.Int struct or an empty interface value (which receives a
// *big.Int).
func (d *decoder) unmarshalUint128(size, offset uint, result reflect.Value) (uint, error) {
	if size > 16 {
		return 0, newInvalidDatabaseError(
			"the MaxMind DB file's data section contains bad data (uint128 size of %v)",
			size,
		)
	}
	value, newOffset := d.decodeUint128(size, offset)

	switch result.Kind() {
	case reflect.Struct:
		if result.Type() == bigIntType {
			// Copy the big.Int value into the destination struct.
			result.Set(reflect.ValueOf(*value))
			return newOffset, nil
		}
	case reflect.Interface:
		if result.NumMethod() == 0 {
			result.Set(reflect.ValueOf(value))
			return newOffset, nil
		}
	}
	return newOffset, newUnmarshalTypeError(value, result.Type())
}
|
||||
|
||||
func (d *decoder) decodeBool(size, offset uint) (bool, uint) {
|
||||
return size != 0, offset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeBytes(size, offset uint) ([]byte, uint) {
|
||||
newOffset := offset + size
|
||||
bytes := make([]byte, size)
|
||||
copy(bytes, d.buffer[offset:newOffset])
|
||||
return bytes, newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat64(size, offset uint) (float64, uint) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint64(d.buffer[offset:newOffset])
|
||||
return math.Float64frombits(bits), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeFloat32(size, offset uint) (float32, uint) {
|
||||
newOffset := offset + size
|
||||
bits := binary.BigEndian.Uint32(d.buffer[offset:newOffset])
|
||||
return math.Float32frombits(bits), newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeInt(size, offset uint) (int, uint) {
|
||||
newOffset := offset + size
|
||||
var val int32
|
||||
for _, b := range d.buffer[offset:newOffset] {
|
||||
val = (val << 8) | int32(b)
|
||||
}
|
||||
return int(val), newOffset
|
||||
}
|
||||
|
||||
// decodeMap decodes a map record with `size` entries starting at
// `offset` into result, which must be an assignable map value with
// string keys. A nil map is allocated first. Returns the offset just
// past the map's data.
func (d *decoder) decodeMap(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	if result.IsNil() {
		result.Set(reflect.MakeMapWithSize(result.Type(), int(size)))
	}

	mapType := result.Type()
	keyValue := reflect.New(mapType.Key()).Elem()
	elemType := mapType.Elem()
	elemKind := elemType.Kind()
	var elemValue reflect.Value
	for i := uint(0); i < size; i++ {
		var key []byte
		var err error
		key, offset, err = d.decodeKey(offset)

		if err != nil {
			return 0, err
		}

		// Reuse elemValue across iterations when possible; interface
		// elements must be freshly allocated so a value already stored
		// in the map is not mutated in place.
		if !elemValue.IsValid() || elemKind == reflect.Interface {
			elemValue = reflect.New(elemType).Elem()
		}

		offset, err = d.decode(offset, elemValue, depth)
		if err != nil {
			return 0, err
		}

		// string(key) copies here, which is required since the map
		// outlives the decode loop.
		keyValue.SetString(string(key))
		result.SetMapIndex(keyValue, elemValue)
	}
	return offset, nil
}
|
||||
|
||||
// decodeMapToDeserializer feeds a map record of `size` key/value pairs
// to dser, bracketed by StartMap/End calls.
func (d *decoder) decodeMapToDeserializer(
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	err := dser.StartMap(size)
	if err != nil {
		return 0, err
	}
	for i := uint(0); i < size; i++ {
		// TODO - implement key/value skipping?
		// Key.
		offset, err = d.decodeToDeserializer(offset, dser, depth, true)
		if err != nil {
			return 0, err
		}

		// Value.
		offset, err = d.decodeToDeserializer(offset, dser, depth, true)
		if err != nil {
			return 0, err
		}
	}
	err = dser.End()
	if err != nil {
		return 0, err
	}
	return offset, nil
}
|
||||
|
||||
// decodePointer decodes a pointer record. `size` is the control byte's
// size field: bits 3-4 select the pointer width (1-4 bytes) and, for
// widths 1-3, bits 0-2 are the high bits of the pointer value. Returns
// the target offset within the data section and the offset just past
// the pointer record.
func (d *decoder) decodePointer(
	size uint,
	offset uint,
) (uint, uint, error) {
	pointerSize := ((size >> 3) & 0x3) + 1
	newOffset := offset + pointerSize
	if newOffset > uint(len(d.buffer)) {
		return 0, 0, newOffsetError()
	}
	pointerBytes := d.buffer[offset:newOffset]
	var prefix uint
	if pointerSize == 4 {
		// 4-byte pointers do not use the control byte's low bits.
		prefix = 0
	} else {
		prefix = size & 0x7
	}
	unpacked := uintFromBytes(prefix, pointerBytes)

	// Each pointer width is biased by the maximum value representable
	// by the smaller widths, per the MaxMind DB format.
	var pointerValueOffset uint
	switch pointerSize {
	case 1:
		pointerValueOffset = 0
	case 2:
		pointerValueOffset = 2048
	case 3:
		pointerValueOffset = 526336
	case 4:
		pointerValueOffset = 0
	}

	pointer := unpacked + pointerValueOffset

	return pointer, newOffset, nil
}
|
||||
|
||||
func (d *decoder) decodeSlice(
|
||||
size uint,
|
||||
offset uint,
|
||||
result reflect.Value,
|
||||
depth int,
|
||||
) (uint, error) {
|
||||
result.Set(reflect.MakeSlice(result.Type(), int(size), int(size)))
|
||||
for i := 0; i < int(size); i++ {
|
||||
var err error
|
||||
offset, err = d.decode(offset, result.Index(i), depth)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
|
||||
// decodeSliceToDeserializer feeds an array record of `size` elements
// to dser, bracketed by StartSlice/End calls.
func (d *decoder) decodeSliceToDeserializer(
	size uint,
	offset uint,
	dser deserializer,
	depth int,
) (uint, error) {
	err := dser.StartSlice(size)
	if err != nil {
		return 0, err
	}
	for i := uint(0); i < size; i++ {
		offset, err = d.decodeToDeserializer(offset, dser, depth, true)
		if err != nil {
			return 0, err
		}
	}
	err = dser.End()
	if err != nil {
		return 0, err
	}
	return offset, nil
}
|
||||
|
||||
func (d *decoder) decodeString(size, offset uint) (string, uint) {
|
||||
newOffset := offset + size
|
||||
return string(d.buffer[offset:newOffset]), newOffset
|
||||
}
|
||||
|
||||
// decodeStruct decodes a map record with `size` entries into the
// struct result, matching map keys to struct fields via the cached
// field table (maxminddb tags honored). Keys with no matching field
// are skipped.
func (d *decoder) decodeStruct(
	size uint,
	offset uint,
	result reflect.Value,
	depth int,
) (uint, error) {
	fields := cachedFields(result)

	// This fills in embedded structs
	for _, i := range fields.anonymousFields {
		_, err := d.unmarshalMap(size, offset, result.Field(i), depth)
		if err != nil {
			return 0, err
		}
	}

	// This handles named fields
	for i := uint(0); i < size; i++ {
		var (
			err error
			key []byte
		)
		key, offset, err = d.decodeKey(offset)
		if err != nil {
			return 0, err
		}
		// The string() does not create a copy due to this compiler
		// optimization: https://github.com/golang/go/issues/3512
		j, ok := fields.namedFields[string(key)]
		if !ok {
			// No matching field: skip the value without decoding it.
			offset, err = d.nextValueOffset(offset, 1)
			if err != nil {
				return 0, err
			}
			continue
		}

		offset, err = d.decode(offset, result.Field(j), depth)
		if err != nil {
			return 0, err
		}
	}
	return offset, nil
}
|
||||
|
||||
// fieldsType is the per-struct-type field lookup table used by
// decodeStruct.
type fieldsType struct {
	// namedFields maps the database key (field name or maxminddb tag)
	// to the struct field index.
	namedFields map[string]int
	// anonymousFields lists the indices of embedded fields, which are
	// decoded from the same map record.
	anonymousFields []int
}

// fieldsMap caches fieldsType values keyed by reflect.Type.
var fieldsMap sync.Map
|
||||
|
||||
// cachedFields returns the field lookup table for result's struct
// type, building and caching it on first use. Concurrent builders may
// race harmlessly: both compute the same table and the last Store wins.
func cachedFields(result reflect.Value) *fieldsType {
	resultType := result.Type()

	if fields, ok := fieldsMap.Load(resultType); ok {
		return fields.(*fieldsType)
	}
	numFields := resultType.NumField()
	namedFields := make(map[string]int, numFields)
	var anonymous []int
	for i := 0; i < numFields; i++ {
		field := resultType.Field(i)

		fieldName := field.Name
		if tag := field.Tag.Get("maxminddb"); tag != "" {
			// A "-" tag excludes the field entirely.
			if tag == "-" {
				continue
			}
			fieldName = tag
		}
		if field.Anonymous {
			anonymous = append(anonymous, i)
			continue
		}
		namedFields[fieldName] = i
	}
	fields := &fieldsType{namedFields, anonymous}
	fieldsMap.Store(resultType, fields)

	return fields
}
|
||||
|
||||
func (d *decoder) decodeUint(size, offset uint) (uint64, uint) {
|
||||
newOffset := offset + size
|
||||
bytes := d.buffer[offset:newOffset]
|
||||
|
||||
var val uint64
|
||||
for _, b := range bytes {
|
||||
val = (val << 8) | uint64(b)
|
||||
}
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
func (d *decoder) decodeUint128(size, offset uint) (*big.Int, uint) {
|
||||
newOffset := offset + size
|
||||
val := new(big.Int)
|
||||
val.SetBytes(d.buffer[offset:newOffset])
|
||||
|
||||
return val, newOffset
|
||||
}
|
||||
|
||||
// uintFromBytes folds uintBytes (big-endian) into an accumulator
// seeded with prefix, i.e. prefix becomes the most significant bits.
func uintFromBytes(prefix uint, uintBytes []byte) uint {
	acc := prefix
	for i := 0; i < len(uintBytes); i++ {
		acc = acc<<8 | uint(uintBytes[i])
	}
	return acc
}
|
||||
|
||||
// decodeKey decodes a map key into []byte slice. We use a []byte so that we
// can take advantage of https://github.com/golang/go/issues/3512 to avoid
// copying the bytes when decoding a struct. Previously, we achieved this by
// using unsafe.
//
// Keys may themselves be pointer records; those are followed here, and
// the returned offset is the one just past the pointer, not the target.
func (d *decoder) decodeKey(offset uint) ([]byte, uint, error) {
	typeNum, size, dataOffset, err := d.decodeCtrlData(offset)
	if err != nil {
		return nil, 0, err
	}
	if typeNum == _Pointer {
		pointer, ptrOffset, err := d.decodePointer(size, dataOffset)
		if err != nil {
			return nil, 0, err
		}
		key, _, err := d.decodeKey(pointer)
		return key, ptrOffset, err
	}
	if typeNum != _String {
		return nil, 0, newInvalidDatabaseError("unexpected type when decoding string: %v", typeNum)
	}
	newOffset := dataOffset + size
	if newOffset > uint(len(d.buffer)) {
		return nil, 0, newOffsetError()
	}
	// The returned slice aliases d.buffer; callers must copy if they
	// retain it past the buffer's lifetime.
	return d.buffer[dataOffset:newOffset], newOffset, nil
}
|
||||
|
||||
// This function is used to skip ahead to the next value without decoding
// the one at the offset passed in. The size bits have different meanings for
// different data types.
func (d *decoder) nextValueOffset(offset, numberToSkip uint) (uint, error) {
	if numberToSkip == 0 {
		return offset, nil
	}
	typeNum, size, offset, err := d.decodeCtrlData(offset)
	if err != nil {
		return 0, err
	}
	switch typeNum {
	case _Pointer:
		// Pointers occupy only the pointer bytes; do not follow them.
		_, offset, err = d.decodePointer(size, offset)
		if err != nil {
			return 0, err
		}
	case _Map:
		// A map contributes size key/value pairs to skip.
		numberToSkip += 2 * size
	case _Slice:
		numberToSkip += size
	case _Bool:
		// Booleans store their value in the size bits; no data bytes.
	default:
		offset += size
	}
	// Recurse with one value consumed.
	return d.nextValueOffset(offset, numberToSkip-1)
}
|
@ -0,0 +1,31 @@
|
||||
package maxminddb
|
||||
|
||||
import "math/big"
|
||||
|
||||
// deserializer is an interface for a type that deserializes an MaxMind DB
// data record to some other type. This exists as an alternative to the
// standard reflection API.
//
// This is fundamentally different than the Unmarshaler interface that
// several packages provide. A Deserializer will generally create the
// final struct or value rather than unmarshaling to itself.
//
// This interface and the associated unmarshaling code is EXPERIMENTAL!
// It is not currently covered by any Semantic Versioning guarantees.
// Use at your own risk.
type deserializer interface {
	// ShouldSkip reports whether the record at offset can be skipped
	// without being decoded.
	ShouldSkip(offset uintptr) (bool, error)
	// StartSlice/StartMap open a container of the given size; End
	// closes the most recently opened container.
	StartSlice(size uint) error
	StartMap(size uint) error
	End() error
	// One callback per scalar type in the MaxMind DB format.
	String(string) error
	Float64(float64) error
	Bytes([]byte) error
	Uint16(uint16) error
	Uint32(uint32) error
	Int32(int32) error
	Uint64(uint64) error
	Uint128(*big.Int) error
	Bool(bool) error
	Float32(float32) error
}
|
@ -0,0 +1,42 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// InvalidDatabaseError is returned when the database contains invalid data
// and cannot be parsed.
type InvalidDatabaseError struct {
	message string
}

// newOffsetError reports a read past the end of the database buffer.
func newOffsetError() InvalidDatabaseError {
	return InvalidDatabaseError{"unexpected end of database"}
}

// newInvalidDatabaseError builds an InvalidDatabaseError from a
// printf-style format string.
func newInvalidDatabaseError(format string, args ...interface{}) InvalidDatabaseError {
	return InvalidDatabaseError{fmt.Sprintf(format, args...)}
}

// Error implements the error interface.
func (e InvalidDatabaseError) Error() string {
	return e.message
}
|
||||
|
||||
// UnmarshalTypeError is returned when the value in the database cannot be
// assigned to the specified data type.
type UnmarshalTypeError struct {
	Value string       // stringified copy of the database value that caused the error
	Type  reflect.Type // type of the value that could not be assign to
}

// newUnmarshalTypeError stringifies value and pairs it with the
// destination type for the error message.
func newUnmarshalTypeError(value interface{}, rType reflect.Type) UnmarshalTypeError {
	return UnmarshalTypeError{
		Value: fmt.Sprintf("%v", value),
		Type:  rType,
	}
}

// Error implements the error interface.
func (e UnmarshalTypeError) Error() string {
	return fmt.Sprintf("maxminddb: cannot unmarshal %s into type %s", e.Value, e.Type.String())
}
|
@ -0,0 +1,16 @@
|
||||
//go:build !windows && !appengine && !plan9
|
||||
// +build !windows,!appengine,!plan9
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// mmap maps `length` bytes of the open file descriptor fd into memory
// as a read-only shared mapping.
func mmap(fd, length int) (data []byte, err error) {
	return unix.Mmap(fd, 0, length, unix.PROT_READ, unix.MAP_SHARED)
}

// munmap releases a mapping previously returned by mmap.
func munmap(b []byte) (err error) {
	return unix.Munmap(b)
}
|
@ -0,0 +1,85 @@
|
||||
// +build windows,!appengine
|
||||
|
||||
package maxminddb
|
||||
|
||||
// Windows support largely borrowed from mmap-go.
|
||||
//
|
||||
// Copyright 2011 Evan Shaw. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// memoryMap is a byte slice backed by a Windows file mapping rather
// than the Go heap.
type memoryMap []byte

// Windows
// handleMap records the file-mapping handle for each mapped base
// address so munmap can close it; handleLock guards the map.
var handleLock sync.Mutex
var handleMap = map[uintptr]windows.Handle{}
|
||||
|
||||
// mmap maps `length` bytes of the file descriptor fd read-only using a
// Windows file mapping. The mapping handle is remembered in handleMap,
// keyed by base address, so munmap can close it later.
func mmap(fd int, length int) (data []byte, err error) {
	h, errno := windows.CreateFileMapping(windows.Handle(fd), nil,
		uint32(windows.PAGE_READONLY), 0, uint32(length), nil)
	if h == 0 {
		return nil, os.NewSyscallError("CreateFileMapping", errno)
	}

	addr, errno := windows.MapViewOfFile(h, uint32(windows.FILE_MAP_READ), 0,
		0, uintptr(length))
	if addr == 0 {
		return nil, os.NewSyscallError("MapViewOfFile", errno)
	}
	handleLock.Lock()
	handleMap[addr] = h
	handleLock.Unlock()

	// Build a []byte header over the mapped view.
	m := memoryMap{}
	dh := m.header()
	dh.Data = addr
	dh.Len = length
	dh.Cap = dh.Len

	return m, nil
}
|
||||
|
||||
// header exposes the slice header so mmap/munmap can manipulate the
// backing pointer directly.
func (m *memoryMap) header() *reflect.SliceHeader {
	return (*reflect.SliceHeader)(unsafe.Pointer(m))
}

// flush writes any dirty pages of the view back to disk. Returns nil
// when FlushViewOfFile reports no error.
func flush(addr, len uintptr) error {
	errno := windows.FlushViewOfFile(addr, len)
	return os.NewSyscallError("FlushViewOfFile", errno)
}
|
||||
|
||||
// munmap unmaps the view at b's base address and closes the
// file-mapping handle recorded by mmap.
func munmap(b []byte) (err error) {
	m := memoryMap(b)
	dh := m.header()

	addr := dh.Data
	length := uintptr(dh.Len)

	// NOTE(review): flush's error is discarded here — confirm this is
	// intentional for a read-only mapping.
	flush(addr, length)
	err = windows.UnmapViewOfFile(addr)
	if err != nil {
		return err
	}

	handleLock.Lock()
	defer handleLock.Unlock()
	handle, ok := handleMap[addr]
	if !ok {
		// should be impossible; we would've errored above
		return errors.New("unknown base address")
	}
	delete(handleMap, addr)

	e := windows.CloseHandle(windows.Handle(handle))
	return os.NewSyscallError("CloseHandle", e)
}
|
@ -0,0 +1,58 @@
|
||||
package maxminddb
|
||||
|
||||
// nodeReader reads the left and right records of a search-tree node,
// given the node's byte offset into the tree buffer. One implementation
// exists per supported record size (24, 28, or 32 bits).
type nodeReader interface {
	readLeft(uint) uint
	readRight(uint) uint
}
|
||||
|
||||
// nodeReader24 reads nodes whose records are 24 bits wide (6 bytes per
// node: left record in bytes 0-2, right record in bytes 3-5).
type nodeReader24 struct {
	buffer []byte
}

// readLeft returns the left record of the node starting at nodeNumber.
func (n nodeReader24) readLeft(nodeNumber uint) uint {
	buf := n.buffer
	return uint(buf[nodeNumber])<<16 |
		uint(buf[nodeNumber+1])<<8 |
		uint(buf[nodeNumber+2])
}

// readRight returns the right record of the node starting at nodeNumber.
func (n nodeReader24) readRight(nodeNumber uint) uint {
	buf := n.buffer
	return uint(buf[nodeNumber+3])<<16 |
		uint(buf[nodeNumber+4])<<8 |
		uint(buf[nodeNumber+5])
}
|
||||
|
||||
// nodeReader28 reads nodes whose records are 28 bits wide (7 bytes per
// node: the middle byte holds the top 4 bits of each record — high
// nibble for the left record, low nibble for the right).
type nodeReader28 struct {
	buffer []byte
}

// readLeft returns the left record of the node starting at nodeNumber.
func (n nodeReader28) readLeft(nodeNumber uint) uint {
	buf := n.buffer
	return (uint(buf[nodeNumber+3])&0xF0)<<20 |
		uint(buf[nodeNumber])<<16 |
		uint(buf[nodeNumber+1])<<8 |
		uint(buf[nodeNumber+2])
}

// readRight returns the right record of the node starting at nodeNumber.
func (n nodeReader28) readRight(nodeNumber uint) uint {
	buf := n.buffer
	return (uint(buf[nodeNumber+3])&0x0F)<<24 |
		uint(buf[nodeNumber+4])<<16 |
		uint(buf[nodeNumber+5])<<8 |
		uint(buf[nodeNumber+6])
}
|
||||
|
||||
// nodeReader32 reads nodes whose records are 32 bits wide (8 bytes per
// node: left record in bytes 0-3, right record in bytes 4-7).
type nodeReader32 struct {
	buffer []byte
}

// readLeft returns the left record of the node starting at nodeNumber.
func (n nodeReader32) readLeft(nodeNumber uint) uint {
	buf := n.buffer
	return uint(buf[nodeNumber])<<24 |
		uint(buf[nodeNumber+1])<<16 |
		uint(buf[nodeNumber+2])<<8 |
		uint(buf[nodeNumber+3])
}

// readRight returns the right record of the node starting at nodeNumber.
func (n nodeReader32) readRight(nodeNumber uint) uint {
	buf := n.buffer
	return uint(buf[nodeNumber+4])<<24 |
		uint(buf[nodeNumber+5])<<16 |
		uint(buf[nodeNumber+6])<<8 |
		uint(buf[nodeNumber+7])
}
|
@ -0,0 +1,310 @@
|
||||
// Package maxminddb provides a reader for the MaxMind DB file format.
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
const (
	// NotFound is returned by LookupOffset when a matched root record offset
	// cannot be found.
	NotFound = ^uintptr(0)

	// dataSectionSeparatorSize is the number of zero bytes between the
	// search tree and the data section in a MaxMind DB file.
	dataSectionSeparatorSize = 16
)

// metadataStartMarker delimits the beginning of the metadata section;
// it is searched for from the end of the file.
var metadataStartMarker = []byte("\xAB\xCD\xEFMaxMind.com")
|
||||
|
||||
// Reader holds the data corresponding to the MaxMind DB file. Its only public
// field is Metadata, which contains the metadata from the MaxMind DB file.
//
// All of the methods on Reader are thread-safe. The struct may be safely
// shared across goroutines.
type Reader struct {
	hasMappedFile bool   // true when buffer is a memory-mapped file (see Open)
	buffer        []byte // the entire database; nil after Close
	nodeReader    nodeReader
	decoder       decoder // decoder positioned over the data section
	Metadata      Metadata
	ipv4Start     uint // node where IPv4 lookups begin in an IPv6 tree
	// ipv4StartBitDepth is the bit depth at which ipv4Start was found;
	// see setIPv4Start and cidr.
	ipv4StartBitDepth int
	nodeOffsetMult    uint // bytes per node: RecordSize / 4
}
|
||||
|
||||
// Metadata holds the metadata decoded from the MaxMind DB file. In particular
// it has the format version, the build time as Unix epoch time, the database
// type and description, the IP version supported, and a slice of the natural
// languages included.
type Metadata struct {
	BinaryFormatMajorVersion uint              `maxminddb:"binary_format_major_version"`
	BinaryFormatMinorVersion uint              `maxminddb:"binary_format_minor_version"`
	BuildEpoch               uint              `maxminddb:"build_epoch"`
	DatabaseType             string            `maxminddb:"database_type"`
	Description              map[string]string `maxminddb:"description"`
	IPVersion                uint              `maxminddb:"ip_version"`
	Languages                []string          `maxminddb:"languages"`
	NodeCount                uint              `maxminddb:"node_count"`
	RecordSize               uint              `maxminddb:"record_size"`
}
|
||||
|
||||
// FromBytes takes a byte slice corresponding to a MaxMind DB file and returns
|
||||
// a Reader structure or an error.
|
||||
func FromBytes(buffer []byte) (*Reader, error) {
|
||||
metadataStart := bytes.LastIndex(buffer, metadataStartMarker)
|
||||
|
||||
if metadataStart == -1 {
|
||||
return nil, newInvalidDatabaseError("error opening database: invalid MaxMind DB file")
|
||||
}
|
||||
|
||||
metadataStart += len(metadataStartMarker)
|
||||
metadataDecoder := decoder{buffer[metadataStart:]}
|
||||
|
||||
var metadata Metadata
|
||||
|
||||
rvMetdata := reflect.ValueOf(&metadata)
|
||||
_, err := metadataDecoder.decode(0, rvMetdata, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
searchTreeSize := metadata.NodeCount * metadata.RecordSize / 4
|
||||
dataSectionStart := searchTreeSize + dataSectionSeparatorSize
|
||||
dataSectionEnd := uint(metadataStart - len(metadataStartMarker))
|
||||
if dataSectionStart > dataSectionEnd {
|
||||
return nil, newInvalidDatabaseError("the MaxMind DB contains invalid metadata")
|
||||
}
|
||||
d := decoder{
|
||||
buffer[searchTreeSize+dataSectionSeparatorSize : metadataStart-len(metadataStartMarker)],
|
||||
}
|
||||
|
||||
nodeBuffer := buffer[:searchTreeSize]
|
||||
var nodeReader nodeReader
|
||||
switch metadata.RecordSize {
|
||||
case 24:
|
||||
nodeReader = nodeReader24{buffer: nodeBuffer}
|
||||
case 28:
|
||||
nodeReader = nodeReader28{buffer: nodeBuffer}
|
||||
case 32:
|
||||
nodeReader = nodeReader32{buffer: nodeBuffer}
|
||||
default:
|
||||
return nil, newInvalidDatabaseError("unknown record size: %d", metadata.RecordSize)
|
||||
}
|
||||
|
||||
reader := &Reader{
|
||||
buffer: buffer,
|
||||
nodeReader: nodeReader,
|
||||
decoder: d,
|
||||
Metadata: metadata,
|
||||
ipv4Start: 0,
|
||||
nodeOffsetMult: metadata.RecordSize / 4,
|
||||
}
|
||||
|
||||
reader.setIPv4Start()
|
||||
|
||||
return reader, err
|
||||
}
|
||||
|
||||
// setIPv4Start caches the node at which IPv4 addresses begin in an
// IPv6 search tree, by following 96 left branches (the zero bits of
// the IPv4-mapped prefix). No-op for IPv4-only databases.
func (r *Reader) setIPv4Start() {
	if r.Metadata.IPVersion != 6 {
		return
	}

	nodeCount := r.Metadata.NodeCount

	node := uint(0)
	i := 0
	// Stop early if we hit a leaf (node >= nodeCount) before 96 bits.
	for ; i < 96 && node < nodeCount; i++ {
		node = r.nodeReader.readLeft(node * r.nodeOffsetMult)
	}
	r.ipv4Start = node
	r.ipv4StartBitDepth = i
}
|
||||
|
||||
// Lookup retrieves the database record for ip and stores it in the value
// pointed to by result. If result is nil or not a pointer, an error is
// returned. If the data in the database record cannot be stored in result
// because of type differences, an UnmarshalTypeError is returned. If the
// database is invalid or otherwise cannot be read, an InvalidDatabaseError
// is returned.
func (r *Reader) Lookup(ip net.IP, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Lookup on a closed database")
	}
	pointer, _, _, err := r.lookupPointer(ip)
	// pointer == 0 means no record for this IP; result is untouched.
	if pointer == 0 || err != nil {
		return err
	}
	return r.retrieveData(pointer, result)
}
|
||||
|
||||
// LookupNetwork retrieves the database record for ip and stores it in the
// value pointed to by result. The network returned is the network associated
// with the data record in the database. The ok return value indicates whether
// the database contained a record for the ip.
//
// If result is nil or not a pointer, an error is returned. If the data in the
// database record cannot be stored in result because of type differences, an
// UnmarshalTypeError is returned. If the database is invalid or otherwise
// cannot be read, an InvalidDatabaseError is returned.
func (r *Reader) LookupNetwork(
	ip net.IP,
	result interface{},
) (network *net.IPNet, ok bool, err error) {
	if r.buffer == nil {
		return nil, false, errors.New("cannot call Lookup on a closed database")
	}
	pointer, prefixLength, ip, err := r.lookupPointer(ip)

	// The network is computed even when no record was found or an
	// error occurred, so callers always get the traversed prefix.
	network = r.cidr(ip, prefixLength)
	if pointer == 0 || err != nil {
		return network, false, err
	}

	return network, true, r.retrieveData(pointer, result)
}
|
||||
|
||||
// LookupOffset maps an argument net.IP to a corresponding record offset in the
// database. NotFound is returned if no such record is found, and a record may
// otherwise be extracted by passing the returned offset to Decode. LookupOffset
// is an advanced API, which exists to provide clients with a means to cache
// previously-decoded records.
func (r *Reader) LookupOffset(ip net.IP) (uintptr, error) {
	if r.buffer == nil {
		return 0, errors.New("cannot call LookupOffset on a closed database")
	}
	pointer, _, _, err := r.lookupPointer(ip)
	if pointer == 0 || err != nil {
		return NotFound, err
	}
	return r.resolveDataPointer(pointer)
}
|
||||
|
||||
// cidr builds the *net.IPNet for the network traversed during a lookup
// of ip with the given prefix length.
func (r *Reader) cidr(ip net.IP, prefixLength int) *net.IPNet {
	// This is necessary as the node that the IPv4 start is at may
	// be at a bit depth that is less that 96, i.e., ipv4Start points
	// to a leaf node. For instance, if a record was inserted at ::/8,
	// the ipv4Start would point directly at the leaf node for the
	// record and would have a bit depth of 8. This would not happen
	// with databases currently distributed by MaxMind as all of them
	// have an IPv4 subtree that is greater than a single node.
	if r.Metadata.IPVersion == 6 &&
		len(ip) == net.IPv4len &&
		r.ipv4StartBitDepth != 96 {
		return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(r.ipv4StartBitDepth, 128)}
	}

	mask := net.CIDRMask(prefixLength, len(ip)*8)
	return &net.IPNet{IP: ip.Mask(mask), Mask: mask}
}
|
||||
|
||||
// Decode the record at |offset| into |result|. The result value pointed to
// must be a data value that corresponds to a record in the database. This may
// include a struct representation of the data, a map capable of holding the
// data or an empty interface{} value.
//
// If result is a pointer to a struct, the struct need not include a field
// for every value that may be in the database. If a field is not present in
// the structure, the decoder will not decode that field, reducing the time
// required to decode the record.
//
// As a special case, a struct field of type uintptr will be used to capture
// the offset of the value. Decode may later be used to extract the stored
// value from the offset. MaxMind DBs are highly normalized: for example in
// the City database, all records of the same country will reference a
// single representative record for that country. This uintptr behavior allows
// clients to leverage this normalization in their own sub-record caching.
func (r *Reader) Decode(offset uintptr, result interface{}) error {
	if r.buffer == nil {
		return errors.New("cannot call Decode on a closed database")
	}
	return r.decode(offset, result)
}
|
||||
|
||||
// decode dispatches a record at offset to either the experimental
// deserializer path (when result implements deserializer) or the
// reflection-based decoder.
func (r *Reader) decode(offset uintptr, result interface{}) error {
	rv := reflect.ValueOf(result)
	if rv.Kind() != reflect.Ptr || rv.IsNil() {
		return errors.New("result param must be a pointer")
	}

	if dser, ok := result.(deserializer); ok {
		_, err := r.decoder.decodeToDeserializer(uint(offset), dser, 0, false)
		return err
	}

	_, err := r.decoder.decode(uint(offset), rv, 0)
	return err
}
|
||||
|
||||
// lookupPointer traverses the search tree for ip and returns the
// record pointer (0 when no record exists), the traversed prefix
// length in bits, the possibly 4-byte-normalized IP, and any error.
func (r *Reader) lookupPointer(ip net.IP) (uint, int, net.IP, error) {
	if ip == nil {
		return 0, 0, nil, errors.New("IP passed to Lookup cannot be nil")
	}

	// Normalize IPv4-in-IPv6 addresses to the 4-byte form.
	ipV4Address := ip.To4()
	if ipV4Address != nil {
		ip = ipV4Address
	}
	if len(ip) == 16 && r.Metadata.IPVersion == 4 {
		return 0, 0, ip, fmt.Errorf(
			"error looking up '%s': you attempted to look up an IPv6 address in an IPv4-only database",
			ip.String(),
		)
	}

	bitCount := uint(len(ip) * 8)

	var node uint
	if bitCount == 32 {
		// Skip the shared 96-bit IPv4-mapped prefix.
		node = r.ipv4Start
	}
	node, prefixLength := r.traverseTree(ip, node, bitCount)

	nodeCount := r.Metadata.NodeCount
	if node == nodeCount {
		// Record is empty
		return 0, prefixLength, ip, nil
	} else if node > nodeCount {
		// node > nodeCount indicates a data record pointer.
		return node, prefixLength, ip, nil
	}

	// Traversal stopped inside the tree without resolving: corrupt DB.
	return 0, prefixLength, ip, newInvalidDatabaseError("invalid node in search tree")
}
|
||||
|
||||
// traverseTree walks the binary search tree from `node`, branching on
// each bit of ip (most significant first), until a leaf/data record is
// reached or bitCount bits are consumed. Returns the final node value
// and the number of bits traversed.
func (r *Reader) traverseTree(ip net.IP, node, bitCount uint) (uint, int) {
	nodeCount := r.Metadata.NodeCount

	i := uint(0)
	for ; i < bitCount && node < nodeCount; i++ {
		// Extract bit i of the address, MSB-first within each byte.
		bit := uint(1) & (uint(ip[i>>3]) >> (7 - (i % 8)))

		offset := node * r.nodeOffsetMult
		if bit == 0 {
			node = r.nodeReader.readLeft(offset)
		} else {
			node = r.nodeReader.readRight(offset)
		}
	}

	return node, int(i)
}
|
||||
|
||||
func (r *Reader) retrieveData(pointer uint, result interface{}) error {
|
||||
offset, err := r.resolveDataPointer(pointer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return r.decode(offset, result)
|
||||
}
|
||||
|
||||
func (r *Reader) resolveDataPointer(pointer uint) (uintptr, error) {
|
||||
resolved := uintptr(pointer - r.Metadata.NodeCount - dataSectionSeparatorSize)
|
||||
|
||||
if resolved >= uintptr(len(r.buffer)) {
|
||||
return 0, newInvalidDatabaseError("the MaxMind DB file's search tree is corrupt")
|
||||
}
|
||||
return resolved, nil
|
||||
}
|
@ -0,0 +1,28 @@
|
||||
// +build appengine plan9
|
||||
|
||||
package maxminddb
|
||||
|
||||
import "io/ioutil"
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
// structure or an error. The database file is opened using a memory map,
// except on Google App Engine where mmap is not supported; there the database
// is loaded into memory. Use the Close method on the Reader object to return
// the resources to the system.
func Open(file string) (*Reader, error) {
	// NOTE(review): io/ioutil is deprecated (and denied by this repo's
	// depguard config); consider os.ReadFile — confirm minimum Go
	// version before changing.
	bytes, err := ioutil.ReadFile(file)
	if err != nil {
		return nil, err
	}

	return FromBytes(bytes)
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method sets the underlying buffer
// to nil, returning the resources to the system.
func (r *Reader) Close() error {
	// There is no mmap in this build; dropping the reference lets the GC
	// reclaim the in-memory copy of the database.
	r.buffer = nil
	return nil
}
|
@ -0,0 +1,66 @@
|
||||
//go:build !appengine && !plan9
|
||||
// +build !appengine,!plan9
|
||||
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Open takes a string path to a MaxMind DB file and returns a Reader
|
||||
// structure or an error. The database file is opened using a memory map,
|
||||
// except on Google App Engine where mmap is not supported; there the database
|
||||
// is loaded into memory. Use the Close method on the Reader object to return
|
||||
// the resources to the system.
|
||||
func Open(file string) (*Reader, error) {
|
||||
mapFile, err := os.Open(file)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats, err := mapFile.Stat()
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileSize := int(stats.Size())
|
||||
mmap, err := mmap(int(mapFile.Fd()), fileSize)
|
||||
if err != nil {
|
||||
_ = mapFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mapFile.Close(); err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := FromBytes(mmap)
|
||||
if err != nil {
|
||||
//nolint:errcheck // we prefer to return the original error
|
||||
munmap(mmap)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader.hasMappedFile = true
|
||||
runtime.SetFinalizer(reader, (*Reader).Close)
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
// Close unmaps the database file from virtual memory and returns the
// resources to the system. If called on a Reader opened using FromBytes
// or Open on Google App Engine, this method does nothing.
func (r *Reader) Close() error {
	var err error
	if r.hasMappedFile {
		// The finalizer is only needed while the mapping is live; clear it
		// so Close is not run twice.
		runtime.SetFinalizer(r, nil)
		r.hasMappedFile = false
		err = munmap(r.buffer)
	}
	r.buffer = nil
	return err
}
|
@ -0,0 +1,205 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// Internal structure used to keep track of nodes we still need to visit.
type netNode struct {
	ip      net.IP
	bit     uint // number of prefix bits fixed so far (depth in the tree)
	pointer uint // tree node number or data pointer
}

// Networks represents a set of subnets that we are iterating over.
type Networks struct {
	reader   *Reader
	nodes    []netNode // Nodes we still have to visit.
	lastNode netNode   // Node most recently yielded by Next.
	err      error     // First error encountered; stops iteration.

	skipAliasedNetworks bool
}
|
||||
|
||||
var (
	// Zero-length-prefix networks covering the entire IPv4 and IPv6 address
	// spaces; used as the default iteration roots by Networks.
	allIPv4 = &net.IPNet{IP: make(net.IP, 4), Mask: net.CIDRMask(0, 32)}
	allIPv6 = &net.IPNet{IP: make(net.IP, 16), Mask: net.CIDRMask(0, 128)}
)

// NetworksOption are options for Networks and NetworksWithin.
type NetworksOption func(*Networks)

// SkipAliasedNetworks is an option for Networks and NetworksWithin that
// makes them not iterate over aliases of the IPv4 subtree in an IPv6
// database, e.g., ::ffff:0:0/96, 2001::/32, and 2002::/16.
//
// You most likely want to set this. The only reason it isn't the default
// behavior is to provide backwards compatibility to existing users.
func SkipAliasedNetworks(networks *Networks) {
	networks.skipAliasedNetworks = true
}
|
||||
|
||||
// Networks returns an iterator that can be used to traverse all networks in
|
||||
// the database.
|
||||
//
|
||||
// Please note that a MaxMind DB may map IPv4 networks into several locations
|
||||
// in an IPv6 database. This iterator will iterate over all of these locations
|
||||
// separately. To only iterate over the IPv4 networks once, use the
|
||||
// SkipAliasedNetworks option.
|
||||
func (r *Reader) Networks(options ...NetworksOption) *Networks {
|
||||
var networks *Networks
|
||||
if r.Metadata.IPVersion == 6 {
|
||||
networks = r.NetworksWithin(allIPv6, options...)
|
||||
} else {
|
||||
networks = r.NetworksWithin(allIPv4, options...)
|
||||
}
|
||||
|
||||
return networks
|
||||
}
|
||||
|
||||
// NetworksWithin returns an iterator that can be used to traverse all networks
// in the database which are contained in a given network.
//
// Please note that a MaxMind DB may map IPv4 networks into several locations
// in an IPv6 database. This iterator will iterate over all of these locations
// separately. To only iterate over the IPv4 networks once, use the
// SkipAliasedNetworks option.
//
// If the provided network is contained within a network in the database, the
// iterator will iterate over exactly one network, the containing network.
func (r *Reader) NetworksWithin(network *net.IPNet, options ...NetworksOption) *Networks {
	if r.Metadata.IPVersion == 4 && network.IP.To4() == nil {
		return &Networks{
			err: fmt.Errorf(
				"error getting networks with '%s': you attempted to use an IPv6 network in an IPv4-only database",
				network.String(),
			),
		}
	}

	networks := &Networks{reader: r}
	for _, option := range options {
		option(networks)
	}

	ip := network.IP
	prefixLength, _ := network.Mask.Size()

	// In an IPv6 database a 4-byte IP must be widened to 16 bytes. When
	// skipping aliases we build the address in the IPv4 subtree (::/96
	// prefix) directly; otherwise To16 produces the ::ffff:0:0/96 mapping.
	if r.Metadata.IPVersion == 6 && len(ip) == net.IPv4len {
		if networks.skipAliasedNetworks {
			ip = net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ip[0], ip[1], ip[2], ip[3]}
		} else {
			ip = ip.To16()
		}
		prefixLength += 96
	}

	// Descend to the node covering the requested prefix and seed the
	// iteration stack with it.
	pointer, bit := r.traverseTree(ip, 0, uint(prefixLength))
	networks.nodes = []netNode{
		{
			ip:      ip,
			bit:     uint(bit),
			pointer: pointer,
		},
	}

	return networks
}
|
||||
|
||||
// Next prepares the next network for reading with the Network method. It
// returns true if there is another network to be processed and false if there
// are no more networks or if there is an error.
func (n *Networks) Next() bool {
	if n.err != nil {
		return false
	}
	// Depth-first traversal: pop a pending node, descend left while pushing
	// the right branch, and stop when a data record is reached.
	for len(n.nodes) > 0 {
		node := n.nodes[len(n.nodes)-1]
		n.nodes = n.nodes[:len(n.nodes)-1]

		// pointer == NodeCount marks "no record"; anything else keeps
		// descending or yields data.
		for node.pointer != n.reader.Metadata.NodeCount {
			// This skips IPv4 aliases without hardcoding the networks that the writer
			// currently aliases.
			if n.skipAliasedNetworks && n.reader.ipv4Start != 0 &&
				node.pointer == n.reader.ipv4Start && !isInIPv4Subtree(node.ip) {
				break
			}

			// Pointers beyond the node count are data records.
			if node.pointer > n.reader.Metadata.NodeCount {
				n.lastNode = node
				return true
			}
			ipRight := make(net.IP, len(node.ip))
			copy(ipRight, node.ip)
			if len(ipRight) <= int(node.bit>>3) {
				n.err = newInvalidDatabaseError(
					"invalid search tree at %v/%v", ipRight, node.bit)
				return false
			}
			// Set the next bit to form the right-hand branch's address.
			ipRight[node.bit>>3] |= 1 << (7 - (node.bit % 8))

			offset := node.pointer * n.reader.nodeOffsetMult
			rightPointer := n.reader.nodeReader.readRight(offset)

			node.bit++
			n.nodes = append(n.nodes, netNode{
				pointer: rightPointer,
				ip:      ipRight,
				bit:     node.bit,
			})

			node.pointer = n.reader.nodeReader.readLeft(offset)
		}
	}

	return false
}
|
||||
|
||||
// Network returns the current network or an error if there is a problem
// decoding the data for the network. It takes a pointer to a result value to
// decode the network's data into.
func (n *Networks) Network(result interface{}) (*net.IPNet, error) {
	if n.err != nil {
		return nil, n.err
	}
	if err := n.reader.retrieveData(n.lastNode.pointer, result); err != nil {
		return nil, err
	}

	ip := n.lastNode.ip
	prefixLength := int(n.lastNode.bit)

	// We do this because uses of SkipAliasedNetworks expect the IPv4 networks
	// to be returned as IPv4 networks. If we are not skipping aliased
	// networks, then the user will get IPv4 networks from the ::FFFF:0:0/96
	// network as Go automatically converts those.
	if n.skipAliasedNetworks && isInIPv4Subtree(ip) {
		// Drop the 12 zero bytes and the 96 prefix bits they represent.
		ip = ip[12:]
		prefixLength -= 96
	}

	return &net.IPNet{
		IP:   ip,
		Mask: net.CIDRMask(prefixLength, len(ip)*8),
	}, nil
}
|
||||
|
||||
// Err returns an error, if any, that was encountered during iteration.
func (n *Networks) Err() error {
	return n.err
}
|
||||
|
||||
// isInIPv4Subtree returns true if the IP is an IPv6 address in the database's
|
||||
// IPv4 subtree.
|
||||
func isInIPv4Subtree(ip net.IP) bool {
|
||||
if len(ip) != 16 {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < 12; i++ {
|
||||
if ip[i] != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
@ -0,0 +1,201 @@
|
||||
package maxminddb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// verifier performs consistency checks over a Reader's underlying database.
type verifier struct {
	reader *Reader
}
|
||||
|
||||
// Verify checks that the database is valid. It validates the search tree,
// the data section, and the metadata section. This verifier is stricter than
// the specification and may return errors on databases that are readable.
func (r *Reader) Verify() error {
	v := verifier{r}
	if err := v.verifyMetadata(); err != nil {
		return err
	}

	err := v.verifyDatabase()
	// Keep the reader alive for the full duration of verification.
	runtime.KeepAlive(v.reader)
	return err
}
|
||||
|
||||
// verifyMetadata checks each metadata field in turn, returning on the first
// field that does not match what this verifier requires: binary format 2.0,
// a non-empty database type and description, IP version 4 or 6, record size
// 24/28/32, and a nonzero node count.
func (v *verifier) verifyMetadata() error {
	metadata := v.reader.Metadata

	if metadata.BinaryFormatMajorVersion != 2 {
		return testError(
			"binary_format_major_version",
			2,
			metadata.BinaryFormatMajorVersion,
		)
	}

	if metadata.BinaryFormatMinorVersion != 0 {
		return testError(
			"binary_format_minor_version",
			0,
			metadata.BinaryFormatMinorVersion,
		)
	}

	if metadata.DatabaseType == "" {
		return testError(
			"database_type",
			"non-empty string",
			metadata.DatabaseType,
		)
	}

	if len(metadata.Description) == 0 {
		return testError(
			"description",
			"non-empty slice",
			metadata.Description,
		)
	}

	if metadata.IPVersion != 4 && metadata.IPVersion != 6 {
		return testError(
			"ip_version",
			"4 or 6",
			metadata.IPVersion,
		)
	}

	if metadata.RecordSize != 24 &&
		metadata.RecordSize != 28 &&
		metadata.RecordSize != 32 {
		return testError(
			"record_size",
			"24, 28, or 32",
			metadata.RecordSize,
		)
	}

	if metadata.NodeCount == 0 {
		return testError(
			"node_count",
			"positive integer",
			metadata.NodeCount,
		)
	}
	return nil
}
|
||||
|
||||
func (v *verifier) verifyDatabase() error {
|
||||
offsets, err := v.verifySearchTree()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := v.verifyDataSectionSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return v.verifyDataSection(offsets)
|
||||
}
|
||||
|
||||
// verifySearchTree walks every network in the database, resolving each
// record's data pointer, and returns the set of data-section offsets the
// tree references (used later to cross-check the data section).
func (v *verifier) verifySearchTree() (map[uint]bool, error) {
	offsets := make(map[uint]bool)

	it := v.reader.Networks()
	for it.Next() {
		offset, err := v.reader.resolveDataPointer(it.lastNode.pointer)
		if err != nil {
			return nil, err
		}
		offsets[uint(offset)] = true
	}
	if err := it.Err(); err != nil {
		return nil, err
	}
	return offsets, nil
}
|
||||
|
||||
func (v *verifier) verifyDataSectionSeparator() error {
|
||||
separatorStart := v.reader.Metadata.NodeCount * v.reader.Metadata.RecordSize / 4
|
||||
|
||||
separator := v.reader.buffer[separatorStart : separatorStart+dataSectionSeparatorSize]
|
||||
|
||||
for _, b := range separator {
|
||||
if b != 0 {
|
||||
return newInvalidDatabaseError("unexpected byte in data separator: %v", separator)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyDataSection decodes every record in the data section and checks it
// against the offsets collected from the search tree: each record must be
// pointed to by the tree, every tree pointer must land on a record, decoding
// must always advance, and the final record must end exactly at the buffer's
// end.
func (v *verifier) verifyDataSection(offsets map[uint]bool) error {
	pointerCount := len(offsets)

	decoder := v.reader.decoder

	var offset uint
	bufferLen := uint(len(decoder.buffer))
	for offset < bufferLen {
		var data interface{}
		rv := reflect.ValueOf(&data)
		newOffset, err := decoder.decode(offset, rv, 0)
		if err != nil {
			return newInvalidDatabaseError(
				"received decoding error (%v) at offset of %v",
				err,
				offset,
			)
		}
		// A decode that does not advance would loop forever; treat it as
		// corruption.
		if newOffset <= offset {
			return newInvalidDatabaseError(
				"data section offset unexpectedly went from %v to %v",
				offset,
				newOffset,
			)
		}

		pointer := offset

		if _, ok := offsets[pointer]; !ok {
			return newInvalidDatabaseError(
				"found data (%v) at %v that the search tree does not point to",
				data,
				pointer,
			)
		}
		delete(offsets, pointer)

		offset = newOffset
	}

	if offset != bufferLen {
		return newInvalidDatabaseError(
			"unexpected data at the end of the data section (last offset: %v, end: %v)",
			offset,
			bufferLen,
		)
	}

	// Any offsets left over were referenced by the tree but never decoded.
	if len(offsets) != 0 {
		return newInvalidDatabaseError(
			"found %v pointers (of %v) in the search tree that we did not see in the data section",
			len(offsets),
			pointerCount,
		)
	}
	return nil
}
|
||||
|
||||
func testError(
|
||||
field string,
|
||||
expected interface{},
|
||||
actual interface{},
|
||||
) error {
|
||||
return newInvalidDatabaseError(
|
||||
"%v - Expected: %v Actual: %v",
|
||||
field,
|
||||
expected,
|
||||
actual,
|
||||
)
|
||||
}
|
@ -0,0 +1,39 @@
|
||||
package goip
|
||||
|
||||
import (
|
||||
"go.dtapp.net/goip/geoip"
|
||||
"go.dtapp.net/goip/ip2region"
|
||||
"go.dtapp.net/goip/ip2region_v2"
|
||||
"go.dtapp.net/goip/ipv6wry"
|
||||
"go.dtapp.net/goip/qqwry"
|
||||
)
|
||||
|
||||
// Client bundles every IP database client this package wraps.
type Client struct {
	ip2regionV2Client *ip2region_v2.Client // ip2region v2 (xdb) client
	ip2regionClient   *ip2region.Client    // ip2region v1 client
	qqwryClient       *qqwry.Client        // qqwry client
	geoIpClient       *geoip.Client        // MaxMind GeoIP2 client
	ipv6wryClient     *ipv6wry.Client      // ipv6wry client
}
|
||||
|
||||
// NewIp instantiates a Client with all bundled databases.
//
// NOTE(review): the errors from ip2region_v2.New and geoip.New are
// discarded, so the corresponding fields may be nil on failure — confirm
// callers tolerate that before depending on them.
func NewIp() *Client {

	c := &Client{}

	c.ip2regionV2Client, _ = ip2region_v2.New()

	c.ip2regionClient = ip2region.New()

	c.qqwryClient = qqwry.New()

	c.geoIpClient, _ = geoip.New()

	c.ipv6wryClient = ipv6wry.New()

	return c
}
|
||||
|
||||
// Close closes the embedded GeoIP2 readers. The other clients are not
// closed here.
func (c *Client) Close() {
	c.geoIpClient.Close()
}
|
@ -1,3 +1,3 @@
|
||||
package goip
|
||||
|
||||
const Version = "1.0.30"
|
||||
const Version = "1.0.33"
|
||||
|
Binary file not shown.
After Width: | Height: | Size: 67 MiB |
Binary file not shown.
@ -0,0 +1,52 @@
|
||||
package geoip
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"github.com/oschwald/geoip2-golang"
|
||||
)
|
||||
|
||||
//go:embed GeoLite2-ASN.mmdb
var asnBuff []byte

//go:embed GeoLite2-City.mmdb
var cityBuff []byte

//go:embed GeoLite2-Country.mmdb
var countryBuff []byte

// Client holds one geoip2 reader per embedded GeoLite2 database.
type Client struct {
	asnDb     *geoip2.Reader // GeoLite2-ASN reader
	cityDb    *geoip2.Reader // GeoLite2-City reader
	countryDb *geoip2.Reader // GeoLite2-Country reader
}
|
||||
|
||||
func New() (*Client, error) {
|
||||
|
||||
var err error
|
||||
c := &Client{}
|
||||
|
||||
c.asnDb, err = geoip2.FromBytes(asnBuff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.cityDb, err = geoip2.FromBytes(cityBuff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.countryDb, err = geoip2.FromBytes(countryBuff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, err
|
||||
}
|
||||
|
||||
func (c *Client) Close() {
|
||||
|
||||
c.asnDb.Close()
|
||||
c.cityDb.Close()
|
||||
c.countryDb.Close()
|
||||
|
||||
}
|
@ -0,0 +1,23 @@
|
||||
package geoip
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// OnlineDownload fetches downloadUrl over HTTP and writes the response body
// to ./downloadName, panicking on any failure.
func OnlineDownload(downloadUrl string, downloadName string) {
	resp, err := http.Get(downloadUrl)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Previously this error was ignored, so a truncated download could
		// be written to disk as if it were the complete database.
		panic(err)
	}

	err = ioutil.WriteFile("./"+downloadName, body, 0644)
	if err != nil {
		panic(err)
	}
	log.Printf("已下载最新 ip2region.xdb 数据库 %s ", "./"+downloadName)
}
|
@ -0,0 +1,31 @@
|
||||
package geoip
|
||||
|
||||
import (
|
||||
"go.dtapp.net/gostring"
|
||||
)
|
||||
|
||||
var licenseKey = "" // MaxMind license key substituted into the URLs below

// GetGeoLite2AsnDownloadUrl returns the GeoLite2-ASN tar.gz download URL
// with the license key filled in.
func GetGeoLite2AsnDownloadUrl() string {
	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}

//func GetGeoLite2AsnCsvDownloadUrl() string {
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}

// GetGeoLite2CityDownloadUrl returns the GeoLite2-City tar.gz download URL
// with the license key filled in.
func GetGeoLite2CityDownloadUrl() string {
	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}

//func GetGeoLite2CityCsvDownloadUrl() string {
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}

// GetGeoLite2CountryDownloadUrl returns the GeoLite2-Country tar.gz download
// URL with the license key filled in.
func GetGeoLite2CountryDownloadUrl() string {
	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=YOUR_LICENSE_KEY&suffix=tar.gz", "YOUR_LICENSE_KEY", licenseKey)
}

//func GetGeoLite2CountryCsvDownloadUrl() string {
//	return gostring.Replace("https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country-CSV&license_key=YOUR_LICENSE_KEY&suffix=zip", "YOUR_LICENSE_KEY", licenseKey)
//}
|
@ -0,0 +1,66 @@
|
||||
package geoip
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"net"
|
||||
)
|
||||
|
||||
// QueryCityResult is the flattened response for a GeoLite2-City lookup.
type QueryCityResult struct {
	Ip        string `json:"ip,omitempty"` // queried IP
	Continent struct {
		Code string `json:"code,omitempty"` // continent code
		Name string `json:"name,omitempty"` // continent name
	} `json:"continent,omitempty"`
	Country struct {
		Code string `json:"code,omitempty"` // country code
		Name string `json:"name,omitempty"` // country name
	} `json:"country,omitempty"`
	Province struct {
		Code string `json:"code,omitempty"` // province code
		Name string `json:"name,omitempty"` // province name
	} `json:"province,omitempty"`
	City struct {
		Name string `json:"name,omitempty"` // city name
	} `json:"city,omitempty"`
	Location struct {
		TimeZone  string  `json:"time_zone,omitempty"` // time zone
		Latitude  float64 `json:"latitude,omitempty"`  // latitude
		Longitude float64 `json:"longitude,omitempty"` // longitude
	} `json:"location,omitempty"`
}
|
||||
|
||||
// QueryCity looks ipAddress up in the embedded GeoLite2-City database and
// flattens the zh-CN localized names into a QueryCityResult.
func (c *Client) QueryCity(ipAddress net.IP) (result QueryCityResult, err error) {

	record, err := c.cityDb.City(ipAddress)
	if err != nil {
		return QueryCityResult{}, err
	}

	// queried IP
	result.Ip = ipAddress.String()

	// continent
	result.Continent.Code = record.Continent.Code
	result.Continent.Name = record.Continent.Names["zh-CN"]

	// country
	result.Country.Code = record.Country.IsoCode
	result.Country.Name = record.Country.Names["zh-CN"]

	// province (first subdivision, when present)
	if len(record.Subdivisions) > 0 {
		result.Province.Code = record.Subdivisions[0].IsoCode
		result.Province.Name = record.Subdivisions[0].Names["zh-CN"]
	}

	// city
	result.City.Name = record.City.Names["zh-CN"]

	// location
	result.Location.TimeZone = record.Location.TimeZone
	result.Location.Latitude = record.Location.Latitude
	result.Location.Longitude = record.Location.Longitude

	return result, err
}
|
@ -1,63 +0,0 @@
|
||||
package goip
|
||||
|
||||
import (
|
||||
"go.dtapp.net/goip/ip2region"
|
||||
v4 "go.dtapp.net/goip/v4"
|
||||
v6 "go.dtapp.net/goip/v6"
|
||||
"log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
V4Region ip2region.Ip2Region // IPV4
|
||||
V4db v4.Pointer // IPV4
|
||||
V6db v6.Pointer // IPV6
|
||||
}
|
||||
|
||||
// NewIp 实例化
|
||||
func NewIp() *Client {
|
||||
app := &Client{}
|
||||
v4Num := app.V4db.InitIPV4Data()
|
||||
log.Printf("IPV4 库加载完成 共加载:%d 条 IP 记录\n", v4Num)
|
||||
v6Num := app.V6db.InitIPV4Data()
|
||||
log.Printf("IPV6 库加载完成 共加载:%d 条 IP 记录\n", v6Num)
|
||||
return app
|
||||
}
|
||||
|
||||
func (c *Client) Ipv4(ip string) (res v4.Result, resInfo ip2region.IpInfo) {
|
||||
res = c.V4db.Find(ip)
|
||||
resInfo, _ = c.V4Region.MemorySearch(ip)
|
||||
return res, resInfo
|
||||
}
|
||||
|
||||
func (c *Client) Ipv6(ip string) (res v6.Result) {
|
||||
res = c.V6db.Find(ip)
|
||||
return res
|
||||
}
|
||||
|
||||
func (c *Client) isIpv4OrIpv6(ip string) string {
|
||||
if len(ip) < 7 {
|
||||
return ""
|
||||
}
|
||||
arrIpv4 := strings.Split(ip, ".")
|
||||
if len(arrIpv4) == 4 {
|
||||
//. 判断IPv4
|
||||
for _, val := range arrIpv4 {
|
||||
if !c.CheckIpv4(val) {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
return ipv4
|
||||
}
|
||||
arrIpv6 := strings.Split(ip, ":")
|
||||
if len(arrIpv6) == 8 {
|
||||
// 判断Ipv6
|
||||
for _, val := range arrIpv6 {
|
||||
if !c.CheckIpv6(val) {
|
||||
return "Neither"
|
||||
}
|
||||
}
|
||||
return ipv6
|
||||
}
|
||||
return ""
|
||||
}
|
@ -0,0 +1,103 @@
|
||||
package ip2region
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"errors"
|
||||
"go.dtapp.net/gostring"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// IndexBlockLength is the on-disk size of one index block.
	IndexBlockLength = 12
)

//go:embed ip2region.db
var dbBuff []byte

// Client wraps the ip2region v1 database state.
type Client struct {
	// db file handler
	dbFileHandler *os.File

	//header block info

	headerSip []int64
	headerPtr []int64
	headerLen int64

	// super block index info
	firstIndexPtr int64
	lastIndexPtr  int64
	totalBlocks   int64

	// for memory mode only
	// the original db binary string

	dbFile string
}
|
||||
|
||||
func New() *Client {
|
||||
|
||||
c := &Client{}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// 获取Ip信息
|
||||
func getIpInfo(ipStr string, cityId int64, line []byte) (result QueryResult) {
|
||||
|
||||
lineSlice := strings.Split(string(line), "|")
|
||||
length := len(lineSlice)
|
||||
result.CityId = cityId
|
||||
if length < 5 {
|
||||
for i := 0; i <= 5-length; i++ {
|
||||
lineSlice = append(lineSlice, "")
|
||||
}
|
||||
}
|
||||
|
||||
if lineSlice[0] != "0" {
|
||||
result.Country = gostring.SpaceAndLineBreak(lineSlice[0])
|
||||
}
|
||||
if lineSlice[1] != "0" {
|
||||
result.Region = gostring.SpaceAndLineBreak(lineSlice[1])
|
||||
}
|
||||
if lineSlice[2] != "0" {
|
||||
result.Province = gostring.SpaceAndLineBreak(lineSlice[2])
|
||||
}
|
||||
if lineSlice[3] != "0" {
|
||||
result.City = gostring.SpaceAndLineBreak(lineSlice[3])
|
||||
}
|
||||
if lineSlice[4] != "0" {
|
||||
result.Isp = gostring.SpaceAndLineBreak(lineSlice[4])
|
||||
}
|
||||
|
||||
result.Ip = ipStr
|
||||
return result
|
||||
}
|
||||
|
||||
// getLong decodes the four bytes of b starting at offset as a little-endian
// unsigned 32-bit value, widened to int64.
func getLong(b []byte, offset int64) int64 {
	var val int64
	for i := uint(0); i < 4; i++ {
		val |= int64(b[offset+int64(i)]) << (8 * i)
	}
	return val
}
|
||||
|
||||
// ip2long converts a dotted-quad IPv4 string to its 32-bit integer value.
// It returns an error for malformed input, including non-numeric or
// out-of-range octets (previously those were silently folded into a wrong
// value because the ParseInt error was discarded).
func ip2long(IpStr string) (int64, error) {
	bits := strings.Split(IpStr, ".")
	if len(bits) != 4 {
		return 0, errors.New("ip format error")
	}

	var sum int64
	for i, n := range bits {
		bit, err := strconv.ParseInt(n, 10, 64)
		if err != nil || bit < 0 || bit > 255 {
			return 0, errors.New("ip format error")
		}
		sum += bit << uint(24-8*i)
	}

	return sum, nil
}
|
@ -0,0 +1,26 @@
|
||||
package ip2region_v2
|
||||
|
||||
import _ "embed"
|
||||
|
||||
//go:embed ip2region.xdb
var cBuff []byte

// Client wraps a fully in-memory ip2region v2 (xdb) searcher.
type Client struct {
	db *Searcher
}
|
||||
|
||||
// New builds a Client whose searcher runs entirely off the embedded xdb
// buffer, so lookups need no file I/O.
func New() (*Client, error) {

	var err error
	c := &Client{}

	// 1. The whole xdb is already in memory via go:embed (cBuff).

	// 2. Create a fully memory-based query object from the global cBuff.
	c.db, err = NewWithBuffer(cBuff)
	if err != nil {
		return nil, err
	}

	return c, err
}
|
@ -0,0 +1,23 @@
|
||||
package ip2region_v2
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// OnlineDownload fetches the latest ip2region.xdb from the mirrored GitHub
// URL and writes it to ./ip2region.xdb, panicking on any failure.
func OnlineDownload() {
	resp, err := http.Get("https://ghproxy.com/?q=https://github.com/lionsoul2014/ip2region/blob/master/data/ip2region.xdb?raw=true")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Previously this error was ignored, so a truncated download could
		// be written to disk as if it were the complete database.
		panic(err)
	}

	err = ioutil.WriteFile("./ip2region.xdb", body, 0644)
	if err != nil {
		panic(err)
	}
	log.Printf("已下载最新 ip2region.xdb 数据库 %s ", "./ip2region.xdb")
}
|
Binary file not shown.
@ -0,0 +1,49 @@
|
||||
package ip2region_v2
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"go.dtapp.net/gostring"
|
||||
"net"
|
||||
)
|
||||
|
||||
// QueryResult is the parsed region record for one IP lookup.
type QueryResult struct {
	Ip       string `json:"ip,omitempty"`       // queried IP
	Country  string `json:"country,omitempty"`  // country
	Province string `json:"province,omitempty"` // province
	City     string `json:"city,omitempty"`     // city
	Operator string `json:"operator,omitempty"` // carrier / ISP
}
|
||||
|
||||
func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {
|
||||
|
||||
// 备注:并发使用,用整个 xdb 缓存创建的 searcher 对象可以安全用于并发。
|
||||
|
||||
str, err := c.db.SearchByStr(ipAddress.String())
|
||||
if err != nil {
|
||||
return QueryResult{}, err
|
||||
}
|
||||
|
||||
split := gostring.Split(str, "|")
|
||||
if len(split) <= 0 {
|
||||
return QueryResult{}, err
|
||||
}
|
||||
|
||||
result.Ip = ipAddress.String()
|
||||
|
||||
result.Country = split[0]
|
||||
result.Province = split[2]
|
||||
if result.Province == "0" {
|
||||
result.Province = ""
|
||||
}
|
||||
result.City = split[3]
|
||||
if result.City == "0" {
|
||||
result.City = ""
|
||||
}
|
||||
result.Operator = split[4]
|
||||
if result.Operator == "0" {
|
||||
result.Operator = ""
|
||||
}
|
||||
|
||||
return result, err
|
||||
}
|
@ -0,0 +1,240 @@
|
||||
package ip2region_v2
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
const (
	// HeaderInfoLength is the size of the fixed header block at the start
	// of an xdb file.
	HeaderInfoLength = 256
	// The vector index is a 256x256 table whose entries are 8 bytes each
	// (two 4-byte segment pointers, read as idx and idx+4 in Search).
	VectorIndexRows = 256
	VectorIndexCols = 256
	VectorIndexSize = 8
	// SegmentIndexBlockSize is the on-disk size of one segment index entry.
	SegmentIndexBlockSize = 14
)
||||
|
||||
// --- Index policy define

// IndexPolicy identifies how an xdb file indexes its segments.
type IndexPolicy int

const (
	VectorIndexPolicy IndexPolicy = 1
	BTreeIndexPolicy  IndexPolicy = 2
)

// String returns a human-readable name for the policy.
func (i IndexPolicy) String() string {
	if i == VectorIndexPolicy {
		return "VectorIndex"
	}
	if i == BTreeIndexPolicy {
		return "BtreeIndex"
	}
	return "unknown"
}
|
||||
|
||||
// --- Header define

// Header is the decoded fixed-size header at the start of an xdb file.
type Header struct {
	// data []byte
	Version       uint16
	IndexPolicy   IndexPolicy
	CreatedAt     uint32
	StartIndexPtr uint32
	EndIndexPtr   uint32
}

// NewHeader decodes the first 16 bytes of input as a little-endian xdb
// header; it fails when input is shorter than 16 bytes.
func NewHeader(input []byte) (*Header, error) {
	if len(input) < 16 {
		return nil, fmt.Errorf("invalid input buffer")
	}

	return &Header{
		Version:       binary.LittleEndian.Uint16(input),
		IndexPolicy:   IndexPolicy(binary.LittleEndian.Uint16(input[2:])),
		CreatedAt:     binary.LittleEndian.Uint32(input[4:]),
		StartIndexPtr: binary.LittleEndian.Uint32(input[8:]),
		EndIndexPtr:   binary.LittleEndian.Uint32(input[12:]),
	}, nil
}
|
||||
|
||||
// --- searcher implementation
|
||||
|
||||
type Searcher struct {
|
||||
handle *os.File
|
||||
|
||||
// header info
|
||||
header *Header
|
||||
ioCount int
|
||||
|
||||
// use it only when this feature enabled.
|
||||
// Preload the vector index will reduce the number of IO operations
|
||||
// thus speedup the search process
|
||||
vectorIndex []byte
|
||||
|
||||
// content buffer.
|
||||
// running with the whole xdb file cached
|
||||
contentBuff []byte
|
||||
}
|
||||
|
||||
func baseNew(dbFile string, vIndex []byte, cBuff []byte) (*Searcher, error) {
|
||||
var err error
|
||||
|
||||
// content buff first
|
||||
if cBuff != nil {
|
||||
return &Searcher{
|
||||
vectorIndex: nil,
|
||||
contentBuff: cBuff,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// open the xdb binary file
|
||||
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Searcher{
|
||||
handle: handle,
|
||||
vectorIndex: vIndex,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewWithFileOnly returns a file-backed Searcher with no cached index; every
// lookup reads from dbFile.
func NewWithFileOnly(dbFile string) (*Searcher, error) {
	return baseNew(dbFile, nil, nil)
}

// NewWithVectorIndex returns a file-backed Searcher that resolves the vector
// index from the preloaded vIndex buffer.
func NewWithVectorIndex(dbFile string, vIndex []byte) (*Searcher, error) {
	return baseNew(dbFile, vIndex, nil)
}

// NewWithBuffer returns a fully in-memory Searcher backed by cBuff.
func NewWithBuffer(cBuff []byte) (*Searcher, error) {
	return baseNew("", nil, cBuff)
}
|
||||
|
||||
func (s *Searcher) Close() {
|
||||
if s.handle != nil {
|
||||
err := s.handle.Close()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetIOCount returns the number of read operations performed by the last
// Search call.
func (s *Searcher) GetIOCount() int {
	return s.ioCount
}

// SearchByStr validates str via CheckIP and searches the resulting numeric
// IP.
func (s *Searcher) SearchByStr(str string) (string, error) {
	ip, err := CheckIP(str)
	if err != nil {
		return "", err
	}

	return s.Search(ip)
}
|
||||
|
||||
// Search find the region for the specified long ip
|
||||
func (s *Searcher) Search(ip uint32) (string, error) {
|
||||
// reset the global ioCount
|
||||
s.ioCount = 0
|
||||
|
||||
// locate the segment index block based on the vector index
|
||||
var il0 = (ip >> 24) & 0xFF
|
||||
var il1 = (ip >> 16) & 0xFF
|
||||
var idx = il0*VectorIndexCols*VectorIndexSize + il1*VectorIndexSize
|
||||
var sPtr, ePtr = uint32(0), uint32(0)
|
||||
if s.vectorIndex != nil {
|
||||
sPtr = binary.LittleEndian.Uint32(s.vectorIndex[idx:])
|
||||
ePtr = binary.LittleEndian.Uint32(s.vectorIndex[idx+4:])
|
||||
} else if s.contentBuff != nil {
|
||||
sPtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx:])
|
||||
ePtr = binary.LittleEndian.Uint32(s.contentBuff[HeaderInfoLength+idx+4:])
|
||||
} else {
|
||||
// read the vector index block
|
||||
var buff = make([]byte, VectorIndexSize)
|
||||
err := s.read(int64(HeaderInfoLength+idx), buff)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read vector index block at %d: %w", HeaderInfoLength+idx, err)
|
||||
}
|
||||
|
||||
sPtr = binary.LittleEndian.Uint32(buff)
|
||||
ePtr = binary.LittleEndian.Uint32(buff[4:])
|
||||
}
|
||||
|
||||
// fmt.Printf("sPtr=%d, ePtr=%d", sPtr, ePtr)
|
||||
|
||||
// binary search the segment index to get the region
|
||||
var dataLen, dataPtr = 0, uint32(0)
|
||||
var buff = make([]byte, SegmentIndexBlockSize)
|
||||
var l, h = 0, int((ePtr - sPtr) / SegmentIndexBlockSize)
|
||||
for l <= h {
|
||||
m := (l + h) >> 1
|
||||
p := sPtr + uint32(m*SegmentIndexBlockSize)
|
||||
err := s.read(int64(p), buff)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read segment index at %d: %w", p, err)
|
||||
}
|
||||
|
||||
// decode the data step by step to reduce the unnecessary operations
|
||||
sip := binary.LittleEndian.Uint32(buff)
|
||||
if ip < sip {
|
||||
h = m - 1
|
||||
} else {
|
||||
eip := binary.LittleEndian.Uint32(buff[4:])
|
||||
if ip > eip {
|
||||
l = m + 1
|
||||
} else {
|
||||
dataLen = int(binary.LittleEndian.Uint16(buff[8:]))
|
||||
dataPtr = binary.LittleEndian.Uint32(buff[10:])
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//fmt.Printf("dataLen: %d, dataPtr: %d", dataLen, dataPtr)
|
||||
if dataLen == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// load and return the region data
|
||||
var regionBuff = make([]byte, dataLen)
|
||||
err := s.read(int64(dataPtr), regionBuff)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("read region at %d: %w", dataPtr, err)
|
||||
}
|
||||
|
||||
return string(regionBuff), nil
|
||||
}
|
||||
|
||||
// do the data read operation based on the setting.
// content buffer first or will read from the file.
// this operation will invoke the Seek for file based read.
//
// read fills buff completely starting at offset, returning an error on
// any failure or short read. Each file-backed call increments s.ioCount.
func (s *Searcher) read(offset int64, buff []byte) error {
	if s.contentBuff != nil {
		cLen := copy(buff, s.contentBuff[offset:])
		if cLen != len(buff) {
			return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
		}
	} else {
		// 0 == io.SeekStart: offset is relative to the start of the file
		_, err := s.handle.Seek(offset, 0)
		if err != nil {
			return fmt.Errorf("seek to %d: %w", offset, err)
		}

		s.ioCount++
		rLen, err := s.handle.Read(buff)
		if err != nil {
			return fmt.Errorf("handle read: %w", err)
		}

		// NOTE(review): a single Read may legally return fewer bytes than
		// requested; io.ReadFull would retry instead of failing here.
		if rLen != len(buff) {
			return fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
		}
	}

	return nil
}
|
@ -0,0 +1,165 @@
|
||||
package ip2region_v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// shiftIndex maps a dotted-quad part index to its bit position in the
// resulting big-endian uint32.
var shiftIndex = []int{24, 16, 8, 0}

// CheckIP parses a dotted-quad IPv4 string and returns it as a
// big-endian uint32. It returns a descriptive error when the string does
// not have exactly four parts or when any part is not an integer in
// [0, 255]. (Fixes the "bettween" typo in the range error message.)
func CheckIP(ip string) (uint32, error) {
	ps := strings.Split(ip, ".")
	if len(ps) != 4 {
		return 0, fmt.Errorf("invalid ip address `%s`", ip)
	}

	var val uint32
	for i, s := range ps {
		d, err := strconv.Atoi(s)
		if err != nil {
			return 0, fmt.Errorf("the %dth part `%s` is not an integer", i, s)
		}

		if d < 0 || d > 255 {
			return 0, fmt.Errorf("the %dth part `%s` should be an integer between 0 and 255", i, s)
		}

		val |= uint32(d) << shiftIndex[i]
	}

	// convert the ip to integer
	return val, nil
}
|
||||
|
||||
// Long2IP converts a big-endian uint32 back into dotted-quad string form.
func Long2IP(ip uint32) string {
	a := ip >> 24 & 0xFF
	b := ip >> 16 & 0xFF
	c := ip >> 8 & 0xFF
	d := ip & 0xFF
	return fmt.Sprintf("%d.%d.%d.%d", a, b, c, d)
}
|
||||
|
||||
// MidIP returns the midpoint of sip and eip, widening the sum to 64 bits
// first so the addition cannot overflow uint32.
func MidIP(sip uint32, eip uint32) uint32 {
	sum := uint64(sip) + uint64(eip)
	return uint32(sum / 2)
}
|
||||
|
||||
// LoadHeader load the header info from the specified handle.
// It seeks to the start of the file, reads exactly HeaderInfoLength
// bytes, and decodes them with NewHeader. Fails on seek/read errors or a
// short read; the handle's position is left after the header on success.
func LoadHeader(handle *os.File) (*Header, error) {
	_, err := handle.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("seek to the header: %w", err)
	}

	var buff = make([]byte, HeaderInfoLength)
	rLen, err := handle.Read(buff)
	if err != nil {
		return nil, err
	}

	if rLen != len(buff) {
		return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
	}

	return NewHeader(buff)
}
|
||||
|
||||
// LoadHeaderFromFile load header info from the specified db file path
|
||||
func LoadHeaderFromFile(dbFile string) (*Header, error) {
|
||||
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
|
||||
}
|
||||
|
||||
header, err := LoadHeader(handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = handle.Close()
|
||||
return header, nil
|
||||
}
|
||||
|
||||
// LoadHeaderFromBuff wrap the header info from the content buffer.
// NOTE(review): it slices the first 256 bytes unconditionally and will
// panic if cBuff is shorter; the literal 256 presumably equals
// HeaderInfoLength — confirm and consider using the constant.
func LoadHeaderFromBuff(cBuff []byte) (*Header, error) {
	return NewHeader(cBuff[0:256])
}
|
||||
|
||||
// LoadVectorIndex util function to load the vector index from the specified file handle.
// It seeks just past the header and reads the whole fixed-size vector
// index (VectorIndexRows x VectorIndexCols cells of VectorIndexSize
// bytes), failing on seek/read errors or a short read.
func LoadVectorIndex(handle *os.File) ([]byte, error) {
	// load all the vector index block
	_, err := handle.Seek(HeaderInfoLength, 0)
	if err != nil {
		return nil, fmt.Errorf("seek to vector index: %w", err)
	}

	var buff = make([]byte, VectorIndexRows*VectorIndexCols*VectorIndexSize)
	rLen, err := handle.Read(buff)
	if err != nil {
		return nil, err
	}

	if rLen != len(buff) {
		return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
	}

	return buff, nil
}
|
||||
|
||||
// LoadVectorIndexFromFile load vector index from a specified file path
|
||||
func LoadVectorIndexFromFile(dbFile string) ([]byte, error) {
|
||||
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
|
||||
}
|
||||
|
||||
vIndex, err := LoadVectorIndex(handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = handle.Close()
|
||||
return vIndex, nil
|
||||
}
|
||||
|
||||
// LoadContent load the whole xdb content from the specified file handle.
// It stats the file for its size, seeks back to the start, and reads the
// entire content into one buffer, failing on any error or short read.
// The whole database is held in memory, so this is only appropriate for
// buffer-backed searchers.
func LoadContent(handle *os.File) ([]byte, error) {
	// get file size
	fi, err := handle.Stat()
	if err != nil {
		return nil, fmt.Errorf("stat: %w", err)
	}

	size := fi.Size()

	// seek to the head of the file
	_, err = handle.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("seek to get xdb file length: %w", err)
	}

	var buff = make([]byte, size)
	rLen, err := handle.Read(buff)
	if err != nil {
		return nil, err
	}

	// NOTE(review): a single Read may return fewer bytes than requested
	// for large files; io.ReadFull would retry instead of failing here.
	if rLen != len(buff) {
		return nil, fmt.Errorf("incomplete read: readed bytes should be %d", len(buff))
	}

	return buff, nil
}
|
||||
|
||||
// LoadContentFromFile load the whole xdb content from the specified db file path
|
||||
func LoadContentFromFile(dbFile string) ([]byte, error) {
|
||||
handle, err := os.OpenFile(dbFile, os.O_RDONLY, 0600)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open xdb file `%s`: %w", dbFile, err)
|
||||
}
|
||||
|
||||
cBuff, err := LoadContent(handle)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = handle.Close()
|
||||
return cBuff, nil
|
||||
}
|
@ -0,0 +1,138 @@
|
||||
package ipv6wry
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Package-level scratch state written by searchIndex/getAddr during a
// lookup. NOTE(review): these mutable globals (together with
// Client.Offset) make Query unsafe for concurrent use from multiple
// goroutines — confirm before sharing a Client.
var (
	header  []byte // index-header slice, set by searchIndex
	country []byte // country/region bytes from the last getAddr
	area    []byte // ISP/area bytes from the last getAddr
	v6ip    uint64 // high 64 bits of the last queried IPv6 address
	offset  uint32 // record offset returned by the last searchIndex
	start   uint32 // current low bound of the index binary search
	end     uint32 // current high bound of the index binary search
)

// datBuff is the embedded ipv6wry database, loaded at compile time.
//
//go:embed ipv6wry.db
var datBuff []byte

// Client is a cursor over the embedded database; Offset is the current
// read position advanced by readData.
type Client struct {
	Offset   uint32
	ItemLen  uint32 // bytes of the IP key in an index entry (8 for IPv6)
	IndexLen uint32 // total bytes of one index entry (11 for IPv6)
}
|
||||
|
||||
// New builds a Client for the embedded ipv6wry.db and logs a record
// count derived from the first 8 bytes of the database.
// NOTE(review): the [0:8] header and the (end-start)/7+1 formula match
// the qqwry IPv4 layout, while searchIndex reads this database's header
// at [8:24] with 11-byte entries — the logged count is likely wrong for
// the IPv6 format; verify against the ipv6wry spec.
func New() *Client {

	c := &Client{}

	buf := datBuff[0:8]
	start := binary.LittleEndian.Uint32(buf[:4])
	end := binary.LittleEndian.Uint32(buf[4:])

	num := int64((end-start)/7 + 1)
	log.Printf("ipv6wry.db 共加载:%d 条ip记录\n", num)

	return c
}
|
||||
|
||||
// readData reads length bytes from the embedded database at the current
// Offset and advances Offset past them. Returns nil when Offset is
// already beyond the data, and a truncated slice when the requested
// window crosses the end.
// NOTE(review): callers index the result without a nil check, so a
// corrupt offset in the database would panic.
func (c *Client) readData(length uint32) (rs []byte) {
	end := c.Offset + length
	dataNum := uint32(len(datBuff))
	if c.Offset > dataNum {
		return nil
	}

	if end > dataNum {
		end = dataNum
	}
	rs = datBuff[c.Offset:end]
	c.Offset = end
	return rs
}
|
||||
|
||||
// getAddr decodes the country/area pair at the current Offset, following
// the qqwry-style redirect encoding. The first byte is a mode tag:
// 0x01 redirects the whole record, 0x02 redirects only the first field.
func (c *Client) getAddr() ([]byte, []byte) {
	mode := c.readData(1)[0]
	if mode == 0x01 {
		// [IP][0x01][absolute offset of the country/area record]
		c.Offset = byteToUInt32(c.readData(3))
		return c.getAddr()
	}
	// [IP][0x02][absolute offset of field 1][...] or [IP][country][...]
	// step back over the mode byte we just consumed
	_offset := c.Offset - 1
	c1 := c.readArea(_offset)
	if mode == 0x02 {
		// field 1 was a 0x02 + 3-byte pointer: skip those 4 bytes
		c.Offset = 4 + _offset
	} else {
		// field 1 was inline: skip its bytes plus the NUL terminator
		c.Offset = _offset + uint32(1+len(c1))
	}
	c2 := c.readArea(c.Offset)
	return c1, c2
}
|
||||
|
||||
// readArea reads one area field at offset, following 0x01/0x02 redirect
// pointers recursively until it reaches an inline NUL-terminated string.
func (c *Client) readArea(offset uint32) []byte {
	c.Offset = offset
	mode := c.readData(1)[0]
	if mode == 0x01 || mode == 0x02 {
		// redirect: next 3 bytes are the absolute offset of the real data
		return c.readArea(byteToUInt32(c.readData(3)))
	}
	// inline string: rewind over the peeked mode byte and read it
	c.Offset = offset
	return c.readString()
}
|
||||
|
||||
func (c *Client) readString() []byte {
|
||||
data := make([]byte, 0)
|
||||
for {
|
||||
buf := c.readData(1)
|
||||
if buf[0] == 0 {
|
||||
break
|
||||
}
|
||||
data = append(data, buf[0])
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// searchIndex binary-searches the embedded index for the entry covering
// ip (the high 64 bits of an IPv6 address) and returns the 3-byte record
// offset stored after the 8-byte IP key.
// NOTE(review): it mutates the package globals header/start/end, so it
// is not safe for concurrent use; it also assumes a well-formed index —
// malformed data could slice out of range and panic.
func (c *Client) searchIndex(ip uint64) uint32 {

	c.ItemLen = 8  // 8-byte IP key per entry
	c.IndexLen = 11 // 8-byte key + 3-byte record offset

	// IPv6 header lives at [8:24]: entry count, then index start offset
	header = datBuff[8:24]
	start = binary.LittleEndian.Uint32(header[8:])
	counts := binary.LittleEndian.Uint32(header[:8])
	end = start + counts*c.IndexLen

	buf := make([]byte, c.IndexLen)

	for {
		// midpoint aligned to an entry boundary
		mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
		buf = datBuff[mid : mid+c.IndexLen]
		_ip := binary.LittleEndian.Uint64(buf[:c.ItemLen])

		if end-start == c.IndexLen {
			// two candidates left: pick the later one if ip reaches it
			if ip >= binary.LittleEndian.Uint64(datBuff[end:end+c.ItemLen]) {
				buf = datBuff[end : end+c.IndexLen]
			}
			return byteToUInt32(buf[c.ItemLen:])
		}

		if _ip > ip {
			end = mid
		} else if _ip < ip {
			start = mid
		} else if _ip == ip {
			return byteToUInt32(buf[c.ItemLen:])
		}
	}
}
|
||||
|
||||
// byteToUInt32 decodes a 3-byte little-endian unsigned integer.
// (The masks in the original were no-ops: each source byte already fits
// its target bits.)
func byteToUInt32(data []byte) uint32 {
	return uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16
}
|
27
vendor/go.dtapp.net/goip/v6/download.go → vendor/go.dtapp.net/goip/ipv6wry/download.go
generated
vendored
27
vendor/go.dtapp.net/goip/v6/download.go → vendor/go.dtapp.net/goip/ipv6wry/download.go
generated
vendored
0
vendor/go.dtapp.net/goip/v6/ip.db → vendor/go.dtapp.net/goip/ipv6wry/ipv6wry.db
generated
vendored
0
vendor/go.dtapp.net/goip/v6/ip.db → vendor/go.dtapp.net/goip/ipv6wry/ipv6wry.db
generated
vendored
@ -0,0 +1,82 @@
|
||||
package ipv6wry
|
||||
|
||||
import (
|
||||
"go.dtapp.net/gostring"
|
||||
"math/big"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// QueryResult is the decoded location information for one IPv6 lookup.
type QueryResult struct {
	Ip       string `json:"ip,omitempty"`       // the queried IP address
	Country  string `json:"country,omitempty"`  // country
	Province string `json:"province,omitempty"` // province
	City     string `json:"city,omitempty"`     // city
	Area     string `json:"area,omitempty"`     // area / district
	Isp      string `json:"isp,omitempty"`      // carrier / ISP
}
|
||||
|
||||
// Query looks up the location information for an IPv6 address.
// It extracts the high 64 bits of the address (big.Int division by
// 2^64, then masking), finds the matching record via searchIndex, and
// splits the tab-separated country field into country / province /
// city / area. The second record field becomes the ISP.
// NOTE(review): relies on mutable package globals (v6ip, offset,
// country, area), so concurrent calls are unsafe.
func (c *Client) Query(ipAddress net.IP) (result QueryResult) {

	result.Ip = ipAddress.String()

	c.Offset = 0

	// high 64 bits of the 128-bit address: (ip / 2^64) & 0xFFFFFFFFFFFFFFFF
	tp := big.NewInt(0)
	op := big.NewInt(0)
	tp.SetBytes(ipAddress.To16())
	op.SetString("18446744073709551616", 10)
	op.Div(tp, op)
	tp.SetString("FFFFFFFFFFFFFFFF", 16)
	op.And(op, tp)

	v6ip = op.Uint64()
	offset = c.searchIndex(v6ip)
	c.Offset = offset

	country, area = c.getAddr()

	// parse the region data (tab-separated fields)
	info := strings.Split(string(country), "\t")
	if len(info) > 0 {
		// NOTE(review): strings.Split never returns an empty slice for a
		// non-empty separator, so the else branch below is unreachable.
		i := 1
		for {
			if i > len(info) {
				break
			}
			switch i {
			case 1:
				result.Country = info[i-1]
				result.Country = gostring.SpaceAndLineBreak(result.Country)
			case 2:
				result.Province = info[i-1]
				result.Province = gostring.SpaceAndLineBreak(result.Province)
			case 3:
				result.City = info[i-1]
				result.City = gostring.SpaceAndLineBreak(result.City)
			case 4:
				result.Area = info[i-1]
				result.Area = gostring.SpaceAndLineBreak(result.Area)
			}
			i++ // advance to the next field
		}
	} else {
		result.Country = string(country)
		result.Country = gostring.SpaceAndLineBreak(result.Country)
	}
	// carrier / ISP
	result.Isp = string(area)

	// Delete "ZX" placeholder (prevents unrelated noise in the result)
	if result.Isp == "ZX" || result.Isp == "" {
		result.Isp = ""
	} else {
		result.Isp = " " + result.Isp
	}

	result.Isp = gostring.SpaceAndLineBreak(result.Isp)

	return result
}
|
@ -0,0 +1,35 @@
|
||||
package goip
|
||||
|
||||
import "strings"
|
||||
|
||||
// Address-family labels returned by isIpv4OrIpv6.
var (
	ipv4 = "IPV4"
	ipv6 = "IPV6"
)
|
||||
|
||||
// isIpv4OrIpv6 classifies ip as "IPV4", "IPV6", or "" (unrecognized)
// by splitting on "." / ":" and validating each part with
// CheckIpv4/CheckIpv6.
// NOTE(review): an invalid IPv4 part returns "" but an invalid IPv6
// part returns "Neither" — inconsistent return values; confirm whether
// any caller depends on "Neither" before unifying. Also, the len < 7
// guard and the exactly-8-groups requirement reject compressed IPv6
// forms such as "::1" — presumably intentional, verify.
func (c *Client) isIpv4OrIpv6(ip string) string {
	if len(ip) < 7 {
		return ""
	}
	arrIpv4 := strings.Split(ip, ".")
	if len(arrIpv4) == 4 {
		// validate as IPv4
		for _, val := range arrIpv4 {
			if !c.CheckIpv4(val) {
				return ""
			}
		}
		return ipv4
	}
	arrIpv6 := strings.Split(ip, ":")
	if len(arrIpv6) == 8 {
		// validate as IPv6
		for _, val := range arrIpv6 {
			if !c.CheckIpv6(val) {
				return "Neither"
			}
		}
		return ipv6
	}
	return ""
}
|
@ -0,0 +1,139 @@
|
||||
package qqwry
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Package-level scratch state written by searchIndex/getAddr during a
// lookup. NOTE(review): these mutable globals (together with
// Client.Offset) make Query unsafe for concurrent use from multiple
// goroutines — confirm before sharing a Client.
var (
	header  []byte // index-header slice, set by searchIndex
	country []byte // country/region bytes from the last getAddr
	area    []byte // area/ISP bytes from the last getAddr
	offset  uint32 // record offset returned by the last searchIndex
	start   uint32 // current low bound of the index binary search
	end     uint32 // current high bound of the index binary search
)

// datBuff is the embedded qqwry (CZ88) database, loaded at compile time.
//
//go:embed qqwry.dat
var datBuff []byte

// Client is a cursor over the embedded database; Offset is the current
// read position advanced by readData.
type Client struct {
	Offset   uint32
	ItemLen  uint32 // bytes of the IP key in an index entry (4 for IPv4)
	IndexLen uint32 // total bytes of one index entry (7 for IPv4)
}
|
||||
|
||||
// New builds a Client for the embedded qqwry.dat and logs the record
// count. The first 8 bytes of the database hold the start and end
// offsets of the 7-byte-per-entry index, so the count is
// (end-start)/7 + 1.
func New() *Client {

	c := &Client{}

	buf := datBuff[0:8]
	start := binary.LittleEndian.Uint32(buf[:4])
	end := binary.LittleEndian.Uint32(buf[4:])

	num := int64((end-start)/7 + 1)
	log.Printf("qqwry.dat 共加载:%d 条ip记录\n", num)

	return c
}
|
||||
|
||||
// readData reads length bytes from the embedded database at the current
// Offset and advances Offset past them. Returns nil when Offset is
// already beyond the data, and a truncated slice when the requested
// window crosses the end.
// NOTE(review): callers index the result without a nil check, so a
// corrupt offset in the database would panic.
func (c *Client) readData(length uint32) (rs []byte) {
	end := c.Offset + length
	dataNum := uint32(len(datBuff))
	if c.Offset > dataNum {
		return nil
	}

	if end > dataNum {
		end = dataNum
	}
	rs = datBuff[c.Offset:end]
	c.Offset = end
	return rs
}
|
||||
|
||||
// getAddr decodes the country/area pair at the current Offset, following
// the qqwry redirect encoding. The first byte is a mode tag: 0x01
// redirects the whole record, 0x02 redirects only the first field.
func (c *Client) getAddr() ([]byte, []byte) {
	mode := c.readData(1)[0]
	if mode == 0x01 {
		// [IP][0x01][absolute offset of the country/area record]
		c.Offset = byteToUInt32(c.readData(3))
		return c.getAddr()
	}
	// [IP][0x02][absolute offset of field 1][...] or [IP][country][...]
	// step back over the mode byte we just consumed
	_offset := c.Offset - 1
	c1 := c.readArea(_offset)
	if mode == 0x02 {
		// field 1 was a 0x02 + 3-byte pointer: skip those 4 bytes
		c.Offset = 4 + _offset
	} else {
		// field 1 was inline: skip its bytes plus the NUL terminator
		c.Offset = _offset + uint32(1+len(c1))
	}
	c2 := c.readArea(c.Offset)
	return c1, c2
}
|
||||
|
||||
// readArea reads one area field at offset, following 0x01/0x02 redirect
// pointers recursively until it reaches an inline NUL-terminated string.
func (c *Client) readArea(offset uint32) []byte {
	c.Offset = offset
	mode := c.readData(1)[0]
	if mode == 0x01 || mode == 0x02 {
		// redirect: next 3 bytes are the absolute offset of the real data
		return c.readArea(byteToUInt32(c.readData(3)))
	}
	// inline string: rewind over the peeked mode byte and read it
	c.Offset = offset
	return c.readString()
}
|
||||
|
||||
// 读取字符串
|
||||
func (c *Client) readString() []byte {
|
||||
data := make([]byte, 0)
|
||||
for {
|
||||
buf := c.readData(1)
|
||||
if buf[0] == 0 {
|
||||
break
|
||||
}
|
||||
data = append(data, buf[0])
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// searchIndex binary-searches the embedded index for the entry covering
// the IPv4 address ip and returns the 3-byte record offset stored after
// the 4-byte IP key.
// NOTE(review): it mutates the package globals header/start/end, so it
// is not safe for concurrent use; it also assumes a well-formed index —
// malformed data could slice out of range and panic.
func (c *Client) searchIndex(ip uint32) uint32 {
	c.ItemLen = 4  // 4-byte IP key per entry
	c.IndexLen = 7 // 4-byte key + 3-byte record offset
	// header bytes [0:8] hold the index start and end offsets
	header = datBuff[0:8]
	start = binary.LittleEndian.Uint32(header[:4])
	end = binary.LittleEndian.Uint32(header[4:])

	buf := make([]byte, c.IndexLen)

	for {
		// midpoint aligned to an entry boundary
		mid := start + c.IndexLen*(((end-start)/c.IndexLen)>>1)
		buf = datBuff[mid : mid+c.IndexLen]
		_ip := binary.LittleEndian.Uint32(buf[:c.ItemLen])

		if end-start == c.IndexLen {
			// two candidates left: pick the later one if ip reaches it
			if ip >= binary.LittleEndian.Uint32(datBuff[end:end+c.ItemLen]) {
				buf = datBuff[end : end+c.IndexLen]
			}
			return byteToUInt32(buf[c.ItemLen:])
		}

		if _ip > ip {
			end = mid
		} else if _ip < ip {
			start = mid
		} else if _ip == ip {
			return byteToUInt32(buf[c.ItemLen:])
		}
	}
}
|
||||
|
||||
// byteToUInt32 decodes a 3-byte little-endian unsigned integer.
// (The masks in the original were no-ops: each source byte already fits
// its target bits.)
func byteToUInt32(data []byte) uint32 {
	return uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16
}
|
56
vendor/go.dtapp.net/goip/v4/download.go → vendor/go.dtapp.net/goip/qqwry/download.go
generated
vendored
56
vendor/go.dtapp.net/goip/v4/download.go → vendor/go.dtapp.net/goip/qqwry/download.go
generated
vendored
@ -0,0 +1,53 @@
|
||||
package qqwry
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"go.dtapp.net/gostring"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"net"
|
||||
)
|
||||
|
||||
// QueryResult is the decoded location information for one IPv4 lookup.
type QueryResult struct {
	Ip      string `json:"ip,omitempty"`      // the queried IP address
	Country string `json:"country,omitempty"` // country or region
	Area    string `json:"area,omitempty"`    // area / ISP
}
|
||||
|
||||
// Query looks up the location information for an IPv4 address.
// It binary-searches the index, decodes the GBK-encoded country/area
// record to UTF-8, and strips the " CZ88.NET" placeholder from Area.
// Returns an error when the index search yields offset 0.
// NOTE(review): relies on mutable package globals (offset, country,
// area), so concurrent calls are unsafe. ipAddress is assumed to be a
// valid IPv4 address — To4() returning nil would panic here; callers
// (QueryQqWry) check this first.
func (c *Client) Query(ipAddress net.IP) (result QueryResult, err error) {

	c.Offset = 0

	// locate the record offset for this address
	offset = c.searchIndex(binary.BigEndian.Uint32(ipAddress.To4()))
	if offset <= 0 {
		return QueryResult{}, errors.New("搜索失败")
	}

	result.Ip = ipAddress.String()

	// skip the 4-byte end-IP key at the start of the record
	c.Offset = offset + c.ItemLen

	country, area = c.getAddr()

	// the database is GBK-encoded; decode to UTF-8
	enc := simplifiedchinese.GBK.NewDecoder()

	result.Country, _ = enc.String(string(country))

	result.Country = gostring.SpaceAndLineBreak(result.Country)

	result.Area, _ = enc.String(string(area))

	// Delete "CZ88.NET" placeholder (prevents unrelated noise)
	if result.Area == " CZ88.NET" || result.Area == "" {
		result.Area = ""
	} else {
		result.Area = " " + result.Area
	}

	result.Area = gostring.SpaceAndLineBreak(result.Area)

	return result, nil
}
|
@ -0,0 +1,87 @@
|
||||
package goip
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"go.dtapp.net/goip/geoip"
|
||||
"go.dtapp.net/goip/ip2region"
|
||||
"go.dtapp.net/goip/ip2region_v2"
|
||||
"go.dtapp.net/goip/ipv6wry"
|
||||
"go.dtapp.net/goip/qqwry"
|
||||
"net"
|
||||
)
|
||||
|
||||
// QueryIncorrect is returned when the supplied IP address does not match
// the family required by the query method.
// NOTE(review): Go convention would name this ErrQueryIncorrect, but
// renaming would break external callers, so the name is kept.
var (
	QueryIncorrect = errors.New("ip地址不正确")
)
||||
|
||||
// QueryQqWry 纯真IP库
|
||||
// https://www.cz88.net/
|
||||
func (c *Client) QueryQqWry(ipAddress net.IP) (result qqwry.QueryResult, err error) {
|
||||
if ipAddress.To4() == nil {
|
||||
return result, QueryIncorrect
|
||||
}
|
||||
|
||||
query, err := c.qqwryClient.Query(ipAddress)
|
||||
if err != nil {
|
||||
return qqwry.QueryResult{}, err
|
||||
}
|
||||
|
||||
return query, err
|
||||
}
|
||||
|
||||
// QueryIp2Region ip2region
|
||||
// https://github.com/lionsoul2014/ip2region
|
||||
func (c *Client) QueryIp2Region(ipAddress net.IP) (result ip2region.QueryResult, err error) {
|
||||
if ipAddress.To4() == nil {
|
||||
return result, QueryIncorrect
|
||||
}
|
||||
|
||||
query, err := c.ip2regionClient.Query(ipAddress)
|
||||
if err != nil {
|
||||
return ip2region.QueryResult{}, err
|
||||
}
|
||||
|
||||
return query, err
|
||||
}
|
||||
|
||||
// QueryIp2RegionV2 queries the ip2region v2 (xdb) IPv4 database.
// https://github.com/lionsoul2014/ip2region
// Returns QueryIncorrect when ipAddress is not an IPv4 address.
func (c *Client) QueryIp2RegionV2(ipAddress net.IP) (result ip2region_v2.QueryResult, err error) {
	if ipAddress.To4() == nil {
		return result, QueryIncorrect
	}

	query, err := c.ip2regionV2Client.Query(ipAddress)
	if err != nil {
		return ip2region_v2.QueryResult{}, err
	}

	return query, nil
}
|
||||
|
||||
// QueryGeoIp queries the MaxMind GeoIP city database.
// https://www.maxmind.com/
// Returns QueryIncorrect when ipAddress is nil.
// NOTE(review): the nil check compares String() to "<nil>" (the value
// net.IP.String documents for an invalid/empty IP); `len(ipAddress) == 0`
// would express the same intent more directly.
func (c *Client) QueryGeoIp(ipAddress net.IP) (result geoip.QueryCityResult, err error) {
	if ipAddress.String() == "<nil>" {
		return result, QueryIncorrect
	}

	query, err := c.geoIpClient.QueryCity(ipAddress)
	if err != nil {
		return geoip.QueryCityResult{}, err
	}

	return query, nil
}
|
||||
|
||||
// QueryIpv6wry queries the ZX IPv6 database.
// https://ip.zxinc.org
// Returns QueryIncorrect when ipAddress cannot be represented as a
// 16-byte address.
// NOTE(review): To16() also succeeds for IPv4 addresses, so plain IPv4
// input is passed through to the IPv6 database — confirm this is
// intended.
func (c *Client) QueryIpv6wry(ipAddress net.IP) (result ipv6wry.QueryResult, err error) {
	if ipAddress.To16() == nil {
		return result, QueryIncorrect
	}

	query := c.ipv6wryClient.Query(ipAddress)

	return query, nil
}
|
@ -1,201 +0,0 @@
|
||||
package v4
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"go.dtapp.net/gostring"
|
||||
"golang.org/x/text/encoding/simplifiedchinese"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
)
|
||||
|
||||
var (
|
||||
header []byte
|
||||
country []byte
|
||||
area []byte
|
||||
offset uint32
|
||||
start uint32
|
||||
end uint32
|
||||
)
|
||||
|
||||
//go:embed ip.dat
|
||||
var dat []byte
|
||||
|
||||
type Pointer struct {
|
||||
Offset uint32
|
||||
ItemLen uint32
|
||||
IndexLen uint32
|
||||
}
|
||||
|
||||
// Result 返回
|
||||
type Result struct {
|
||||
IP string `json:"ip,omitempty"` // 输入的ip地址
|
||||
Country string `json:"country,omitempty"` // 国家或地区
|
||||
Area string `json:"area,omitempty"` // 区域
|
||||
}
|
||||
|
||||
// InitIPV4Data 加载
|
||||
func (q *Pointer) InitIPV4Data() int64 {
|
||||
buf := dat[0:8]
|
||||
start := binary.LittleEndian.Uint32(buf[:4])
|
||||
end := binary.LittleEndian.Uint32(buf[4:])
|
||||
|
||||
return int64((end-start)/7 + 1)
|
||||
}
|
||||
|
||||
// ReadData 从文件中读取数据
|
||||
func (q *Pointer) readData(length uint32) (rs []byte) {
|
||||
end := q.Offset + length
|
||||
dataNum := uint32(len(dat))
|
||||
if q.Offset > dataNum {
|
||||
return nil
|
||||
}
|
||||
|
||||
if end > dataNum {
|
||||
end = dataNum
|
||||
}
|
||||
rs = dat[q.Offset:end]
|
||||
q.Offset = end
|
||||
return rs
|
||||
}
|
||||
|
||||
// Find ip地址查询对应归属地信息
|
||||
func (q *Pointer) Find(ipStr string) (res Result) {
|
||||
|
||||
// 赋值
|
||||
res.IP = ipStr
|
||||
if net.ParseIP(ipStr).To4() == nil {
|
||||
// 不是ip地址
|
||||
return res
|
||||
}
|
||||
|
||||
q.Offset = 0
|
||||
|
||||
// 偏移
|
||||
offset = q.searchIndex(binary.BigEndian.Uint32(net.ParseIP(ipStr).To4()))
|
||||
if offset <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
q.Offset = offset + q.ItemLen
|
||||
|
||||
country, area = q.getAddr()
|
||||
|
||||
enc := simplifiedchinese.GBK.NewDecoder()
|
||||
|
||||
res.Country, _ = enc.String(string(country))
|
||||
res.Country = gostring.SpaceAndLineBreak(res.Country)
|
||||
|
||||
res.Area, _ = enc.String(string(area))
|
||||
|
||||
// Delete CZ88.NET (防止不相关的信息产生干扰)
|
||||
if res.Area == " CZ88.NET" || res.Area == "" {
|
||||
res.Area = ""
|
||||
} else {
|
||||
res.Area = " " + res.Area
|
||||
}
|
||||
|
||||
res.Area = gostring.SpaceAndLineBreak(res.Area)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// 获取地址信息
|
||||
func (q *Pointer) getAddr() ([]byte, []byte) {
|
||||
mode := q.readData(1)[0]
|
||||
if mode == 0x01 {
|
||||
// [IP][0x01][国家和地区信息的绝对偏移地址]
|
||||
q.Offset = byteToUInt32(q.readData(3))
|
||||
return q.getAddr()
|
||||
}
|
||||
// [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
|
||||
_offset := q.Offset - 1
|
||||
c1 := q.readArea(_offset)
|
||||
if mode == 0x02 {
|
||||
q.Offset = 4 + _offset
|
||||
} else {
|
||||
q.Offset = _offset + uint32(1+len(c1))
|
||||
}
|
||||
c2 := q.readArea(q.Offset)
|
||||
return c1, c2
|
||||
}
|
||||
|
||||
// 读取区
|
||||
func (q *Pointer) readArea(offset uint32) []byte {
|
||||
q.Offset = offset
|
||||
mode := q.readData(1)[0]
|
||||
if mode == 0x01 || mode == 0x02 {
|
||||
return q.readArea(byteToUInt32(q.readData(3)))
|
||||
}
|
||||
q.Offset = offset
|
||||
return q.readString()
|
||||
}
|
||||
|
||||
// 读取字符串
|
||||
func (q *Pointer) readString() []byte {
|
||||
data := make([]byte, 0)
|
||||
for {
|
||||
buf := q.readData(1)
|
||||
if buf[0] == 0 {
|
||||
break
|
||||
}
|
||||
data = append(data, buf[0])
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// 搜索索引
|
||||
func (q *Pointer) searchIndex(ip uint32) uint32 {
|
||||
q.ItemLen = 4
|
||||
q.IndexLen = 7
|
||||
header = dat[0:8]
|
||||
start = binary.LittleEndian.Uint32(header[:4])
|
||||
end = binary.LittleEndian.Uint32(header[4:])
|
||||
|
||||
buf := make([]byte, q.IndexLen)
|
||||
|
||||
for {
|
||||
mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
|
||||
buf = dat[mid : mid+q.IndexLen]
|
||||
_ip := binary.LittleEndian.Uint32(buf[:q.ItemLen])
|
||||
|
||||
if end-start == q.IndexLen {
|
||||
if ip >= binary.LittleEndian.Uint32(dat[end:end+q.ItemLen]) {
|
||||
buf = dat[end : end+q.IndexLen]
|
||||
}
|
||||
return byteToUInt32(buf[q.ItemLen:])
|
||||
}
|
||||
|
||||
if _ip > ip {
|
||||
end = mid
|
||||
} else if _ip < ip {
|
||||
start = mid
|
||||
} else if _ip == ip {
|
||||
return byteToUInt32(buf[q.ItemLen:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 字节转UInt32
|
||||
func byteToUInt32(data []byte) uint32 {
|
||||
i := uint32(data[0]) & 0xff
|
||||
i |= (uint32(data[1]) << 8) & 0xff00
|
||||
i |= (uint32(data[2]) << 16) & 0xff0000
|
||||
return i
|
||||
}
|
||||
|
||||
// OnlineDownload 在线下载
|
||||
func (q *Pointer) OnlineDownload() (err error) {
|
||||
tmpData, err := getOnline()
|
||||
if err != nil {
|
||||
return errors.New("下载失败")
|
||||
}
|
||||
if err := ioutil.WriteFile("./qqwry.dat", tmpData, 0644); err == nil {
|
||||
log.Printf("已下载最新 纯真 IPv4数据库 %s ", "./qqwry.dat")
|
||||
} else {
|
||||
return errors.New("保存失败")
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,230 +0,0 @@
|
||||
package v6
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"go.dtapp.net/gostring"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
header []byte
|
||||
country []byte
|
||||
area []byte
|
||||
v6ip uint64
|
||||
offset uint32
|
||||
start uint32
|
||||
end uint32
|
||||
)
|
||||
|
||||
type Result struct {
|
||||
IP string `json:"ip,omitempty"` // 输入的ip地址
|
||||
Country string `json:"country,omitempty"` // 国家
|
||||
Province string `json:"province,omitempty"` // 省份
|
||||
City string `json:"city,omitempty"` // 城市
|
||||
Area string `json:"area,omitempty"` // 区域
|
||||
Isp string `json:"isp,omitempty"` // 运营商
|
||||
}
|
||||
|
||||
//go:embed ip.db
|
||||
var dat []byte
|
||||
|
||||
type Pointer struct {
|
||||
Offset uint32
|
||||
ItemLen uint32
|
||||
IndexLen uint32
|
||||
}
|
||||
|
||||
// InitIPV4Data 加载
|
||||
func (q *Pointer) InitIPV4Data() int64 {
|
||||
buf := dat[0:8]
|
||||
start := binary.LittleEndian.Uint32(buf[:4])
|
||||
end := binary.LittleEndian.Uint32(buf[4:])
|
||||
|
||||
return int64((end-start)/7 + 1)
|
||||
}
|
||||
|
||||
// ReadData 从文件中读取数据
|
||||
func (q *Pointer) readData(length uint32) (rs []byte) {
|
||||
end := q.Offset + length
|
||||
dataNum := uint32(len(dat))
|
||||
if q.Offset > dataNum {
|
||||
return nil
|
||||
}
|
||||
|
||||
if end > dataNum {
|
||||
end = dataNum
|
||||
}
|
||||
rs = dat[q.Offset:end]
|
||||
q.Offset = end
|
||||
return rs
|
||||
}
|
||||
|
||||
// Find ip地址查询对应归属地信息
|
||||
func (q *Pointer) Find(ipStr string) (res Result) {
|
||||
|
||||
res = Result{}
|
||||
res.IP = ipStr
|
||||
if net.ParseIP(ipStr).To16() == nil {
|
||||
return Result{}
|
||||
}
|
||||
|
||||
q.Offset = 0
|
||||
|
||||
tp := big.NewInt(0)
|
||||
op := big.NewInt(0)
|
||||
tp.SetBytes(net.ParseIP(ipStr).To16())
|
||||
op.SetString("18446744073709551616", 10)
|
||||
op.Div(tp, op)
|
||||
tp.SetString("FFFFFFFFFFFFFFFF", 16)
|
||||
op.And(op, tp)
|
||||
|
||||
v6ip = op.Uint64()
|
||||
offset = q.searchIndex(v6ip)
|
||||
q.Offset = offset
|
||||
|
||||
country, area = q.getAddr()
|
||||
|
||||
// 解析地区数据
|
||||
info := strings.Split(string(country), "\t")
|
||||
if len(info) > 0 {
|
||||
i := 1
|
||||
for {
|
||||
if i > len(info) {
|
||||
break
|
||||
}
|
||||
switch i {
|
||||
case 1:
|
||||
res.Country = info[i-1]
|
||||
res.Country = gostring.SpaceAndLineBreak(res.Country)
|
||||
case 2:
|
||||
res.Province = info[i-1]
|
||||
res.Province = gostring.SpaceAndLineBreak(res.Province)
|
||||
case 3:
|
||||
res.City = info[i-1]
|
||||
res.City = gostring.SpaceAndLineBreak(res.City)
|
||||
case 4:
|
||||
res.Area = info[i-1]
|
||||
res.Area = gostring.SpaceAndLineBreak(res.Area)
|
||||
}
|
||||
i++ // 自增
|
||||
}
|
||||
} else {
|
||||
res.Country = string(country)
|
||||
res.Country = gostring.SpaceAndLineBreak(res.Country)
|
||||
}
|
||||
// 运营商
|
||||
res.Isp = string(area)
|
||||
|
||||
// Delete ZX (防止不相关的信息产生干扰)
|
||||
if res.Isp == "ZX" || res.Isp == "" {
|
||||
res.Isp = ""
|
||||
} else {
|
||||
res.Isp = " " + res.Isp
|
||||
}
|
||||
|
||||
res.Isp = gostring.SpaceAndLineBreak(res.Isp)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (q *Pointer) getAddr() ([]byte, []byte) {
|
||||
mode := q.readData(1)[0]
|
||||
if mode == 0x01 {
|
||||
// [IP][0x01][国家和地区信息的绝对偏移地址]
|
||||
q.Offset = byteToUInt32(q.readData(3))
|
||||
return q.getAddr()
|
||||
}
|
||||
// [IP][0x02][信息的绝对偏移][...] or [IP][国家][...]
|
||||
_offset := q.Offset - 1
|
||||
c1 := q.readArea(_offset)
|
||||
if mode == 0x02 {
|
||||
q.Offset = 4 + _offset
|
||||
} else {
|
||||
q.Offset = _offset + uint32(1+len(c1))
|
||||
}
|
||||
c2 := q.readArea(q.Offset)
|
||||
return c1, c2
|
||||
}
|
||||
|
||||
func (q *Pointer) readArea(offset uint32) []byte {
|
||||
q.Offset = offset
|
||||
mode := q.readData(1)[0]
|
||||
if mode == 0x01 || mode == 0x02 {
|
||||
return q.readArea(byteToUInt32(q.readData(3)))
|
||||
}
|
||||
q.Offset = offset
|
||||
return q.readString()
|
||||
}
|
||||
|
||||
func (q *Pointer) readString() []byte {
|
||||
data := make([]byte, 0)
|
||||
for {
|
||||
buf := q.readData(1)
|
||||
if buf[0] == 0 {
|
||||
break
|
||||
}
|
||||
data = append(data, buf[0])
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
func (q *Pointer) searchIndex(ip uint64) uint32 {
|
||||
|
||||
q.ItemLen = 8
|
||||
q.IndexLen = 11
|
||||
|
||||
header = dat[8:24]
|
||||
start = binary.LittleEndian.Uint32(header[8:])
|
||||
counts := binary.LittleEndian.Uint32(header[:8])
|
||||
end = start + counts*q.IndexLen
|
||||
|
||||
buf := make([]byte, q.IndexLen)
|
||||
|
||||
for {
|
||||
mid := start + q.IndexLen*(((end-start)/q.IndexLen)>>1)
|
||||
buf = dat[mid : mid+q.IndexLen]
|
||||
_ip := binary.LittleEndian.Uint64(buf[:q.ItemLen])
|
||||
|
||||
if end-start == q.IndexLen {
|
||||
if ip >= binary.LittleEndian.Uint64(dat[end:end+q.ItemLen]) {
|
||||
buf = dat[end : end+q.IndexLen]
|
||||
}
|
||||
return byteToUInt32(buf[q.ItemLen:])
|
||||
}
|
||||
|
||||
if _ip > ip {
|
||||
end = mid
|
||||
} else if _ip < ip {
|
||||
start = mid
|
||||
} else if _ip == ip {
|
||||
return byteToUInt32(buf[q.ItemLen:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func byteToUInt32(data []byte) uint32 {
|
||||
i := uint32(data[0]) & 0xff
|
||||
i |= (uint32(data[1]) << 8) & 0xff00
|
||||
i |= (uint32(data[2]) << 16) & 0xff0000
|
||||
return i
|
||||
}
|
||||
|
||||
// OnlineDownload 在线下载
|
||||
func (q *Pointer) OnlineDownload() (err error) {
|
||||
tmpData, err := getOnline()
|
||||
if err != nil {
|
||||
return errors.New("下载失败")
|
||||
}
|
||||
if err := ioutil.WriteFile("./ipv6wry.db", tmpData, 0644); err == nil {
|
||||
log.Printf("已下载最新 ZX IPv6数据库 %s ", "./ipv6wry.db")
|
||||
} else {
|
||||
return errors.New("保存失败")
|
||||
}
|
||||
return nil
|
||||
}
|
@ -1,32 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin && go1.12 && !go1.13
|
||||
// +build darwin,go1.12,!go1.13
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// _SYS_GETDIRENTRIES64 is the darwin syscall number used by the raw
// Getdirentries fallback below.
const _SYS_GETDIRENTRIES64 = 344
|
||||
|
||||
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
|
||||
// To implement this using libSystem we'd need syscall_syscallPtr for
|
||||
// fdopendir. However, syscallPtr was only added in Go 1.13, so we fall
|
||||
// back to raw syscalls for this func on Go 1.12.
|
||||
var p unsafe.Pointer
|
||||
if len(buf) > 0 {
|
||||
p = unsafe.Pointer(&buf[0])
|
||||
} else {
|
||||
p = unsafe.Pointer(&_zero)
|
||||
}
|
||||
r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
|
||||
n = int(r0)
|
||||
if e1 != 0 {
|
||||
return n, errnoErr(e1)
|
||||
}
|
||||
return n, nil
|
||||
}
|
@ -1,100 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build darwin && go1.13
|
||||
// +build darwin,go1.13
|
||||
|
||||
package unix
|
||||
|
||||
import "unsafe"
|
||||
|
||||
//sys closedir(dir uintptr) (err error)
|
||||
//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
|
||||
|
||||
func fdopendir(fd int) (dir uintptr, err error) {
|
||||
r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
|
||||
dir = uintptr(r0)
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// libc_fdopendir_trampoline_addr holds the address of the assembly
// trampoline that jumps into libSystem's fdopendir (see the matching
// .s file).
var libc_fdopendir_trampoline_addr uintptr

//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
|
||||
|
||||
// Getdirentries simulates getdirentries64 using
// fdopendir/readdir_r/closedir from libSystem.
//
// We store the number of entries to skip in the seek
// offset of fd. See issue #31368.
// It's not the full required semantics, but should handle the case
// of calling Getdirentries or ReadDirent repeatedly.
// It won't handle assigning the results of lseek to *basep, or handle
// the directory being edited underfoot.
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
	// Entries consumed by earlier calls are recorded in fd's seek offset.
	skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
	if err != nil {
		return 0, err
	}

	// We need to duplicate the incoming file descriptor
	// because the caller expects to retain control of it, but
	// fdopendir expects to take control of its argument.
	// Just Dup'ing the file descriptor is not enough, as the
	// result shares underlying state. Use Openat to make a really
	// new file descriptor referring to the same directory.
	fd2, err := Openat(fd, ".", O_RDONLY, 0)
	if err != nil {
		return 0, err
	}
	d, err := fdopendir(fd2)
	if err != nil {
		// fdopendir did not take ownership, so close the duplicate here.
		Close(fd2)
		return 0, err
	}
	defer closedir(d)

	var cnt int64
	for {
		var entry Dirent
		var entryp *Dirent
		e := readdir_r(d, &entry, &entryp)
		if e != 0 {
			return n, errnoErr(e)
		}
		// nil result with no error means end of directory.
		if entryp == nil {
			break
		}
		// Re-skip entries already handed out by previous calls.
		if skip > 0 {
			skip--
			cnt++
			continue
		}

		reclen := int(entry.Reclen)
		if reclen > len(buf) {
			// Not enough room. Return for now.
			// The counter will let us know where we should start up again.
			// Note: this strategy for suspending in the middle and
			// restarting is O(n^2) in the length of the directory. Oh well.
			break
		}

		// Copy entry into return buffer.
		s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
		copy(buf, s)

		buf = buf[reclen:]
		n += reclen
		cnt++
	}
	// Set the seek offset of the input fd to record
	// how many files we've already returned.
	_, err = Seek(fd, cnt, 0 /* SEEK_SET */)
	if err != nil {
		return n, err
	}

	return n, nil
}
|
@ -1,40 +0,0 @@
|
||||
// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
//go:build darwin && amd64 && go1.13
|
||||
// +build darwin,amd64,go1.13
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ syscall.Errno
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
// closedir releases a directory handle via libSystem's closedir,
// called through the assembly trampoline below.
func closedir(dir uintptr) (err error) {
	_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// libc_closedir_trampoline_addr holds the address of the assembly
// trampoline that jumps into libSystem's closedir.
var libc_closedir_trampoline_addr uintptr

//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
// readdir_r reads the next directory entry into entry; the libc error
// code is returned directly as an Errno (0 on success).
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
	r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
	res = Errno(r0)
	return
}

// libc_readdir_r_trampoline_addr holds the address of the assembly
// trampoline that jumps into libSystem's readdir_r.
var libc_readdir_r_trampoline_addr uintptr

//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
|
@ -1,25 +0,0 @@
|
||||
// go run mkasm.go darwin amd64
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
//go:build go1.13
|
||||
// +build go1.13
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Each trampoline simply jumps to the dynamically-imported libSystem
// symbol; the ·libc_*_trampoline_addr globals expose the trampoline
// addresses to the Go syscall wrappers.
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_fdopendir(SB)

GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)

TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_closedir(SB)

GLOBL	·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA	·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)

TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_readdir_r(SB)

GLOBL	·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA	·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
|
@ -1,40 +0,0 @@
|
||||
// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go
|
||||
// Code generated by the command above; see README.md. DO NOT EDIT.
|
||||
|
||||
//go:build darwin && arm64 && go1.13
|
||||
// +build darwin,arm64,go1.13
|
||||
|
||||
package unix
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var _ syscall.Errno
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
// closedir releases a directory handle via libSystem's closedir,
// called through the assembly trampoline below.
func closedir(dir uintptr) (err error) {
	_, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
	if e1 != 0 {
		err = errnoErr(e1)
	}
	return
}

// libc_closedir_trampoline_addr holds the address of the assembly
// trampoline that jumps into libSystem's closedir.
var libc_closedir_trampoline_addr uintptr

//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
|
||||
|
||||
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
|
||||
|
||||
// readdir_r reads the next directory entry into entry; the libc error
// code is returned directly as an Errno (0 on success).
func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
	r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
	res = Errno(r0)
	return
}

// libc_readdir_r_trampoline_addr holds the address of the assembly
// trampoline that jumps into libSystem's readdir_r.
var libc_readdir_r_trampoline_addr uintptr

//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
|
@ -1,25 +0,0 @@
|
||||
// go run mkasm.go darwin arm64
|
||||
// Code generated by the command above; DO NOT EDIT.
|
||||
|
||||
//go:build go1.13
|
||||
// +build go1.13
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Each trampoline simply jumps to the dynamically-imported libSystem
// symbol; the ·libc_*_trampoline_addr globals expose the trampoline
// addresses to the Go syscall wrappers.
TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_fdopendir(SB)

GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)

TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_closedir(SB)

GLOBL	·libc_closedir_trampoline_addr(SB), RODATA, $8
DATA	·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)

TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
	JMP	libc_readdir_r(SB)

GLOBL	·libc_readdir_r_trampoline_addr(SB), RODATA, $8
DATA	·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
|
Loading…
Reference in new issue