master
李光春 2 years ago
parent d927801c06
commit b54c0bebdb

.gitignore

@ -4,6 +4,7 @@
.idea
.vscode
*.log
git.sh
gitmod.sh
/test/
/service/*/*_test.go

@ -5,18 +5,17 @@ go 1.18
require (
github.com/go-redis/redis/v8 v8.11.5
github.com/mitchellh/mapstructure v1.5.0
github.com/mvdan/xurls v1.1.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/nilorg/sdk v0.0.0-20210429091026-95b6cdc95c84
github.com/shopspring/decimal v1.3.1
github.com/sirupsen/logrus v1.8.1
go.mongodb.org/mongo-driver v1.9.0
go.mongodb.org/mongo-driver v1.9.1
go.uber.org/zap v1.21.0
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e
golang.org/x/text v0.3.7
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gorm.io/driver/mysql v1.3.3
gorm.io/driver/postgres v1.3.5
gorm.io/driver/postgres v1.3.6
gorm.io/gorm v1.23.5
)
@ -27,16 +26,16 @@ require (
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgconn v1.12.0 // indirect
github.com/jackc/pgconn v1.12.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
github.com/jackc/pgtype v1.11.0 // indirect
github.com/jackc/pgx/v4 v4.16.0 // indirect
github.com/jackc/pgx/v4 v4.16.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.15.2 // indirect
github.com/klauspost/compress v1.15.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.1 // indirect
@ -44,8 +43,8 @@ require (
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
)

@ -45,8 +45,8 @@ github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsU
github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
github.com/jackc/pgconn v1.12.0 h1:/RvQ24k3TnNdfBSW0ou9EOi5jx2cX7zfE8n2nLKuiP0=
github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
github.com/jackc/pgconn v1.12.1 h1:rsDFzIpRk7xT4B8FufgpCCeyjdNpKyghZeSefViE5W8=
github.com/jackc/pgconn v1.12.1/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono=
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
@ -77,8 +77,8 @@ github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
github.com/jackc/pgx/v4 v4.16.0 h1:4k1tROTJctHotannFYzu77dY3bgtMRymQP7tXQjqpPk=
github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI=
github.com/jackc/pgx/v4 v4.16.1 h1:JzTglcal01DrghUqt+PmzWsZx/Yh7SC/CTQmSBMTd0Y=
github.com/jackc/pgx/v4 v4.16.1/go.mod h1:SIhx0D5hoADaiXZVyv+3gSm3LCIIINTVO0PficsvWGQ=
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
@ -90,8 +90,8 @@ github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.5 h1:qyCLMz2JCrKADihKOh9FxnW3houKeNsp2h5OEz0QSEA=
github.com/klauspost/compress v1.15.5/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@ -111,8 +111,6 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mvdan/xurls v1.1.0 h1:OpuDelGQ1R1ueQ6sSryzi6P+1RtBpfQHM8fJwlE45ww=
github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU=
github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
github.com/nilorg/sdk v0.0.0-20210429091026-95b6cdc95c84 h1:Nxk1uViXfb9MHgtHBlQFWzlQCsJbDQuotfTsAFcFP3o=
@ -163,8 +161,8 @@ github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqTosly
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.mongodb.org/mongo-driver v1.9.0 h1:f3aLGJvQmBl8d9S40IL+jEyBC6hfLPbJjv9t5hEM9ck=
go.mongodb.org/mongo-driver v1.9.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.mongodb.org/mongo-driver v1.9.1 h1:m078y9v7sBItkt1aaoe2YlvWEXcD263e1a4E1fBrJ1c=
go.mongodb.org/mongo-driver v1.9.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
@ -198,8 +196,8 @@ golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
@ -213,8 +211,9 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -229,8 +228,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -279,8 +278,8 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/mysql v1.3.3 h1:jXG9ANrwBc4+bMvBcSl8zCfPBaVoPyBEBshA8dA93X8=
gorm.io/driver/mysql v1.3.3/go.mod h1:ChK6AHbHgDCFZyJp0F+BmVGb06PSIoh9uVYKAlRbb2U=
gorm.io/driver/postgres v1.3.5 h1:oVLmefGqBTlgeEVG6LKnH6krOlo4TZ3Q/jIK21KUMlw=
gorm.io/driver/postgres v1.3.5/go.mod h1:EGCWefLFQSVFrHGy4J8EtiHCWX5Q8t0yz2Jt9aKkGzU=
gorm.io/driver/postgres v1.3.6 h1:Q0iLoYvWwsJVpYQrSrY5p5P4YzW7fJjFMBG2sa4Bz5U=
gorm.io/driver/postgres v1.3.6/go.mod h1:f02ympjIcgtHEGFMZvdgTxODZ9snAHDb4hXfigBVuNI=
gorm.io/gorm v1.23.1/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.23.4/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.23.5 h1:TnlF26wScKSvknUC/Rn8t0NLLM22fypYBlvj1+aH6dM=

@ -1,3 +1,9 @@
# 1.12.1 (May 7, 2022)
* Fix: setting krbspn and krbsrvname in connection string (sireax)
* Add support for Unix sockets on Windows (Eno Compton)
* Stop ignoring ErrorResponse during SCRAM auth (Rafi Shamim)
# 1.12.0 (April 21, 2022)
* Add pluggable GSSAPI support (Oliver Tan)
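To illustrate the krbspn/krbsrvname handling noted above, here is an editorial sketch that is not part of this commit; the host, user and SPN values are made up. The two keys can be given in a connection string and surface as the new Config fields added in config.go further down:

package main

import (
	"fmt"
	"log"

	"github.com/jackc/pgconn"
)

func main() {
	// krbsrvname and krbspn are recognized connection parameters as of pgconn v1.12.1.
	// The host, user and SPN below are invented for illustration.
	cfg, err := pgconn.ParseConfig("host=db.example.com user=app krbsrvname=postgres krbspn=postgres/db.example.com@EXAMPLE.ORG")
	if err != nil {
		log.Fatal(err)
	}
	// Both values are now exposed directly on the parsed Config.
	fmt.Println(cfg.KerberosSrvName, cfg.KerberosSpn)
}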

@ -78,12 +78,14 @@ func (c *PgConn) rxSASLContinue() (*pgproto3.AuthenticationSASLContinue, error)
if err != nil {
return nil, err
}
saslContinue, ok := msg.(*pgproto3.AuthenticationSASLContinue)
if ok {
return saslContinue, nil
switch m := msg.(type) {
case *pgproto3.AuthenticationSASLContinue:
return m, nil
case *pgproto3.ErrorResponse:
return nil, ErrorResponseToPgError(m)
}
return nil, errors.New("expected AuthenticationSASLContinue message but received unexpected message")
return nil, fmt.Errorf("expected AuthenticationSASLContinue message but received unexpected message %T", msg)
}
func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
@ -91,12 +93,14 @@ func (c *PgConn) rxSASLFinal() (*pgproto3.AuthenticationSASLFinal, error) {
if err != nil {
return nil, err
}
saslFinal, ok := msg.(*pgproto3.AuthenticationSASLFinal)
if ok {
return saslFinal, nil
switch m := msg.(type) {
case *pgproto3.AuthenticationSASLFinal:
return m, nil
case *pgproto3.ErrorResponse:
return nil, ErrorResponseToPgError(m)
}
return nil, errors.New("expected AuthenticationSASLFinal message but received unexpected message")
return nil, fmt.Errorf("expected AuthenticationSASLFinal message but received unexpected message %T", msg)
}
type scramClient struct {

@ -41,7 +41,9 @@ type Config struct {
BuildFrontend BuildFrontendFunc
RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name)
Fallbacks []*FallbackConfig
KerberosSrvName string
KerberosSpn string
Fallbacks []*FallbackConfig
// ValidateConnect is called during a connection attempt after a successful authentication with the PostgreSQL server.
// It can be used to validate that the server is acceptable. If this returns an error the connection is closed and the next
@ -98,10 +100,29 @@ type FallbackConfig struct {
TLSConfig *tls.Config // nil disables TLS
}
// isAbsolutePath checks if the provided value is an absolute path either
// beginning with a forward slash (as on Linux-based systems) or with a capital
// letter A-Z followed by a colon and a backslash, e.g., "C:\", (as on Windows).
func isAbsolutePath(path string) bool {
isWindowsPath := func(p string) bool {
if len(p) < 3 {
return false
}
drive := p[0]
colon := p[1]
backslash := p[2]
if drive >= 'A' && drive <= 'Z' && colon == ':' && backslash == '\\' {
return true
}
return false
}
return strings.HasPrefix(path, "/") || isWindowsPath(path)
}
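For illustration only (an editorial sketch, not part of the diff; the sample hosts are invented): the exported NetworkAddress shown next uses this check, so an absolute Unix or Windows path is routed to a unix socket address while anything else stays TCP.

package main

import (
	"fmt"

	"github.com/jackc/pgconn"
)

func main() {
	// An absolute Unix path is treated as a socket directory; .s.PGSQL.<port> is appended.
	fmt.Println(pgconn.NetworkAddress("/var/run/postgresql", 5432)) // unix /var/run/postgresql/.s.PGSQL.5432
	// With this change a Windows-style absolute path is handled the same way.
	fmt.Println(pgconn.NetworkAddress(`C:\pgsockets`, 5432))
	// Anything else falls through to a TCP host:port pair.
	fmt.Println(pgconn.NetworkAddress("localhost", 5432)) // tcp localhost:5432
}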
// NetworkAddress converts a PostgreSQL host and port into network and address suitable for use with
// net.Dial.
func NetworkAddress(host string, port uint16) (network, address string) {
if strings.HasPrefix(host, "/") {
if isAbsolutePath(host) {
network = "unix"
address = filepath.Join(host, ".s.PGSQL.") + strconv.FormatInt(int64(port), 10)
} else {
@ -265,6 +286,14 @@ func ParseConfig(connString string) (*Config, error) {
"servicefile": {},
}
// Adding kerberos configuration
if _, present := settings["krbsrvname"]; present {
config.KerberosSrvName = settings["krbsrvname"]
}
if _, present := settings["krbspn"]; present {
config.KerberosSpn = settings["krbspn"]
}
for k, v := range settings {
if _, present := notRuntimeParams[k]; present {
continue

@ -41,14 +41,14 @@ func (c *PgConn) gssAuth() error {
}
var nextData []byte
if spn, ok := c.config.RuntimeParams["krbspn"]; ok {
if c.config.KerberosSpn != "" {
// Use the supplied SPN if provided.
nextData, err = cli.GetInitTokenFromSPN(spn)
nextData, err = cli.GetInitTokenFromSPN(c.config.KerberosSpn)
} else {
// Allow the kerberos service name to be overridden
service := "postgres"
if val, ok := c.config.RuntimeParams["krbsrvname"]; ok {
service = val
if c.config.KerberosSrvName != "" {
service = c.config.KerberosSrvName
}
nextData, err = cli.GetInitToken(c.config.Host, service)
}

@ -187,7 +187,7 @@ func expandWithIPs(ctx context.Context, lookupFn LookupFunc, fallbacks []*Fallba
for _, fb := range fallbacks {
// skip resolve for unix sockets
if strings.HasPrefix(fb.Host, "/") {
if isAbsolutePath(fb.Host) {
configs = append(configs, &FallbackConfig{
Host: fb.Host,
Port: fb.Port,

@ -1,3 +1,8 @@
# 4.16.1 (May 7, 2022)
* Upgrade pgconn to v1.12.1
* Fix explicitly prepared statements with describe statement cache mode
# 4.16.0 (April 21, 2022)
* Upgrade pgconn to v1.12.0
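To show the scenario behind the "describe statement cache mode" entry above, here is an editorial sketch that is not part of this commit: the connection string is invented, a reachable PostgreSQL server is assumed, and the stmtcache wiring follows the usual pgx v4 pattern rather than anything in this diff. It explicitly prepares a statement and then queries it by name, the combination addressed by the conn.go change further down.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/jackc/pgconn"
	"github.com/jackc/pgconn/stmtcache"
	"github.com/jackc/pgx/v4"
)

func main() {
	ctx := context.Background()

	// Hypothetical DSN; a running PostgreSQL server is assumed.
	cfg, err := pgx.ParseConfig("postgres://app@localhost:5432/appdb")
	if err != nil {
		log.Fatal(err)
	}
	// Opt into the describe statement cache mode that the 4.16.1 fix concerns.
	cfg.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache {
		return stmtcache.New(conn, stmtcache.ModeDescribe, 512)
	}

	conn, err := pgx.ConnectConfig(ctx, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	// Explicitly prepared statement, then queried by its name; in describe mode
	// this path previously bypassed the prepared statement.
	if _, err := conn.Prepare(ctx, "one", "select $1::int"); err != nil {
		log.Fatal(err)
	}
	var n int
	if err := conn.QueryRow(ctx, "one", 1).Scan(&n); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n)
}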

@ -185,3 +185,7 @@ Library for scanning data from a database into Go structs and more.
### [https://github.com/otan/gopgkrb5](https://github.com/otan/gopgkrb5)
Adds GSSAPI / Kerberos authentication support.
### [https://github.com/vgarvardt/pgx-google-uuid](https://github.com/vgarvardt/pgx-google-uuid)
Adds support for [`github.com/google/uuid`](https://github.com/google/uuid).

@ -645,7 +645,7 @@ optionLoop:
resultFormats = c.eqb.resultFormats
}
if c.stmtcache != nil && c.stmtcache.Mode() == stmtcache.ModeDescribe {
if c.stmtcache != nil && c.stmtcache.Mode() == stmtcache.ModeDescribe && !ok {
rows.resultReader = c.pgConn.ExecParams(ctx, sql, c.eqb.paramValues, sd.ParamOIDs, c.eqb.paramFormats, resultFormats)
} else {
rows.resultReader = c.pgConn.ExecPrepared(ctx, sd.Name, c.eqb.paramValues, c.eqb.paramFormats, resultFormats)

@ -17,6 +17,22 @@ This package provides various compression algorithms.
# changelog
* May 11, 2022 (v1.15.4)
* huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577)
* inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581)
* zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583)
* zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580)
* May 5, 2022 (v1.15.3)
* zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572)
* s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575)
* Apr 26, 2022 (v1.15.2)
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
* Minimum version is Go 1.16, added CI test on 1.18.
* Mar 11, 2022 (v1.15.1)
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)

@ -1,5 +0,0 @@
package huff0
//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}
// peekTopBits(n) is equvialent to peekBitFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}
func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63
@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() {
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}
func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}

@ -5,8 +5,6 @@
package huff0
import "fmt"
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
0xFFFF, 0xFFFF} /* up to 16 bits */
// addBits16NC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16NC(value uint16, bits uint8) {
b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63)
b.nBits += bits
}
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
// addBits16ZeroNC will add up to 16 bits.
// It will not check if there is space for them,
// so the caller must ensure that it has flushed recently.
// This is fastest if bits can be zero.
func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) {
if bits == 0 {
return
}
value <<= (16 - bits) & 15
value >>= (16 - bits) & 15
b.bitContainer |= uint64(value) << (b.nBits & 63)
b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
return
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
b.bitContainer >>= 1 << 3
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
b.bitContainer >>= 2 << 3
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
b.bitContainer >>= 3 << 3
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
b.bitContainer >>= 4 << 3
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
b.bitContainer >>= 5 << 3
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
b.bitContainer >>= 6 << 3
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
b.bitContainer >>= 7 << 3
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
b.bitContainer = 0
b.nBits = 0
return
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@ -201,10 +93,3 @@ func (b *bitWriter) close() error {
b.flushAlign()
return nil
}
// reset and continue writing by appending to out.
func (b *bitWriter) reset(out []byte) {
b.bitContainer = 0
b.nBits = 0
b.out = out
}

@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) {
b.off = 0
}
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)
}
// Int32 returns a little endian int32 starting at current offset.
func (b byteReader) Int32() int32 {
v3 := int32(b.b[b.off+3])
@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 {
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
}
// unread returns the unread portion of the input.
func (b byteReader) unread() []byte {
return b.b[b.off:]
}
// remain will return the number of bytes remaining.
func (b byteReader) remain() int {
return len(b.b) - b.off

@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool {
return true
}
//lint:ignore U1000 used for debugging
func (s *Scratch) validateTable(c cTable) bool {
if len(c) < int(s.symbolLen) {
return false

@ -11,7 +11,6 @@ import (
type dTable struct {
single []dEntrySingle
double []dEntryDouble
}
// single-symbols decoding
@ -19,13 +18,6 @@ type dEntrySingle struct {
entry uint16
}
// double-symbols decoding
type dEntryDouble struct {
seq [4]byte
nBits uint8
len uint8
}
// Uses special code for all tables that are < 8 bits.
const use8BitTables = true
@ -35,7 +27,7 @@ const use8BitTables = true
// If no Scratch is provided a new one is allocated.
// The returned Scratch can be used for encoding or decoding input using this table.
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
s, err = s.prepare(in)
s, err = s.prepare(nil)
if err != nil {
return s, nil, err
}
@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte {
return &[4][256]byte{}
}
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
const shift = 56
const tlSize = 1 << 8
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.

@ -1,488 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
// const stream = 0
// br0.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
MOVQ bitReaderShifted_value(br0), br_value
MOVQ bitReaderShifted_off(br0), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill0
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br0), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br0.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 0(buffer)(off*1)
// SECOND PART:
// val2 := br0.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br0.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br0.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br0.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 0+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
MOVQ br_value, bitReaderShifted_value(br0)
MOVQ br_offset, bitReaderShifted_off(br0)
// const stream = 1
// br1.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
MOVQ bitReaderShifted_value(br1), br_value
MOVQ bitReaderShifted_off(br1), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill1
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br1), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br1.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 256(buffer)(off*1)
// SECOND PART:
// val2 := br1.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br1.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br1.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br1.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 256+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
MOVQ br_value, bitReaderShifted_value(br1)
MOVQ br_offset, bitReaderShifted_off(br1)
// const stream = 2
// br2.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
MOVQ bitReaderShifted_value(br2), br_value
MOVQ bitReaderShifted_off(br2), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill2
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br2), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br2.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 512(buffer)(off*1)
// SECOND PART:
// val2 := br2.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br2.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br2.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br2.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 512+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
MOVQ br_value, bitReaderShifted_value(br2)
MOVQ br_offset, bitReaderShifted_off(br2)
// const stream = 3
// br3.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
MOVQ bitReaderShifted_value(br3), br_value
MOVQ bitReaderShifted_off(br3), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill3
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br3), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br3.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, 768(buffer)(off*1)
// SECOND PART:
// val2 := br3.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br3.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br3.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br3.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, 768+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
MOVQ br_value, bitReaderShifted_value(br3)
MOVQ br_offset, bitReaderShifted_off(br3)
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

@ -1,197 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// if b.bitsRead >= 32 {
CMPQ br_bits_read, $32
JB skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
// b.value |= uint64(low) << (b.bitsRead & 63)
MOVQ br_bits_read, CX
SHLQ CL, AX
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// SECOND PART:
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v2 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
// v3 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
MOVBQZX AL, CX
SHLQ CX, br_value // value <<= n
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off+2] = uint8(v2.entry >> 8)
// buf[stream][off+3] = uint8(v3.entry >> 8)
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $4, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

@ -2,30 +2,43 @@
// +build amd64,!appengine,!noasm,gc
// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
// and Decoder.Decompress1X that use an asm implementation of thir main loops.
package huff0
import (
"errors"
"fmt"
"github.com/klauspost/compress/internal/cpuinfo"
)
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800
type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
peekBits uint8
out *byte
dstEvery int
tbl *dEntrySingle
decoded int
limit *byte
}
// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if cap(dst) < fallback8BitSize && use8BitTables {
return d.decompress4X8bit(dst, src)
}
var br [4]bitReaderShifted
// Decode "jump table"
start := 6
@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int
const debug = false
// see: bitReaderShifted.peekBitsFast()
peekBits := uint8((64 - d.actualTableLog) & 63)
// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
pbr0: &br[0],
pbr1: &br[1],
pbr2: &br[2],
pbr3: &br[3],
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
out: &out[0],
dstEvery: dstEvery,
tbl: &single[0],
limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last.
}
if use8BitTables {
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
decompress4x_8b_main_loop_amd64(&ctx)
} else {
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
}
if debug {
fmt.Print("DEBUG: ")
fmt.Printf("off=%d,", off)
for i := 0; i < 4; i++ {
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
i, br[i].bitsRead, br[i].value, br[i].off)
}
fmt.Println("")
}
if off != 0 {
break
decompress4x_main_loop_amd64(&ctx)
}
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
decoded = ctx.decoded
out = out[decoded/4:]
}
// Decode remaining.
@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}
@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
type decompress1xContext struct {
pbr *bitReaderShifted
peekBits uint8
out *byte
outCap int
tbl *dEntrySingle
decoded int
}
// Error reported by asm implementations
const error_max_decoded_size_exeeded = -1
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:maxDecodedSize]
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
if maxDecodedSize >= 4 {
ctx := decompress1xContext{
pbr: &br,
out: &dst[0],
outCap: maxDecodedSize,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
tbl: &d.dt.single[0],
}
if cpuinfo.HasBMI2() {
decompress1x_main_loop_bmi2(&ctx)
} else {
decompress1x_main_loop_amd64(&ctx)
}
if ctx.decoded == error_max_decoded_size_exeeded {
return nil, ErrMaxDecodedSizeExceeded
}
dst = dst[:ctx.decoded]
}
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if len(dst) >= maxDecodedSize {
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
return dst, br.close()
}

File diff suppressed because it is too large

@ -1,195 +0,0 @@
// +build !appengine
// +build gc
// +build !noasm
#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"
#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI
#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX
#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP
MOVQ BP, 0(SP)
XORQ exhausted, exhausted // exhausted = false
XORQ off, off // off = 0
MOVBQZX peekBits+32(FP), peek_bits
MOVQ buf+40(FP), buffer
MOVQ tbl+48(FP), table
MOVQ pbr0+0(FP), br0
MOVQ pbr1+8(FP), br1
MOVQ pbr2+16(FP), br2
MOVQ pbr3+24(FP), br3
main_loop:
{{ define "decode_2_values_x86" }}
// const stream = {{ var "id" }}
// br{{ var "id"}}.fillFast()
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
// We must have at least 2 * max tablelog left
CMPQ br_bits_read, $64-22
JBE skip_fill{{ var "id" }}
SUBQ $32, br_bits_read // b.bitsRead -= 32
SUBQ $4, br_offset // b.off -= 4
// v := b.in[b.off-4 : b.off]
// v = v[:4]
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
MOVQ br_bits_read, CX
SHLQ CL, AX
#endif
ORQ AX, br_value
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
CMPQ br_offset, $4
SETLT DL
ORB DL, DH
// }
skip_fill{{ var "id" }}:
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
MOVQ br_value, AX
MOVQ peek_bits, CX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v0 := table[val0&mask]
MOVW 0(table)(AX*2), AX // AX - v0
// br{{ var "id"}}.advance(uint8(v0.entry))
MOVB AH, BL // BL = uint8(v0.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
#ifdef GOAMD64_v3
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
MOVQ peek_bits, CX
MOVQ br_value, AX
SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif
// v1 := table[val1&mask]
MOVW 0(table)(AX*2), AX // AX - v1
// br{{ var "id"}}.advance(uint8(v1.entry))
MOVB AH, BH // BH = uint8(v1.entry >> 8)
#ifdef GOAMD64_v3
MOVBQZX AL, CX
SHLXQ AX, br_value, br_value // value <<= n
#else
MOVBQZX AL, CX
SHLQ CL, br_value // value <<= n
#endif
ADDQ CX, br_bits_read // bits_read += n
// these two writes get coalesced
// buf[stream][off] = uint8(v0.entry >> 8)
// buf[stream][off+1] = uint8(v1.entry >> 8)
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
// update the bitrader reader structure
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}
{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}
{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}
{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}
ADDQ $2, off // off += 2
TESTB DH, DH // any br[i].ofs < 4?
JNZ end
CMPQ off, $bufoff
JL main_loop
end:
MOVQ 0(SP), BP
MOVB off, ret+56(FP)
RET
#undef off
#undef buffer
#undef table
#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted
#undef br0
#undef br1
#undef br2
#undef br3

@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}
return dst, nil
}
// Decompress1X will decompress a 1X encoded stream.
// The cap of the output buffer will be the maximum decompressed size.
// The length of the supplied input must match the end of a block exactly.
func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress1X8Bit(dst, src)
}
var br bitReaderShifted
err := br.init(src)
if err != nil {
return dst, err
}
maxDecodedSize := cap(dst)
dst = dst[:0]
// Avoid bounds check by always having full sized table.
const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
dt := d.dt.single[:tlSize]
// Use temp table to avoid bound checks/append penalty.
bufs := d.buffer()
buf := &bufs[0]
var off uint8
for br.off >= 8 {
br.fillFast()
v := dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+0] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+1] = uint8(v.entry >> 8)
// Refill
br.fillFast()
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+2] = uint8(v.entry >> 8)
v = dt[br.peekBitsFast(d.actualTableLog)&tlMask]
br.advance(uint8(v.entry))
buf[off+3] = uint8(v.entry >> 8)
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
}
}
if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:off]...)
// br < 8, so uint8 is fine
bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask]
nBits := uint8(v.entry)
br.advance(nBits)
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}
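For context on the 1X API above, here is a minimal usage sketch. It assumes the Huffman table precedes the compressed payload in the input and that maxSize is a caller-supplied upper bound on the decompressed size; the package name, function name, and maxSize are illustrative, while ReadTable, Decoder, and Decompress1X are the package's existing API.

```go
package huffexample

import (
	"fmt"

	"github.com/klauspost/compress/huff0"
)

// decode1X reads the Huffman table from the front of in and decodes the
// remaining single-stream payload. The cap of dst acts as the maximum
// decompressed size, matching the Decompress1X documentation above.
func decode1X(in []byte, maxSize int) ([]byte, error) {
	s, remain, err := huff0.ReadTable(in, nil)
	if err != nil {
		return nil, err
	}
	dst := make([]byte, 0, maxSize) // cap(dst) limits the output size
	out, err := s.Decoder().Decompress1X(dst, remain)
	if err != nil {
		return nil, fmt.Errorf("huff0 1X decode: %w", err)
	}
	return out, nil
}
```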

@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 {
return v
}
func (b *bitReader) get16BitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
b.bitsRead += n
return v
}
// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {

@ -5,8 +5,6 @@
package zstd
import "fmt"
// bitWriter will write bits.
// First bit will be LSB of the first byte of output.
type bitWriter struct {
@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
b.nBits += bits
}
// flush will flush all pending full bytes.
// There will be at least 56 bits available for writing when this has been called.
// Using flush32 is faster, but leaves less space for writing.
func (b *bitWriter) flush() {
v := b.nBits >> 3
switch v {
case 0:
case 1:
b.out = append(b.out,
byte(b.bitContainer),
)
case 2:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
)
case 3:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
)
case 4:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
)
case 5:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
)
case 6:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
)
case 7:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
)
case 8:
b.out = append(b.out,
byte(b.bitContainer),
byte(b.bitContainer>>8),
byte(b.bitContainer>>16),
byte(b.bitContainer>>24),
byte(b.bitContainer>>32),
byte(b.bitContainer>>40),
byte(b.bitContainer>>48),
byte(b.bitContainer>>56),
)
default:
panic(fmt.Errorf("bits (%d) > 64", b.nBits))
}
b.bitContainer >>= v << 3
b.nBits &= 7
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {

@ -49,11 +49,8 @@ const (
// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header
maxCompressedLiteralSize = 1 << 18
maxRLELiteralSize = 1 << 20
maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff
maxMatchLen = 131074
maxSequences = 0x7f00 + 0xffff
// We support slightly less than the reference decoder to be able to
// use ints on 32 bit archs.
@ -105,7 +102,6 @@ type blockDec struct {
// Block is RLE, this is the size.
RLESize uint32
tmp [4]byte
Type blockType
@ -368,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
}
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, litRegenSize)
b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc)
} else {
if litRegenSize > maxCompressedLiteralSize {
// Exceptional
b.literalBuf = make([]byte, litRegenSize)
} else {
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
}
b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
literals = b.literalBuf[:litRegenSize]
@ -405,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
var err error
// Use our out buffer.
huff.MaxDecodedSize = maxCompressedBlockSize
huff.MaxDecodedSize = litRegenSize
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
} else {
@ -437,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
// Ensure we have space to store it.
if cap(b.literalBuf) < litRegenSize {
if b.lowMem {
b.literalBuf = make([]byte, 0, litRegenSize)
b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc)
} else {
b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc)
}
}
huff := hist.huffTree
@ -456,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
return in, err
}
hist.huffTree = huff
huff.MaxDecodedSize = maxCompressedBlockSize
huff.MaxDecodedSize = litRegenSize
// Use our out buffer.
if fourStreams {
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@ -471,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
if len(literals) != litRegenSize {
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
}
// Re-cap to get extra size.
literals = b.literalBuf[:len(literals)]
if debugDecoder {
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
}

@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
return r, nil
}
func (b *byteBuf) remain() []byte {
return *b
}
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {

@ -13,12 +13,6 @@ type byteReader struct {
off int
}
// init will initialize the reader and set the input.
func (b *byteReader) init(in []byte) {
b.b = in
b.off = 0
}
// advance the stream b n bytes.
func (b *byteReader) advance(n uint) {
b.off += int(n)

@ -439,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
if len(next.b) > 0 {
if !d.o.ignoreChecksum && len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
@ -451,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
@ -535,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) {
// Update/Check CRC
if d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b)
if !d.o.ignoreChecksum {
d.frame.crc.Write(d.current.b)
}
if d.current.d.Last {
d.current.err = d.frame.checkCRC()
if !d.o.ignoreChecksum {
d.current.err = d.frame.checkCRC()
} else {
d.current.err = d.frame.consumeCRC()
}
if d.current.err != nil {
println("CRC error:", d.current.err)
return false
@ -631,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error {
// Create Decoder:
// ASYNC:
// Spawn 4 go routines.
// 0: Read frames and decode blocks.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// 3: Wait for stream history, execute sequences, send stream history.
// Spawn 3 go routines.
// 0: Read frames and decode block literals.
// 1: Decode sequences.
// 2: Execute sequences, send to output.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
defer d.streamWg.Done()
br := readerWrapper{r: r}
var seqPrepare = make(chan *blockDec, d.o.concurrent)
var seqDecode = make(chan *blockDec, d.o.concurrent)
var seqExecute = make(chan *blockDec, d.o.concurrent)
// Async 1: Prepare blocks...
go func() {
var hist history
var hasErr bool
for block := range seqPrepare {
if hasErr {
if block != nil {
seqDecode <- block
}
continue
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 1: new history")
}
hist.reset()
if block.async.newHist.dict != nil {
hist.setDict(block.async.newHist.dict)
}
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
continue
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
close(seqDecode)
}()
// Async 2: Decode sequences...
// Async 1: Decode sequences...
go func() {
var hist history
var hasErr bool
@ -698,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
@ -752,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
}
if block.async.newHist != nil {
if debugDecoder {
println("Async 3: new history")
println("Async 2: new history")
}
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
@ -839,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
decodeStream:
for {
var hist history
var hasErr bool
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
seqDecode <- block
}
return
}
if block.err != nil || block.Type != blockTypeCompressed {
hasErr = block.err != nil
seqDecode <- block
return
}
remain, err := block.decodeLiterals(block.data, &hist)
block.err = err
hasErr = block.err != nil
if err == nil {
block.async.literals = hist.decoders.literals
block.async.seqData = remain
} else if debugDecoder {
println("decodeLiterals error:", err)
}
seqDecode <- block
}
frame := d.frame
if debugDecoder {
println("New frame...")
@ -865,7 +856,7 @@ decodeStream:
case <-ctx.Done():
case dec := <-d.decoders:
dec.sendErr(err)
seqPrepare <- dec
decodeBlock(dec)
}
break decodeStream
}
@ -885,6 +876,10 @@ decodeStream:
if debugDecoder {
println("Alloc History:", h.allocFrameBuffer)
}
hist.reset()
if h.dict != nil {
hist.setDict(h.dict)
}
dec.async.newHist = &h
dec.async.fcs = frame.FrameContentSize
historySent = true
@ -911,7 +906,7 @@ decodeStream:
}
err = dec.err
last := dec.Last
seqPrepare <- dec
decodeBlock(dec)
if err != nil {
break decodeStream
}
@ -920,7 +915,7 @@ decodeStream:
}
}
}
close(seqPrepare)
close(seqDecode)
wg.Wait()
d.frame.history.b = frameHistCache
}

@ -19,6 +19,7 @@ type decoderOptions struct {
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
}
func (o *decoderOptions) setDefault() {
@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
// IgnoreChecksum allows forcibly ignoring checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}
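A minimal sketch of how the new option can be wired into a streaming decode; the checksum bytes are still consumed from the frame even though verification is skipped. The helper function name is illustrative; NewReader, Close, and IgnoreChecksum are the package's API.

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

// decompressIgnoringChecksum decodes a zstd stream without verifying frame CRCs.
func decompressIgnoringChecksum(r io.Reader, w io.Writer) error {
	dec, err := zstd.NewReader(r, zstd.IgnoreChecksum(true))
	if err != nil {
		return err
	}
	defer dec.Close()
	_, err = io.Copy(w, dec)
	return err
}

func main() {
	if err := decompressIgnoringChecksum(os.Stdin, os.Stdout); err != nil {
		panic(err)
	}
}
```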

@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error {
return ErrWindowSizeTooSmall
}
d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize {
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
// Alloc 2x window size if not low-mem, or very small window size.
d.history.allocFrameBuffer = d.history.windowSize * 2
// TODO: Maybe use FrameContent size
} else {
// Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error {
return err
}
if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if d.o.ignoreChecksum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
}
@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
}
return nil
}
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@ -373,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
}
}

@ -229,18 +229,10 @@ func (d decSymbol) newState() uint16 {
return uint16(d >> 16)
}
func (d decSymbol) baseline() uint32 {
return uint32(d >> 32)
}
func (d decSymbol) baselineInt() int {
return int(d >> 32)
}
func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) {
*d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
}
func (d *decSymbol) setNBits(nBits uint8) {
const mask = 0xffffffffffffff00
*d = (*d & mask) | decSymbol(nBits)
@ -256,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) {
*d = (*d & mask) | decSymbol(state)<<16
}
func (d *decSymbol) setBaseline(baseline uint32) {
const mask = 0xffffffff
*d = (*d & mask) | decSymbol(baseline)<<32
}
func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
const mask = 0xffff00ff
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
@ -377,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) {
s.state = dt[br.getBits(tableLog)]
}
// next returns the current symbol and sets the next state.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) next(br *bitReader) {
lowBits := uint16(br.getBits(s.state.nbBits()))
s.state = s.dt[s.state.newState()+lowBits]
}
// finished returns true if all bits have been read from the bitstream
// and the next state would require reading bits from the input.
func (s *fseState) finished(br *bitReader) bool {
return br.finished() && s.state.nbBits() > 0
}
// final returns the current state symbol without decoding the next.
func (s *fseState) final() (int, uint8) {
return s.state.baselineInt(), s.state.addBits()
}
// final returns the current state symbol without decoding the next.
func (s decSymbol) final() (int, uint8) {
return s.baselineInt(), s.addBits()
}
// nextFast returns the next symbol and sets the next state.
// This can only be used if no symbols are 0 bits.
// At least tablelog bits must be available in the bit reader.
func (s *fseState) nextFast(br *bitReader) (uint32, uint8) {
lowBits := br.get16BitsFast(s.state.nbBits())
s.state = s.dt[s.state.newState()+lowBits]
return s.state.baseline(), s.state.addBits()
}

@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
s.clearCount = maxCount != 0
}
// prepare will prepare and allocate scratch tables used for both compression and decompression.
func (s *fseEncoder) prepare() (*fseEncoder, error) {
if s == nil {
s = &fseEncoder{}
}
s.useRLE = false
if s.clearCount && s.maxCount == 0 {
for i := range s.count {
s.count[i] = 0
}
s.clearCount = false
}
return s, nil
}
// allocCtable will allocate tables needed for compression.
// If existing tables are big enough, they are simply re-used.
func (s *fseEncoder) allocCtable() {
@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) {
c.state = c.stateTable[lu]
}
// encode the output symbol provided and write it to the bitstream.
func (c *cState) encode(symbolTT symbolTransform) {
nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16
dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState)
c.bw.addBits16NC(c.state, uint8(nbBitsOut))
c.state = c.stateTable[dstState]
}
// flush will write the tablelog to the output and flush the remaining full bytes.
func (c *cState) flush(tableLog uint8) {
c.bw.flush32()

@ -1,11 +0,0 @@
//go:build ignorecrc
// +build ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true

@ -1,11 +0,0 @@
//go:build !ignorecrc
// +build !ignorecrc
// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.
package zstd
// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false

@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 {
return (uint32(u) * prime4bytes) >> (32 - length)
}
}
// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash3(u uint32, h uint8) uint32 {
return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31)
}

@ -188,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
@ -203,12 +204,11 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(hist []byte) error {
if true {
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
@ -396,6 +396,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
@ -418,16 +419,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return br.close()
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) update(br *bitReader) {
// Max 8 bits
s.litLengths.state.next(br)
// Max 9 bits
s.matchLengths.state.next(br)
// Max 8 bits
s.offsets.state.next(br)
}
var bitMask [16]uint16
func init() {
@ -436,87 +427,6 @@ func init() {
}
}
// update states, at least 27 bits must be available.
func (s *sequenceDecs) updateAlt(br *bitReader) {
// Update all 3 states at once. Approx 20% faster.
a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
nBits := a.nbBits() + b.nbBits() + c.nbBits()
if nBits == 0 {
s.litLengths.state.state = s.litLengths.state.dt[a.newState()]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()]
s.offsets.state.state = s.offsets.state.dt[c.newState()]
return
}
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31))
s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits]
lowBits = uint16(bits >> (c.nbBits() & 31))
lowBits &= bitMask[b.nbBits()&15]
s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits]
lowBits = uint16(bits) & bitMask[c.nbBits()&15]
s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits]
}
// nextFast will return new states when there are at least 4 unused bytes left on the stream when done.
func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()
ml, mlB := mlState.final()
mo, moB := ofState.final()
// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)
if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
return
}
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}
if mo == 0 {
mo = s.prevOffset[0]
return
}
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}
if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("temp was 0")
temp = 1
}
if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
return
}
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
// Final will not read from stream.
ll, llB := llState.final()

@ -62,6 +62,10 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}
br := s.br
maxBlockSize := maxCompressedBlockSize
@ -301,6 +305,10 @@ type executeAsmContext struct {
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
@ -327,8 +335,12 @@ func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
literals: s.literals,
windowSize: s.windowSize,
}
ok := sequenceDecs_executeSimple_amd64(&ctx)
var ok bool
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
ok = sequenceDecs_executeSimple_safe_amd64(&ctx)
} else {
ok = sequenceDecs_executeSimple_amd64(&ctx)
}
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))

@ -705,60 +705,55 @@ sequenceDecs_decode_bmi2_fill_2_end:
MOVQ CX, (R9)
// Fill bitreader for state updates
MOVQ R13, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R13
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_bmi2_skip_update
// Update Literal Length State
MOVBQZX SI, R14
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
MOVQ R13, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R13
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_bmi2_skip_update
LEAQ (SI)(DI*1), R14
ADDQ R8, R14
MOVBQZX R14, R14
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, SI
// Load ctx.llTable
// Update Offset State
BZHIQ R8, R15, CX
SHRXQ R8, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
// Update Match Length State
MOVBQZX DI, R14
MOVQ $0x00001010, CX
BEXTRQ CX, DI, DI
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, DI
BZHIQ DI, R15, CX
SHRXQ DI, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, DI, DI
ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
// Update Offset State
MOVBQZX R8, R14
MOVQ $0x00001010, CX
BEXTRQ CX, R8, R8
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, R8
// Update Literal Length State
BZHIQ SI, R15, CX
MOVQ $0x00001010, R14
BEXTRQ R14, SI, SI
ADDQ CX, SI
// Load ctx.ofTable
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
sequenceDecs_decode_bmi2_skip_update:
// Adjust offset
@ -965,60 +960,55 @@ sequenceDecs_decode_56_bmi2_fill_end:
MOVQ CX, (R9)
// Fill bitreader for state updates
MOVQ R13, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R13
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_56_bmi2_skip_update
// Update Literal Length State
MOVBQZX SI, R14
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
MOVQ R13, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R13
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decode_56_bmi2_skip_update
LEAQ (SI)(DI*1), R14
ADDQ R8, R14
MOVBQZX R14, R14
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, SI
// Load ctx.llTable
// Update Offset State
BZHIQ R8, R15, CX
SHRXQ R8, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
// Update Match Length State
MOVBQZX DI, R14
MOVQ $0x00001010, CX
BEXTRQ CX, DI, DI
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, DI
BZHIQ DI, R15, CX
SHRXQ DI, R15, R15
MOVQ $0x00001010, R14
BEXTRQ R14, DI, DI
ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
// Update Offset State
MOVBQZX R8, R14
MOVQ $0x00001010, CX
BEXTRQ CX, R8, R8
LEAQ (DX)(R14*1), CX
MOVQ AX, R15
MOVQ CX, DX
ROLQ CL, R15
BZHIQ R14, R15, R15
ADDQ R15, R8
// Update Literal Length State
BZHIQ SI, R15, CX
MOVQ $0x00001010, R14
BEXTRQ R14, SI, SI
ADDQ CX, SI
// Load ctx.ofTable
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
sequenceDecs_decode_56_bmi2_skip_update:
// Adjust offset
@ -1162,6 +1152,228 @@ TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
// outBase += outPosition
ADDQ DI, BX
main_loop:
MOVQ (AX), R11
MOVQ 16(AX), R12
MOVQ 8(AX), R13
// Copy literals
TESTQ R11, R11
JZ check_offset
XORQ R14, R14
copy_1:
MOVUPS (SI)(R14*1), X0
MOVUPS X0, (BX)(R14*1)
ADDQ $0x10, R14
CMPQ R14, R11
JB copy_1
ADDQ R11, SI
ADDQ R11, BX
ADDQ R11, DI
	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
check_offset:
LEAQ (DI)(R10*1), R11
CMPQ R12, R11
JG error_match_off_too_big
CMPQ R12, R8
JG error_match_off_too_big
// Copy match from history
MOVQ R12, R11
SUBQ DI, R11
JLS copy_match
MOVQ R9, R14
SUBQ R11, R14
CMPQ R13, R11
JGE copy_all_from_history
XORQ R11, R11
TESTQ $0x00000001, R13
JZ copy_4_word
MOVB (R14)(R11*1), R12
MOVB R12, (BX)(R11*1)
ADDQ $0x01, R11
copy_4_word:
TESTQ $0x00000002, R13
JZ copy_4_dword
MOVW (R14)(R11*1), R12
MOVW R12, (BX)(R11*1)
ADDQ $0x02, R11
copy_4_dword:
TESTQ $0x00000004, R13
JZ copy_4_qword
MOVL (R14)(R11*1), R12
MOVL R12, (BX)(R11*1)
ADDQ $0x04, R11
copy_4_qword:
TESTQ $0x00000008, R13
JZ copy_4_test
MOVQ (R14)(R11*1), R12
MOVQ R12, (BX)(R11*1)
ADDQ $0x08, R11
JMP copy_4_test
copy_4:
MOVUPS (R14)(R11*1), X0
MOVUPS X0, (BX)(R11*1)
ADDQ $0x10, R11
copy_4_test:
CMPQ R11, R13
JB copy_4
ADDQ R13, DI
ADDQ R13, BX
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
JB main_loop
JMP loop_finished
copy_all_from_history:
XORQ R15, R15
TESTQ $0x00000001, R11
JZ copy_5_word
MOVB (R14)(R15*1), BP
MOVB BP, (BX)(R15*1)
ADDQ $0x01, R15
copy_5_word:
TESTQ $0x00000002, R11
JZ copy_5_dword
MOVW (R14)(R15*1), BP
MOVW BP, (BX)(R15*1)
ADDQ $0x02, R15
copy_5_dword:
TESTQ $0x00000004, R11
JZ copy_5_qword
MOVL (R14)(R15*1), BP
MOVL BP, (BX)(R15*1)
ADDQ $0x04, R15
copy_5_qword:
TESTQ $0x00000008, R11
JZ copy_5_test
MOVQ (R14)(R15*1), BP
MOVQ BP, (BX)(R15*1)
ADDQ $0x08, R15
JMP copy_5_test
copy_5:
MOVUPS (R14)(R15*1), X0
MOVUPS X0, (BX)(R15*1)
ADDQ $0x10, R15
copy_5_test:
CMPQ R15, R11
JB copy_5
ADDQ R11, BX
ADDQ R11, DI
SUBQ R11, R13
// Copy match from the current buffer
copy_match:
TESTQ R13, R13
JZ handle_loop
MOVQ BX, R11
SUBQ R12, R11
// ml <= mo
CMPQ R13, R12
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, DI
MOVQ BX, R12
ADDQ R13, BX
copy_2:
MOVUPS (R11), X0
MOVUPS X0, (R12)
ADDQ $0x10, R11
ADDQ $0x10, R12
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
ADDQ R13, DI
copy_slow_3:
MOVB (R11), R12
MOVB R12, (BX)
INCQ R11
INCQ BX
DECQ R13
JNZ copy_slow_3
handle_loop:
ADDQ $0x18, AX
INCQ DX
CMPQ DX, CX
JB main_loop
loop_finished:
// Return value
MOVB $0x01, ret+8(FP)
// Update the context
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
MOVQ SI, 112(AX)
RET
error_match_off_too_big:
// Return value
MOVB $0x00, ret+8(FP)
// Update the context
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
MOVQ 80(AX), CX
SUBQ CX, SI
MOVQ SI, 112(AX)
RET
empty_seqs:
// Return value
MOVB $0x01, ret+8(FP)
RET
// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9
MOVQ ctx+0(FP), R10
MOVQ 8(R10), CX
TESTQ CX, CX
JZ empty_seqs
MOVQ (R10), AX
MOVQ 24(R10), DX
MOVQ 32(R10), BX
MOVQ 80(R10), SI
MOVQ 104(R10), DI
MOVQ 120(R10), R8
MOVQ 56(R10), R9
MOVQ 64(R10), R10
ADDQ R10, R9
// seqsBase += 24 * seqIndex
LEAQ (DX)(DX*2), R11
SHLQ $0x03, R11
ADDQ R11, AX
// outBase += outPosition
ADDQ DI, BX
main_loop:
MOVQ (AX), R11
MOVQ 16(AX), R12
@ -1326,30 +1538,58 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ R12, R12
ADDQ R13, DI
XORQ R12, R12
TESTQ $0x00000001, R13
JZ copy_2_word
MOVB (R11)(R12*1), R14
MOVB R14, (BX)(R12*1)
ADDQ $0x01, R12
copy_2_word:
TESTQ $0x00000002, R13
JZ copy_2_dword
MOVW (R11)(R12*1), R14
MOVW R14, (BX)(R12*1)
ADDQ $0x02, R12
copy_2_dword:
TESTQ $0x00000004, R13
JZ copy_2_qword
MOVL (R11)(R12*1), R14
MOVL R14, (BX)(R12*1)
ADDQ $0x04, R12
copy_2_qword:
TESTQ $0x00000008, R13
JZ copy_2_test
MOVQ (R11)(R12*1), R14
MOVQ R14, (BX)(R12*1)
ADDQ $0x08, R12
JMP copy_2_test
copy_2:
MOVUPS (R11)(R12*1), X0
MOVUPS X0, (BX)(R12*1)
ADDQ $0x10, R12
CMPQ R12, R13
JB copy_2
ADDQ R13, BX
ADDQ R13, DI
JMP handle_loop
copy_2_test:
CMPQ R12, R13
JB copy_2
ADDQ R13, BX
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, DI
copy_slow_3:
MOVB (R11)(R12*1), R14
MOVB R14, (BX)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, BX
ADDQ R13, DI
MOVB (R11), R12
MOVB R12, (BX)
INCQ R11
INCQ BX
DECQ R13
JNZ copy_slow_3
handle_loop:
ADDQ $0x18, AX
@ -1673,45 +1913,16 @@ sequenceDecs_decodeSync_amd64_match_len_ofs_ok:
TESTQ AX, AX
JZ check_offset
XORQ R14, R14
TESTQ $0x00000001, AX
JZ copy_1_word
MOVB (R11)(R14*1), R15
MOVB R15, (R10)(R14*1)
ADDQ $0x01, R14
copy_1_word:
TESTQ $0x00000002, AX
JZ copy_1_dword
MOVW (R11)(R14*1), R15
MOVW R15, (R10)(R14*1)
ADDQ $0x02, R14
copy_1_dword:
TESTQ $0x00000004, AX
JZ copy_1_qword
MOVL (R11)(R14*1), R15
MOVL R15, (R10)(R14*1)
ADDQ $0x04, R14
copy_1_qword:
TESTQ $0x00000008, AX
JZ copy_1_test
MOVQ (R11)(R14*1), R15
MOVQ R15, (R10)(R14*1)
ADDQ $0x08, R14
JMP copy_1_test
copy_1:
MOVUPS (R11)(R14*1), X0
MOVUPS X0, (R10)(R14*1)
ADDQ $0x10, R14
copy_1_test:
CMPQ R14, AX
JB copy_1
ADDQ AX, R11
ADDQ AX, R10
ADDQ AX, R12
CMPQ R14, AX
JB copy_1
ADDQ AX, R11
ADDQ AX, R10
ADDQ AX, R12
	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
check_offset:
@ -1826,30 +2037,30 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ CX, CX
ADDQ R13, R12
MOVQ R10, CX
ADDQ R13, R10
copy_2:
MOVUPS (AX)(CX*1), X0
MOVUPS X0, (R10)(CX*1)
MOVUPS (AX), X0
MOVUPS X0, (CX)
ADDQ $0x10, AX
ADDQ $0x10, CX
CMPQ CX, R13
JB copy_2
ADDQ R13, R10
ADDQ R13, R12
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ CX, CX
ADDQ R13, R12
copy_slow_3:
MOVB (AX)(CX*1), R14
MOVB R14, (R10)(CX*1)
INCQ CX
CMPQ CX, R13
JB copy_slow_3
ADDQ R13, R10
ADDQ R13, R12
MOVB (AX), CL
MOVB CL, (R10)
INCQ AX
INCQ R10
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), AX
@ -2044,60 +2255,55 @@ sequenceDecs_decodeSync_bmi2_fill_2_end:
MOVQ CX, 24(SP)
// Fill bitreader for state updates
MOVQ R12, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R12
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_bmi2_skip_update
// Update Literal Length State
MOVBQZX SI, R13
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
MOVQ R12, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R12
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_bmi2_skip_update
LEAQ (SI)(DI*1), R13
ADDQ R8, R13
MOVBQZX R13, R13
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, SI
// Load ctx.llTable
// Update Offset State
BZHIQ R8, R14, CX
SHRXQ R8, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
// Update Match Length State
MOVBQZX DI, R13
MOVQ $0x00001010, CX
BEXTRQ CX, DI, DI
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, DI
BZHIQ DI, R14, CX
SHRXQ DI, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, DI, DI
ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
// Update Offset State
MOVBQZX R8, R13
MOVQ $0x00001010, CX
BEXTRQ CX, R8, R8
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, R8
// Update Literal Length State
BZHIQ SI, R14, CX
MOVQ $0x00001010, R13
BEXTRQ R13, SI, SI
ADDQ CX, SI
// Load ctx.ofTable
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_bmi2_skip_update:
// Adjust offset
@ -2180,45 +2386,16 @@ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok:
TESTQ CX, CX
JZ check_offset
XORQ R14, R14
TESTQ $0x00000001, CX
JZ copy_1_word
MOVB (R10)(R14*1), R15
MOVB R15, (R9)(R14*1)
ADDQ $0x01, R14
copy_1_word:
TESTQ $0x00000002, CX
JZ copy_1_dword
MOVW (R10)(R14*1), R15
MOVW R15, (R9)(R14*1)
ADDQ $0x02, R14
copy_1_dword:
TESTQ $0x00000004, CX
JZ copy_1_qword
MOVL (R10)(R14*1), R15
MOVL R15, (R9)(R14*1)
ADDQ $0x04, R14
copy_1_qword:
TESTQ $0x00000008, CX
JZ copy_1_test
MOVQ (R10)(R14*1), R15
MOVQ R15, (R9)(R14*1)
ADDQ $0x08, R14
JMP copy_1_test
copy_1:
MOVUPS (R10)(R14*1), X0
MOVUPS X0, (R9)(R14*1)
ADDQ $0x10, R14
copy_1_test:
CMPQ R14, CX
JB copy_1
ADDQ CX, R10
ADDQ CX, R9
ADDQ CX, R11
CMPQ R14, CX
JB copy_1
ADDQ CX, R10
ADDQ CX, R9
ADDQ CX, R11
	// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize
check_offset:
@ -2333,30 +2510,30 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
XORQ R12, R12
ADDQ R13, R11
MOVQ R9, R12
ADDQ R13, R9
copy_2:
MOVUPS (CX)(R12*1), X0
MOVUPS X0, (R9)(R12*1)
MOVUPS (CX), X0
MOVUPS X0, (R12)
ADDQ $0x10, CX
ADDQ $0x10, R12
CMPQ R12, R13
JB copy_2
ADDQ R13, R9
ADDQ R13, R11
SUBQ $0x10, R13
JHI copy_2
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, R11
copy_slow_3:
MOVB (CX)(R12*1), R14
MOVB R14, (R9)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, R9
ADDQ R13, R11
MOVB (CX), R12
MOVB R12, (R9)
INCQ CX
INCQ R9
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), CX
@ -2862,6 +3039,7 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, R12
XORQ CX, CX
TESTQ $0x00000001, R13
JZ copy_2_word
@ -2900,21 +3078,19 @@ copy_2_test:
CMPQ CX, R13
JB copy_2
ADDQ R13, R10
ADDQ R13, R12
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ CX, CX
ADDQ R13, R12
copy_slow_3:
MOVB (AX)(CX*1), R14
MOVB R14, (R10)(CX*1)
INCQ CX
CMPQ CX, R13
JB copy_slow_3
ADDQ R13, R10
ADDQ R13, R12
MOVB (AX), CL
MOVB CL, (R10)
INCQ AX
INCQ R10
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), AX
@ -3109,60 +3285,55 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
MOVQ CX, 24(SP)
// Fill bitreader for state updates
MOVQ R12, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R12
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
// Update Literal Length State
MOVBQZX SI, R13
MOVQ $0x00001010, CX
BEXTRQ CX, SI, SI
MOVQ R12, (SP)
MOVQ $0x00000808, CX
BEXTRQ CX, R8, R12
MOVQ ctx+16(FP), CX
CMPQ 96(CX), $0x00
JZ sequenceDecs_decodeSync_safe_bmi2_skip_update
LEAQ (SI)(DI*1), R13
ADDQ R8, R13
MOVBQZX R13, R13
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, SI
// Load ctx.llTable
// Update Offset State
BZHIQ R8, R14, CX
SHRXQ R8, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, R8, R8
ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
// Update Match Length State
MOVBQZX DI, R13
MOVQ $0x00001010, CX
BEXTRQ CX, DI, DI
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, DI
BZHIQ DI, R14, CX
SHRXQ DI, R14, R14
MOVQ $0x00001010, R13
BEXTRQ R13, DI, DI
ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
MOVQ 24(CX), CX
MOVQ (CX)(DI*8), DI
// Update Offset State
MOVBQZX R8, R13
MOVQ $0x00001010, CX
BEXTRQ CX, R8, R8
LEAQ (DX)(R13*1), CX
MOVQ AX, R14
MOVQ CX, DX
ROLQ CL, R14
BZHIQ R13, R14, R14
ADDQ R14, R8
// Update Literal Length State
BZHIQ SI, R14, CX
MOVQ $0x00001010, R13
BEXTRQ R13, SI, SI
ADDQ CX, SI
// Load ctx.ofTable
// Load ctx.llTable
MOVQ ctx+16(FP), CX
MOVQ 48(CX), CX
MOVQ (CX)(R8*8), R8
MOVQ (CX), CX
MOVQ (CX)(SI*8), SI
sequenceDecs_decodeSync_safe_bmi2_skip_update:
// Adjust offset
@ -3398,6 +3569,7 @@ copy_match:
JA copy_overlapping_match
// Copy non-overlapping match
ADDQ R13, R11
XORQ R12, R12
TESTQ $0x00000001, R13
JZ copy_2_word
@ -3436,21 +3608,19 @@ copy_2_test:
CMPQ R12, R13
JB copy_2
ADDQ R13, R9
ADDQ R13, R11
JMP handle_loop
// Copy overlapping match
copy_overlapping_match:
XORQ R12, R12
ADDQ R13, R11
copy_slow_3:
MOVB (CX)(R12*1), R14
MOVB R14, (R9)(R12*1)
INCQ R12
CMPQ R12, R13
JB copy_slow_3
ADDQ R13, R9
ADDQ R13, R11
MOVB (CX), R12
MOVB R12, (R9)
INCQ CX
INCQ R9
DECQ R13
JNZ copy_slow_3
handle_loop:
MOVQ ctx+16(FP), CX

@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) {
}
}
// matchLenFast does matching, but will not match the last up to 7 bytes.
func matchLenFast(a, b []byte) int {
endI := len(a) & (math.MaxInt32 - 7)
for i := 0; i < endI; i += 8 {
if diff := load64(a, i) ^ load64(b, i); diff != 0 {
return i + bits.TrailingZeros64(diff)>>3
}
}
return endI
}
// matchLen returns the maximum length.
// a must be the shortest of the two.
// The function also returns whether all bytes matched.

@ -1,3 +0,0 @@
cmd/xurls/xurls
generate/tldsgen/tldsgen
generate/regexgen/regexgen

@ -1,4 +0,0 @@
language: go
go:
- 1.7.x

@ -1,27 +0,0 @@
Copyright (c) 2015, Daniel Martí. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,40 +0,0 @@
# xurls
[![GoDoc](https://godoc.org/github.com/mvdan/xurls?status.svg)](https://godoc.org/github.com/mvdan/xurls)
[![Travis](https://travis-ci.org/mvdan/xurls.svg?branch=master)](https://travis-ci.org/mvdan/xurls)
Extract urls from text using regular expressions.
go get -u github.com/mvdan/xurls
```go
import "github.com/mvdan/xurls"
func main() {
xurls.Relaxed.FindString("Do gophers live in golang.org?")
// "golang.org"
xurls.Strict.FindAllString("foo.com is http://foo.com/.", -1)
// []string{"http://foo.com/"}
}
```
`Relaxed` is around five times slower than `Strict` since it does more
work to find the URLs without relying on the scheme:
```
BenchmarkStrictEmpty-4 1000000 1885 ns/op
BenchmarkStrictSingle-4 200000 8356 ns/op
BenchmarkStrictMany-4 100000 22547 ns/op
BenchmarkRelaxedEmpty-4 200000 7284 ns/op
BenchmarkRelaxedSingle-4 30000 58557 ns/op
BenchmarkRelaxedMany-4 10000 130251 ns/op
```
#### cmd/xurls
go get -u github.com/mvdan/xurls/cmd/xurls
```shell
$ echo "Do gophers live in http://golang.org?" | xurls
http://golang.org
```

File diff suppressed because one or more lines are too long

@ -1,17 +0,0 @@
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package xurls
// SchemesNoAuthority is a sorted list of some well-known url schemes that are
// followed by ":" instead of "://". Since these are more prone to false
// positives, we limit their matching.
var SchemesNoAuthority = []string{
`bitcoin`, // Bitcoin
`file`, // Files
`magnet`, // Torrent magnets
`mailto`, // Mail
`sms`, // SMS
`tel`, // Telephone
`xmpp`, // XMPP
}

File diff suppressed because it is too large

@ -1,24 +0,0 @@
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package xurls
// PseudoTLDs is a sorted list of some widely used unofficial TLDs.
//
// Sources:
// * https://en.wikipedia.org/wiki/Pseudo-top-level_domain
// * https://en.wikipedia.org/wiki/Category:Pseudo-top-level_domains
// * https://tools.ietf.org/html/draft-grothoff-iesg-special-use-p2p-names-00
// * https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml
var PseudoTLDs = []string{
`bit`, // Namecoin
`example`, // Example domain
`exit`, // Tor exit node
`gnu`, // GNS by public key
`i2p`, // I2P network
`invalid`, // Invalid domain
`local`, // Local network
`localhost`, // Local network
`test`, // Test domain
`zkey`, // GNS domain name
}

@ -1,67 +0,0 @@
// Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
// Package xurls extracts urls from plain text using regular expressions.
package xurls
import "regexp"
//go:generate go run generate/tldsgen/main.go
//go:generate go run generate/regexgen/main.go
const (
letter = `\p{L}`
mark = `\p{M}`
number = `\p{N}`
iriChar = letter + mark + number
currency = `\p{Sc}`
otherSymb = `\p{So}`
endChar = iriChar + `/\-+_&~*%=#` + currency + otherSymb
midChar = endChar + `@.,:;'?!|`
wellParen = `\([` + midChar + `]*(\([` + midChar + `]*\)[` + midChar + `]*)*\)`
wellBrack = `\[[` + midChar + `]*(\[[` + midChar + `]*\][` + midChar + `]*)*\]`
wellBrace = `\{[` + midChar + `]*(\{[` + midChar + `]*\}[` + midChar + `]*)*\}`
wellAll = wellParen + `|` + wellBrack + `|` + wellBrace
pathCont = `([` + midChar + `]*(` + wellAll + `|[` + endChar + `])+)+`
comScheme = `[a-zA-Z][a-zA-Z.\-+]*://`
scheme = `(` + comScheme + `|` + otherScheme + `)`
iri = `[` + iriChar + `]([` + iriChar + `\-]*[` + iriChar + `])?`
domain = `(` + iri + `\.)+`
octet = `(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])`
ipv4Addr = `\b` + octet + `\.` + octet + `\.` + octet + `\.` + octet + `\b`
ipv6Addr = `([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:[0-9a-fA-F]{0,4}|:[0-9a-fA-F]{1,4})?|(:[0-9a-fA-F]{1,4}){0,2})|(:[0-9a-fA-F]{1,4}){0,3})|(:[0-9a-fA-F]{1,4}){0,4})|:(:[0-9a-fA-F]{1,4}){0,5})((:[0-9a-fA-F]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9a-fA-F]{1,4}:){1,6}|:):[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){7}:`
ipAddr = `(` + ipv4Addr + `|` + ipv6Addr + `)`
site = domain + gtld
hostName = `(` + site + `|` + ipAddr + `)`
port = `(:[0-9]*)?`
path = `(/|/` + pathCont + `?|\b|$)`
webURL = hostName + port + path
strict = `(\b` + scheme + pathCont + `)`
relaxed = `(` + strict + `|` + webURL + `)`
)
var (
// Relaxed matches all the urls it can find.
Relaxed = regexp.MustCompile(relaxed)
// Strict only matches urls with a scheme to avoid false positives.
Strict = regexp.MustCompile(strict)
)
func init() {
Relaxed.Longest()
Strict.Longest()
}
// StrictMatchingScheme produces a regexp that matches urls like Strict but
// whose scheme matches the given regular expression.
func StrictMatchingScheme(exp string) (*regexp.Regexp, error) {
strictMatching := `(\b(?i)(` + exp + `)(?-i)` + pathCont + `)`
re, err := regexp.Compile(strictMatching)
if err != nil {
return nil, err
}
re.Longest()
return re, nil
}

@ -114,7 +114,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
batchErr.Labels = writeErr.Labels
batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
}
batchRes.InsertedCount = int64(res.N)
batchRes.InsertedCount = res.N
case *DeleteOneModel, *DeleteManyModel:
res, err := bw.runDelete(ctx, batch)
if err != nil {
@ -126,7 +126,7 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
batchErr.Labels = writeErr.Labels
batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
}
batchRes.DeletedCount = int64(res.N)
batchRes.DeletedCount = res.N
case *ReplaceOneModel, *UpdateOneModel, *UpdateManyModel:
res, err := bw.runUpdate(ctx, batch)
if err != nil {
@ -138,8 +138,8 @@ func (bw *bulkWrite) runBatch(ctx context.Context, batch bulkWriteBatch) (BulkWr
batchErr.Labels = writeErr.Labels
batchErr.WriteConcernError = convertDriverWriteConcernError(writeErr.WriteConcernError)
}
batchRes.MatchedCount = int64(res.N)
batchRes.ModifiedCount = int64(res.NModified)
batchRes.MatchedCount = res.N
batchRes.ModifiedCount = res.NModified
batchRes.UpsertedCount = int64(len(res.Upserted))
for _, upsert := range res.Upserted {
batchRes.UpsertedIDs[int64(batch.indexes[upsert.Index])] = upsert.ID

@ -473,7 +473,7 @@ func (coll *Collection) delete(ctx context.Context, filter interface{}, deleteOn
if rr&expectedRr == 0 {
return nil, err
}
return &DeleteResult{DeletedCount: int64(op.Result().N)}, err
return &DeleteResult{DeletedCount: op.Result().N}, err
}
// DeleteOne executes a delete command to delete at most one document from the collection.
@ -582,8 +582,8 @@ func (coll *Collection) updateOrReplace(ctx context.Context, filter bsoncore.Doc
opRes := op.Result()
res := &UpdateResult{
MatchedCount: int64(opRes.N),
ModifiedCount: int64(opRes.NModified),
MatchedCount: opRes.N,
ModifiedCount: opRes.NModified,
UpsertedCount: int64(len(opRes.Upserted)),
}
if len(opRes.Upserted) > 0 {

@ -7,4 +7,4 @@
package version // import "go.mongodb.org/mongo-driver/version"
// Driver is the current version of the driver.
var Driver = "v1.9.0"
var Driver = "v1.9.1"

@ -18,7 +18,6 @@ import (
"io/ioutil"
"math/big"
"net/http"
"net/url"
"time"
"golang.org/x/crypto/ocsp"
@ -28,9 +27,6 @@ import (
var (
tlsFeatureExtensionOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 24}
mustStapleFeatureValue = big.NewInt(5)
defaultRequestTimeout = 5 * time.Second
errGotOCSPResponse = errors.New("done")
)
// Error represents an OCSP verification error
@ -126,10 +122,7 @@ func getParsedResponse(ctx context.Context, cfg config, connState tls.Connection
if cfg.disableEndpointChecking {
return nil, nil
}
externalResponse, err := contactResponders(ctx, cfg)
if err != nil {
return nil, err
}
externalResponse := contactResponders(ctx, cfg)
if externalResponse == nil {
// None of the responders were available.
return nil, nil
@ -210,33 +203,21 @@ func isMustStapleCertificate(cert *x509.Certificate) (bool, error) {
return false, nil
}
// contactResponders will send a request to the OCSP responders reported by cfg.serverCert. The first response that
// conclusively identifies cfg.serverCert as good or revoked will be returned. If all responders are unavailable or no
// responder returns a conclusive status, (nil, nil) will be returned.
func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error) {
// contactResponders will send a request to all OCSP responders reported by cfg.serverCert. The
// first response that conclusively identifies cfg.serverCert as good or revoked will be returned.
// If all responders are unavailable or no responder returns a conclusive status, it returns nil.
// contactResponders will wait for up to 5 seconds to get a certificate status response.
func contactResponders(ctx context.Context, cfg config) *ResponseDetails {
if len(cfg.serverCert.OCSPServer) == 0 {
return nil, nil
return nil
}
requestCtx := ctx // Either ctx or a new context derived from ctx with a five second timeout.
userContextUsed := true
var cancelFn context.CancelFunc
// Use a context with defaultRequestTimeout if ctx does not have a deadline set or the current deadline is further
	// out than defaultRequestTimeout. If the current deadline is less than defaultRequestTimeout, respect
// it. Calling context.WithTimeout would do this for us, but we need to know which context we're using.
wantDeadline := time.Now().Add(defaultRequestTimeout)
if deadline, ok := ctx.Deadline(); !ok || deadline.After(wantDeadline) {
userContextUsed = false
requestCtx, cancelFn = context.WithDeadline(ctx, wantDeadline)
}
defer func() {
if cancelFn != nil {
cancelFn()
}
}()
// Limit all OCSP responder calls to a maximum of 5 seconds or when the passed-in context expires,
// whichever happens first.
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
group, groupCtx := errgroup.WithContext(requestCtx)
group, ctx := errgroup.WithContext(ctx)
ocspResponses := make(chan *ocsp.Response, len(cfg.serverCert.OCSPServer))
defer close(ocspResponses)
@ -244,6 +225,11 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error
// Re-assign endpoint so it gets re-scoped rather than using the iteration variable in the goroutine. See
// https://golang.org/doc/faq#closures_and_goroutines.
endpoint := endpoint
// Start a group of goroutines that each attempt to request the certificate status from one
// of the OCSP endpoints listed in the server certificate. We want to "soft fail" on all
// errors, so this function never returns actual errors. Only a "done" error is returned
// when a response is received so the errgroup cancels any other in-progress requests.
group.Go(func() error {
// Use bytes.NewReader instead of bytes.NewBuffer because a bytes.Buffer is an owning representation and the
// docs recommend not using the underlying []byte after creating the buffer, so a new copy of the request
@ -252,35 +238,16 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error
if err != nil {
return nil
}
request = request.WithContext(groupCtx)
// Execute the request and handle errors as follows:
//
// 1. If the original context expired or was cancelled, propagate the error up so the caller will abort the
// verification and return control to the user.
//
// 2. If any other errors occurred, including the defaultRequestTimeout expiring, or the response has a
// non-200 status code, suppress the error because we want to ignore this responder and wait for a different
// one to respond.
request = request.WithContext(ctx)
httpResponse, err := http.DefaultClient.Do(request)
if err != nil {
urlErr, ok := err.(*url.Error)
if !ok {
return nil
}
timeout := urlErr.Timeout()
cancelled := urlErr.Err == context.Canceled // Timeout() does not return true for context.Cancelled.
if cancelled || (userContextUsed && timeout) {
// Handle the original context expiring or being cancelled. The url.Error type supports Unwrap, so
// users can use errors.Is to check for context errors.
return err
}
return nil // Ignore all other errors.
return nil
}
defer func() {
_ = httpResponse.Body.Close()
}()
if httpResponse.StatusCode != 200 {
return nil
}
@ -292,26 +259,27 @@ func contactResponders(ctx context.Context, cfg config) (*ResponseDetails, error
ocspResponse, err := ocsp.ParseResponseForCert(httpBytes, cfg.serverCert, cfg.issuer)
if err != nil || verifyResponse(cfg, ocspResponse) != nil || ocspResponse.Status == ocsp.Unknown {
// If there was an error parsing/validating the response or the response was inconclusive, suppress
// the error because we want to ignore this responder.
// If there was an error parsing/validating the response or the response was
// inconclusive, suppress the error because we want to ignore this responder.
return nil
}
// Store the response and return a sentinel error so the error group will exit and any in-flight requests
// will be cancelled.
// Send the conclusive response on the response channel and return a "done" error that
// will cause the errgroup to cancel all other in-progress requests.
ocspResponses <- ocspResponse
return errGotOCSPResponse
return errors.New("done")
})
}
if err := group.Wait(); err != nil && err != errGotOCSPResponse {
return nil, err
}
if len(ocspResponses) == 0 {
// None of the responders gave a conclusive response.
return nil, nil
_ = group.Wait()
select {
case res := <-ocspResponses:
return extractResponseDetails(res)
default:
// If there is no OCSP response on the response channel, all OCSP calls either failed or
// were inconclusive. Return nil.
return nil
}
return extractResponseDetails(<-ocspResponses), nil
}
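The "first conclusive responder wins" pattern used above can be illustrated stand-alone: a buffered result channel plus a sentinel error makes the errgroup cancel the remaining requests as soon as one succeeds, while every individual failure is swallowed (soft fail). This is only a sketch of the pattern, not the driver's code; firstResponse and the endpoints are hypothetical.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"

	"golang.org/x/sync/errgroup"
)

var errDone = errors.New("done")

// firstResponse returns the body of the first endpoint that answers with 200,
// cancelling the remaining requests via the group context.
func firstResponse(ctx context.Context, endpoints []string) ([]byte, bool) {
	group, ctx := errgroup.WithContext(ctx)
	results := make(chan []byte, len(endpoints)) // buffered so senders never block

	for _, endpoint := range endpoints {
		endpoint := endpoint // re-scope for the goroutine
		group.Go(func() error {
			req, err := http.NewRequestWithContext(ctx, http.MethodGet, endpoint, nil)
			if err != nil {
				return nil // soft fail: ignore this endpoint
			}
			resp, err := http.DefaultClient.Do(req)
			if err != nil {
				return nil
			}
			defer resp.Body.Close()
			if resp.StatusCode != http.StatusOK {
				return nil
			}
			body, err := io.ReadAll(resp.Body)
			if err != nil {
				return nil
			}
			results <- body
			return errDone // sentinel: cancels the sibling requests
		})
	}
	_ = group.Wait()

	select {
	case body := <-results:
		return body, true
	default:
		return nil, false // every endpoint failed or was inconclusive
	}
}

func main() {
	body, ok := firstResponse(context.Background(),
		[]string{"https://example.com/a", "https://example.com/b"})
	fmt.Println(ok, len(body))
}
```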
// verifyResponse checks that the provided OCSP response is valid.

@ -42,7 +42,7 @@ type Delete struct {
// DeleteResult represents a delete result returned by the server.
type DeleteResult struct {
// Number of documents successfully deleted.
N int32
N int64
}
func buildDeleteResult(response bsoncore.Document) (DeleteResult, error) {
@ -55,9 +55,9 @@ func buildDeleteResult(response bsoncore.Document) (DeleteResult, error) {
switch element.Key() {
case "n":
var ok bool
dr.N, ok = element.Value().AsInt32OK()
dr.N, ok = element.Value().AsInt64OK()
if !ok {
return dr, fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type)
return dr, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
}
}
}

@ -41,7 +41,7 @@ type Insert struct {
// InsertResult represents an insert result returned by the server.
type InsertResult struct {
// Number of documents successfully inserted.
N int32
N int64
}
func buildInsertResult(response bsoncore.Document) (InsertResult, error) {
@ -54,9 +54,9 @@ func buildInsertResult(response bsoncore.Document) (InsertResult, error) {
switch element.Key() {
case "n":
var ok bool
ir.N, ok = element.Value().AsInt32OK()
ir.N, ok = element.Value().AsInt64OK()
if !ok {
return ir, fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type)
return ir, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
}
}
}

@ -51,9 +51,9 @@ type Upsert struct {
// UpdateResult contains information for the result of an Update operation.
type UpdateResult struct {
// Number of documents matched.
N int32
N int64
// Number of documents modified.
NModified int32
NModified int64
// Information about upserted documents.
Upserted []Upsert
}
@ -68,15 +68,15 @@ func buildUpdateResult(response bsoncore.Document) (UpdateResult, error) {
switch element.Key() {
case "nModified":
var ok bool
ur.NModified, ok = element.Value().Int32OK()
ur.NModified, ok = element.Value().AsInt64OK()
if !ok {
return ur, fmt.Errorf("response field 'nModified' is type int32, but received BSON type %s", element.Value().Type)
return ur, fmt.Errorf("response field 'nModified' is type int32 or int64, but received BSON type %s", element.Value().Type)
}
case "n":
var ok bool
ur.N, ok = element.Value().Int32OK()
ur.N, ok = element.Value().AsInt64OK()
if !ok {
return ur, fmt.Errorf("response field 'n' is type int32, but received BSON type %s", element.Value().Type)
return ur, fmt.Errorf("response field 'n' is type int32 or int64, but received BSON type %s", element.Value().Type)
}
case "upserted":
arr, ok := element.Value().ArrayOK()

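These three result-parsing hunks all make the same change: the server may report the document counts ("n", "nModified") as either an int32 or an int64, so the strict Int32OK/AsInt32OK accessors are replaced with AsInt64OK, which accepts both widths. A small hand-built sketch of the difference, using a made-up bsoncore document rather than a real server reply:

package main

import (
	"fmt"

	"go.mongodb.org/mongo-driver/x/bsonx/bsoncore"
)

func main() {
	// Simulate a reply whose "n" happens to arrive as an int64.
	doc := bsoncore.NewDocumentBuilder().AppendInt64("n", 3).Build()

	elems, err := doc.Elements()
	if err != nil {
		panic(err)
	}
	for _, elem := range elems {
		if elem.Key() != "n" {
			continue
		}
		if n32, ok := elem.Value().Int32OK(); ok {
			// Only succeeds when the wire type is exactly int32, so this
			// branch is skipped for the document above.
			fmt.Println("strict int32:", n32)
		}
		if n64, ok := elem.Value().AsInt64OK(); ok {
			// Succeeds for both int32 and int64, which is what the fix
			// above relies on.
			fmt.Println("as int64:", n64)
		}
	}
}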
@ -460,6 +460,8 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
//
// This map must be kept in sync with the one in agent/client.go.
var certKeyAlgoNames = map[string]string{
CertAlgoRSAv01: KeyAlgoRSA,
CertAlgoRSASHA256v01: KeyAlgoRSASHA256,

@ -8,9 +8,12 @@ package errgroup
import (
"context"
"fmt"
"sync"
)
type token struct{}
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
@ -20,10 +23,19 @@ type Group struct {
wg sync.WaitGroup
sem chan token
errOnce sync.Once
err error
}
func (g *Group) done() {
if g.sem != nil {
<-g.sem
}
g.wg.Done()
}
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
@ -45,14 +57,48 @@ func (g *Group) Wait() error {
}
// Go calls the given function in a new goroutine.
// It blocks until the new goroutine can be added without the number of
// active goroutines in the group exceeding the configured limit.
//
// The first call to return a non-nil error cancels the group; its error will be
// returned by Wait.
func (g *Group) Go(f func() error) {
if g.sem != nil {
g.sem <- token{}
}
g.wg.Add(1)
go func() {
defer g.done()
if err := f(); err != nil {
g.errOnce.Do(func() {
g.err = err
if g.cancel != nil {
g.cancel()
}
})
}
}()
}
// TryGo calls the given function in a new goroutine only if the number of
// active goroutines in the group is currently below the configured limit.
//
// The return value reports whether the goroutine was started.
func (g *Group) TryGo(f func() error) bool {
if g.sem != nil {
select {
case g.sem <- token{}:
// Note: this allows barging iff channels in general allow barging.
default:
return false
}
}
g.wg.Add(1)
go func() {
defer g.wg.Done()
defer g.done()
if err := f(); err != nil {
g.errOnce.Do(func() {
@ -63,4 +109,23 @@ func (g *Group) Go(f func() error) {
})
}
}()
return true
}
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.
//
// The limit must not be modified while any goroutines in the group are active.
func (g *Group) SetLimit(n int) {
if n < 0 {
g.sem = nil
return
}
if len(g.sem) != 0 {
panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", len(g.sem)))
}
g.sem = make(chan token, n)
}

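The errgroup hunk above adds a concurrency limit to the package: SetLimit caps the number of active goroutines, Go blocks when the cap is reached, and TryGo starts the function only if a slot is free right now. A brief usage sketch with placeholder work and an arbitrary limit, assuming the golang.org/x/sync version pinned in this commit:

package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	// At most 2 of the submitted functions run concurrently; further Go
	// calls block until a slot frees up.
	g.SetLimit(2)

	for i := 0; i < 5; i++ {
		i := i
		g.Go(func() error {
			time.Sleep(10 * time.Millisecond) // stand-in for real work
			fmt.Println("done", i)
			return nil
		})
	}

	// TryGo only starts the function if a slot is free at this moment.
	if !g.TryGo(func() error { return nil }) {
		fmt.Println("group is at its limit; task not started")
	}

	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}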
@ -106,8 +106,8 @@ var ARM64 struct {
// ARM contains the supported CPU features of the current ARM (32-bit) platform.
// All feature flags are false if:
// 1. the current platform is not arm, or
// 2. the current operating system is not Linux.
// 1. the current platform is not arm, or
// 2. the current operating system is not Linux.
var ARM struct {
_ CacheLinePad
HasSWP bool // SWP instruction support

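For context, the feature flags in these cpu structs are plain booleans that can be tested directly at runtime; a one-line check, assuming golang.org/x/sys/cpu is imported (on any platform other than 32-bit ARM Linux the field is simply false):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// False unless the binary runs on 32-bit ARM Linux with NEON support.
	if cpu.ARM.HasNEON {
		fmt.Println("NEON SIMD available")
	} else {
		fmt.Println("NEON SIMD not detected")
	}
}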
@ -8,7 +8,6 @@
package unix
import (
"bytes"
"unsafe"
)
@ -45,13 +44,7 @@ func NewIfreq(name string) (*Ifreq, error) {
// Name returns the interface name associated with the Ifreq.
func (ifr *Ifreq) Name() string {
// BytePtrToString requires a NULL terminator or the program may crash. If
// one is not present, just return the empty string.
if !bytes.Contains(ifr.raw.Ifrn[:], []byte{0x00}) {
return ""
}
return BytePtrToString(&ifr.raw.Ifrn[0])
return ByteSliceToString(ifr.raw.Ifrn[:])
}
// According to netdevice(7), only AF_INET addresses are returned for numerous

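The Ifreq.Name change above swaps BytePtrToString, which needs a NUL terminator and forced the explicit bytes.Contains guard, for ByteSliceToString, which stops at the first NUL and otherwise returns the whole slice. A tiny illustration with made-up interface-name bytes (it builds only on a Unix platform, since it imports golang.org/x/sys/unix):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// NUL-terminated, as a kernel-filled ifreq name normally is
	// (the array is 16 bytes, matching IFNAMSIZ on Linux).
	withNUL := [16]byte{'e', 't', 'h', '0'}
	fmt.Println(unix.ByteSliceToString(withNUL[:])) // "eth0"

	// No terminator at all: the old code returned "" here because
	// BytePtrToString is unsafe without one; ByteSliceToString simply
	// returns the full contents.
	noNUL := []byte{'a', 'b', 'c'}
	fmt.Println(unix.ByteSliceToString(noNUL)) // "abc"
}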
@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) {
}
//sys utimes(path string, times *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error {
if len(tv) != 2 {
return EINVAL
@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error {
}
//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)
func UtimesNano(path string, ts []Timespec) error {
if len(ts) != 2 {
return EINVAL
@ -300,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) {
}
//sys getdirent(fd int, buf []byte) (n int, err error)
func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf)
}
//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error)
func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
var status _C_int
var r Pid_t
@ -372,6 +376,7 @@ func (w WaitStatus) TrapCause() int { return -1 }
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range
func Fsync(fd int) error {
return fsyncRange(fd, O_SYNC, 0, 0)
}
@ -536,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Getsystemcfg(label int) (n uint64)
//sys umount(target string) (err error)
func Unmount(target string, flags int) (err error) {
if flags != 0 {
// AIX doesn't have any flags for umount.

@ -504,6 +504,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
@ -572,7 +573,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
// Nfssvc
// Getfh
// Quotactl
// Mount
// Csops
// Waitid
// Add_profil

@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) {
}
//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
func pread(fd int, p []byte, offset int64) (n int, err error) {
return extpread(fd, p, 0, offset)
}
//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return extpwrite(fd, p, 0, offset)
}

@ -512,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
//
// Server example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
//
// Client example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
// Addr represents a bluetooth address, byte ordering is little-endian.
Addr [6]uint8
@ -556,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt
@ -632,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but
@ -647,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {

@ -81,6 +81,7 @@ func Pipe(p []int) (err error) {
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) error {
if len(p) != 2 {
return EINVAL
@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error {
}
//sys Getdents(fd int, buf []byte) (n int, err error)
func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
n, err = Getdents(fd, buf)
if err != nil || basep == nil {

@ -5,7 +5,7 @@
// +build 386,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go
package unix

@ -5,7 +5,7 @@
// +build amd64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go
package unix

@ -5,7 +5,7 @@
// +build arm,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build arm64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix

@ -5,7 +5,7 @@
// +build loong64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build mips,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build mips64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build mips64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build mipsle,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build ppc,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build ppc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build ppc64le,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build riscv64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -5,7 +5,7 @@
// +build s390x,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go
package unix

@ -5,7 +5,7 @@
// +build sparc64,linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go
package unix

@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(fsType)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(dir)
if err != nil {
return
}
_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_mount_trampoline_addr uintptr
//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)

@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mount(SB)
GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)

@ -1643,6 +1643,30 @@ var libc_mknod_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mount(fsType string, dir string, flags int, data unsafe.Pointer) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(fsType)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(dir)
if err != nil {
return
}
_, _, e1 := syscall_syscall6(libc_mount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flags), uintptr(data), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
var libc_mount_trampoline_addr uintptr
//go:cgo_import_dynamic libc_mount mount "/usr/lib/libSystem.B.dylib"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)

@ -600,6 +600,12 @@ TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mount(SB)
GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8
DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build 386 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build amd64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build arm64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build loong64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64le && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mipsle && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build ppc64le && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build riscv64 && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build s390x && linux

@ -1,4 +1,4 @@
// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/linux/types.go | go run mkpost.go
// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build sparc64 && linux
