- update vendor

master
李光春 1 year ago
parent 4585d3f4a2
commit 41f01c0dd6

@ -6,11 +6,11 @@ require (
github.com/MercuryEngineering/CookieMonster v0.0.0-20180304172713-1584578b3403
github.com/aliyun/aliyun-oss-go-sdk v2.2.7+incompatible
github.com/allegro/bigcache/v3 v3.1.0
github.com/baidubce/bce-sdk-go v0.9.146
github.com/baidubce/bce-sdk-go v0.9.148
github.com/basgys/goxml2json v1.1.0
github.com/bytedance/sonic v1.8.7
github.com/gin-gonic/gin v1.9.0
github.com/go-co-op/gocron v1.19.0
github.com/go-co-op/gocron v1.22.2
github.com/go-playground/locales v0.14.1
github.com/go-playground/universal-translator v0.18.1
github.com/go-playground/validator/v10 v10.12.0
@ -18,7 +18,7 @@ require (
github.com/goccy/go-json v0.10.2
github.com/gogf/gf/v2 v2.3.3
github.com/json-iterator/go v1.1.12
github.com/lib/pq v1.10.7
github.com/lib/pq v1.10.8
github.com/mitchellh/mapstructure v1.5.0
github.com/mvdan/xurls v1.1.0
github.com/natefinch/lumberjack v2.0.0+incompatible
@ -32,14 +32,14 @@ require (
github.com/tencentyun/cos-go-sdk-v5 v0.7.41
go.mongodb.org/mongo-driver v1.11.4
go.uber.org/zap v1.24.0
golang.org/x/crypto v0.7.0
golang.org/x/text v0.8.0
golang.org/x/crypto v0.8.0
golang.org/x/text v0.9.0
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df
gorm.io/datatypes v1.1.1
gorm.io/driver/mysql v1.4.7
gorm.io/datatypes v1.2.0
gorm.io/driver/mysql v1.5.0
gorm.io/driver/postgres v1.5.0
gorm.io/gen v0.3.21
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11
gorm.io/gorm v1.25.0
xorm.io/xorm v1.3.2
)
@ -63,9 +63,9 @@ require (
github.com/jackc/pgx/v5 v5.3.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/klauspost/compress v1.16.3 // indirect
github.com/klauspost/compress v1.16.5 // indirect
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
github.com/leodido/go-urn v1.2.2 // indirect
github.com/leodido/go-urn v1.2.3 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
@ -97,11 +97,11 @@ require (
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.3.0 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/net v0.9.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.7.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect

@ -32,8 +32,8 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/baidubce/bce-sdk-go v0.9.146 h1:AQlKDhZB53dlJjmTDwQTg1jc7TSPj7v+nnhxvm/JxWs=
github.com/baidubce/bce-sdk-go v0.9.146/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/baidubce/bce-sdk-go v0.9.148 h1:p9HMHnOVG/v6nK7hYHkCnkJyv41KCxuuWmUM8bIpkac=
github.com/baidubce/bce-sdk-go v0.9.148/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/basgys/goxml2json v1.1.0 h1:4ln5i4rseYfXNd86lGEB+Vi652IsIXIvggKM/BhUKVw=
github.com/basgys/goxml2json v1.1.0/go.mod h1:wH7a5Np/Q4QoECFIU8zTQlZwZkrilY0itPfecMw41Dw=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@ -106,8 +106,8 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
github.com/go-co-op/gocron v1.19.0 h1:XlPLqNnxnKblmCRLdfcWV1UgbukQaU54QdNeR1jtgak=
github.com/go-co-op/gocron v1.19.0/go.mod h1:UqVyvM90I1q/R1qGEX6cBORI6WArLuEgYlbncLMvzRM=
github.com/go-co-op/gocron v1.22.2 h1:5+486wUbSp2Tgodv3Fwek0OgMK/aqjcgGBcRTcT2kgs=
github.com/go-co-op/gocron v1.22.2/go.mod h1:UqVyvM90I1q/R1qGEX6cBORI6WArLuEgYlbncLMvzRM=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
@ -295,8 +295,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
@ -313,15 +313,15 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.2 h1:7z68G0FCGvDk646jz1AelTYNYWrTNm0bEcFAo147wt4=
github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ=
github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw=
github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.8 h1:3fdt97i/cwSU83+E0hZTC/Xpc9mTZxc6UWSCRcSbxiE=
github.com/lib/pq v1.10.8/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@ -466,7 +466,6 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/saracen/go7z v0.0.0-20191010121135-9c09b6bd7fda h1:h+YpzUB/bGVJcLqW+d5GghcCmE/A25KbzjXvWJQi/+o=
@ -611,8 +610,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
@ -648,8 +647,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -713,7 +712,7 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@ -723,8 +722,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2/go.mod h1:EFNZuWvGYxIRUEX+K8UmCFwYmZjqcrnq15ZuVldZkZ0=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@ -748,8 +747,8 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -807,11 +806,11 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/datatypes v1.1.1 h1:XAjO7NNfUKVUvnS3+BkqMrPXxCAcxDlpOYbjnizxNCw=
gorm.io/datatypes v1.1.1/go.mod h1:u8GEgFjJ+GpsGfgHmBUcQqHm/937t3sj/SO9dvbndTg=
gorm.io/datatypes v1.2.0 h1:5YT+eokWdIxhJgWHdrb2zYUimyk0+TaFth+7a0ybzco=
gorm.io/datatypes v1.2.0/go.mod h1:o1dh0ZvjIjhH/bngTpypG6lVRJ5chTBxE09FH/71k04=
gorm.io/driver/mysql v1.4.3/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c=
gorm.io/driver/mysql v1.4.7 h1:rY46lkCspzGHn7+IYsNpSfEv9tA+SU4SkkB+GFX125Y=
gorm.io/driver/mysql v1.4.7/go.mod h1:SxzItlnT1cb6e1e4ZRpgJN2VYtcqJgqnHxWr4wsP8oc=
gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
gorm.io/driver/postgres v1.5.0/go.mod h1:FUZXzO+5Uqg5zzwzv4KK49R8lvGIyscBOqYrtI1Ce9A=
gorm.io/driver/sqlite v1.4.2/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI=
@ -822,8 +821,9 @@ gorm.io/gen v0.3.21/go.mod h1:aWgvoKdG9f8Des4TegSa0N5a+gwhGsFo0JJMaLwokvk=
gorm.io/gorm v1.23.8/go.mod h1:l2lP/RyAtc1ynaTjFksBde/O8v9oOGIApu2/xRitmZk=
gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
gorm.io/gorm v1.24.3/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11 h1:9qNbmu21nNThCNnF5i2R3kw2aL27U8ZwbzccNjOmW0g=
gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
gorm.io/hints v1.1.1 h1:NPampLxQujY+277452rt4yqtg6JmzNZ1jA2olk0eFXw=
gorm.io/hints v1.1.1/go.mod h1:zdwzfFqvBWGbpuKiAhLFOSGSpeD3/VsRgkXR9Y7Z3cs=
gorm.io/plugin/dbresolver v1.4.1 h1:Ug4LcoPhrvqq71UhxtF346f+skTYoCa/nEsdjvHwEzk=

@ -26,7 +26,7 @@ import (
// Constants and default values for the package bce
const (
SDK_VERSION = "0.9.146"
SDK_VERSION = "0.9.148"
URI_PREFIX = "/" // URIs without the "v1" prefix are now supported, so just set the root path
DEFAULT_DOMAIN = "baidubce.com"
DEFAULT_PROTOCOL = "http"

@ -80,6 +80,7 @@ const (
BCE_RESTORE = "x-bce-restore"
BCE_FORBID_OVERWRITE = "x-bce-forbid-overwrite"
BCE_SYMLINK_TARGET = "x-bce-symlink-target"
BCE_SYMLINK_BUCKET = "x-bce-symlink-bucket"
BCE_TRAFFIC_LIMIT = "x-bce-traffic-limit"
BCE_BUCKET_TYPE = "x-bce-bucket-type"
)

@ -1092,3 +1092,60 @@ func DeleteBucketNotification(cli bce.Client, bucket string) error {
defer func() { resp.Body().Close() }()
return nil
}
func PutBucketMirror(cli bce.Client, bucket string, putBucketMirrorArgs *PutBucketMirrorArgs) error {
req := &bce.BceRequest{}
req.SetUri(getBucketUri(bucket))
req.SetMethod(http.PUT)
req.SetParam("mirroring", "")
reqByte, _ := json.Marshal(putBucketMirrorArgs)
body, err := bce.NewBodyFromString(string(reqByte))
if err != nil {
return err
}
req.SetBody(body)
resp := &bce.BceResponse{}
if err := SendRequest(cli, req, resp); err != nil {
return err
}
if resp.IsFail() {
return resp.ServiceError()
}
defer func() { resp.Body().Close() }()
return nil
}
func GetBucketMirror(cli bce.Client, bucket string) (*PutBucketMirrorArgs, error) {
req := &bce.BceRequest{}
req.SetUri(getBucketUri(bucket))
req.SetMethod(http.GET)
req.SetParam("mirroring", "")
resp := &bce.BceResponse{}
if err := SendRequest(cli, req, resp); err != nil {
return nil, err
}
if resp.IsFail() {
return nil, resp.ServiceError()
}
result := &PutBucketMirrorArgs{}
if err := resp.ParseJsonBody(result); err != nil {
return nil, err
}
return result, nil
}
func DeleteBucketMirror(cli bce.Client, bucket string) error {
req := &bce.BceRequest{}
req.SetUri(getBucketUri(bucket))
req.SetMethod(http.DELETE)
req.SetParam("mirroring", "")
resp := &bce.BceResponse{}
if err := SendRequest(cli, req, resp); err != nil {
return err
}
if resp.IsFail() {
return resp.ServiceError()
}
defer func() { resp.Body().Close() }()
return nil
}

@ -469,6 +469,7 @@ type PutSymlinkArgs struct {
ForbidOverwrite string
StorageClass string
UserMeta map[string]string
SymlinkBucket string
}
// UploadInfoType defines an uploaded part info structure.
@ -583,3 +584,31 @@ type PutBucketNotificationAppsSt struct {
EventUrl string `json:"eventUrl"`
XVars string `json:"xVars"`
}
type MirrorConfigurationRule struct {
Prefix string `json:"prefix,omitempty"`
SourceUrl string `json:"sourceUrl"`
PassQueryString bool `json:"passQuerystring"`
Mode string `json:"mode"`
StorageClass string `json:"storageClass"`
PassHeaders []string `json:"passHeaders"`
IgnoreHeaders []string `json:"ignoreHeaders"`
CustomHeaders []HeaderPair `json:"customHeaders"`
BackSourceUrl string `json:"backSourceUrl"`
Resource string `json:"resource"`
Suffix string `json:"suffix"`
FixedKey string `json:"fixedKey"`
PrefixReplace string `json:"prefixReplace"`
Version string `json:"version"`
}
type HeaderPair struct {
HeaderName string `json:"headerName"`
HeaderValue string `json:"headerValue"`
}
type PutBucketMirrorArgs struct {
BucketMirroringConfiguration []MirrorConfigurationRule `json:"bucketMirroringConfiguration"`
}

@ -945,6 +945,9 @@ func PutObjectSymlink(cli bce.Client, bucket string, object string, symlinkKey s
return err
}
}
if len(symlinkArgs.SymlinkBucket) != 0 {
req.SetHeader(http.BCE_SYMLINK_BUCKET, symlinkArgs.SymlinkBucket)
}
}
req.SetHeader(http.BCE_SYMLINK_TARGET, object)
@ -981,5 +984,9 @@ func GetObjectSymlink(cli bce.Client, bucket string, symlinkKey string) (string,
return "", resp.ServiceError()
}
defer func() { resp.Body().Close() }()
if resp.Header(http.BCE_SYMLINK_BUCKET) != "" {
result := BOS_CONFIG_PREFIX + resp.Header(http.BCE_SYMLINK_BUCKET) + "/" + resp.Header(http.BCE_SYMLINK_TARGET)
return result, nil
}
return resp.Header(http.BCE_SYMLINK_TARGET), nil
}
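
For context, a hedged usage sketch of the new cross-bucket symlink support. bos.NewClient and the exact PutSymlink signature are assumptions based on the surrounding SDK; credentials, endpoint, and bucket/object names are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/baidubce/bce-sdk-go/services/bos"
	"github.com/baidubce/bce-sdk-go/services/bos/api"
)

func main() {
	// Placeholder credentials and endpoint.
	client, err := bos.NewClient("ak", "sk", "bj.bcebos.com")
	if err != nil {
		log.Fatal(err)
	}

	// Create a symlink in "link-bucket" whose target lives in "data-bucket",
	// using the new SymlinkBucket field (sent as x-bce-symlink-bucket).
	args := &api.PutSymlinkArgs{SymlinkBucket: "data-bucket"}
	if err := client.PutSymlink("link-bucket", "path/to/object", "my-link", args); err != nil {
		log.Fatal(err)
	}

	// With the new response handling, a cross-bucket target is returned
	// as "bos://data-bucket/path/to/object".
	target, err := client.GetSymlink("link-bucket", "my-link")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(target)
}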

@ -61,6 +61,7 @@ const (
FORBID_OVERWRITE_TRUE = "true"
NAMESPACE_BUCKET = "namespace"
BOS_CONFIG_PREFIX = "bos://"
)
var DEFAULT_CNAME_LIKE_LIST = []string{

@ -23,6 +23,7 @@ import (
"errors"
"fmt"
"io"
"math"
"net/http"
"os"
@ -2127,7 +2128,10 @@ func (c *Client) parallelPartCopy(srcMeta api.GetObjectMetaResult, source string
var err error
size := srcMeta.ContentLength
partSize := int64(DEFAULT_MULTIPART_SIZE)
if partSize * MAX_PART_NUMBER < size {
lowerLimit := int64(math.Ceil(float64(size) / MAX_PART_NUMBER))
partSize = int64(math.Ceil(float64(lowerLimit)/float64(partSize))) * partSize
}
partNum := (size + partSize - 1) / partSize
parallelChan := make(chan int, c.MaxParallel)
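
The new logic picks the smallest multiple of the default part size that keeps the part count within the limit. A standalone sketch of that rounding, assuming a 10 MiB DEFAULT_MULTIPART_SIZE and a MAX_PART_NUMBER of 10000 (the SDK's actual constants may differ):

package main

import (
	"fmt"
	"math"
)

const (
	defaultPartSize = int64(10 * 1024 * 1024) // assumed DEFAULT_MULTIPART_SIZE
	maxPartNumber   = int64(10000)            // assumed MAX_PART_NUMBER
)

func partSizeFor(size int64) int64 {
	partSize := defaultPartSize
	if partSize*maxPartNumber < size {
		// Smallest part size that fits within maxPartNumber parts...
		lowerLimit := int64(math.Ceil(float64(size) / float64(maxPartNumber)))
		// ...rounded up to a multiple of the default part size.
		partSize = int64(math.Ceil(float64(lowerLimit)/float64(partSize))) * partSize
	}
	return partSize
}

func main() {
	size := int64(200) << 30 // a 200 GiB source object
	ps := partSizeFor(size)
	fmt.Printf("partSize=%d MiB, parts=%d\n", ps>>20, (size+ps-1)/ps) // 30 MiB, 6827 parts
}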
@ -2235,3 +2239,15 @@ func (c *Client) PutSymlink(bucket string, object string, symlinkKey string, sym
func (c *Client) GetSymlink(bucket string, object string) (string, error) {
return api.GetObjectSymlink(c, bucket, object)
}
func (c *Client) PutBucketMirror(bucket string, putBucketMirrorArgs *api.PutBucketMirrorArgs) error {
return api.PutBucketMirror(c, bucket, putBucketMirrorArgs)
}
func (c *Client) GetBucketMirror(bucket string) (*api.PutBucketMirrorArgs, error) {
return api.GetBucketMirror(c, bucket)
}
func (c *Client) DeleteBucketMirror(bucket string) error {
return api.DeleteBucketMirror(c, bucket)
}
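
A hedged usage sketch of the new bucket-mirror client wrappers. bos.NewClient is assumed from the same SDK; the endpoint, bucket name, source URL, and mode value are placeholders rather than values taken from this change.

package main

import (
	"fmt"
	"log"

	"github.com/baidubce/bce-sdk-go/services/bos"
	"github.com/baidubce/bce-sdk-go/services/bos/api"
)

func main() {
	client, err := bos.NewClient("ak", "sk", "bj.bcebos.com") // placeholders
	if err != nil {
		log.Fatal(err)
	}

	// Mirror misses under images/ back to an origin server.
	args := &api.PutBucketMirrorArgs{
		BucketMirroringConfiguration: []api.MirrorConfigurationRule{{
			Prefix:    "images/",
			SourceUrl: "https://origin.example.com",
			Mode:      "fetch", // placeholder mode value
		}},
	}
	if err := client.PutBucketMirror("my-bucket", args); err != nil {
		log.Fatal(err)
	}

	// Read the configuration back, then remove it.
	got, err := client.GetBucketMirror("my-bucket")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("mirror config: %+v\n", got)

	if err := client.DeleteBucketMirror("my-bucket"); err != nil {
		log.Fatal(err)
	}
}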

@ -15,7 +15,6 @@ issues:
linters:
enable:
- bodyclose
- deadcode
- errcheck
- gofmt
- revive
@ -25,10 +24,8 @@ linters:
- ineffassign
- misspell
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"

@ -1,6 +1,7 @@
# gocron: A Golang Job Scheduling Package.
[![CI State](https://github.com/go-co-op/gocron/workflows/Go%20Test/badge.svg)](https://github.com/go-co-op/gocron/actions?query=workflow%3A"lint") ![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron)
[![CI State](https://github.com/go-co-op/gocron/actions/workflows/go_test.yml/badge.svg?branch=main&event=push)](https://github.com/go-co-op/gocron/actions)
![Go Report Card](https://goreportcard.com/badge/github.com/go-co-op/gocron) [![Go Doc](https://godoc.org/github.com/go-co-op/gocron?status.svg)](https://pkg.go.dev/github.com/go-co-op/gocron)
gocron is a job scheduling package which lets you run Go functions at pre-determined intervals using a simple, human-friendly syntax.
@ -130,3 +131,9 @@ Looking to contribute? Try to follow these guidelines:
![design-diagram](https://user-images.githubusercontent.com/19351306/110375142-2ba88680-8017-11eb-80c3-554cc746b165.png)
[Jetbrains](https://www.jetbrains.com/?from=gocron) supports this project with GoLand licenses. We appreciate their support for free and open source software!
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=go-co-op/gocron&type=Date)](https://star-history.com/#go-co-op/gocron&Date)

@ -25,31 +25,61 @@ const (
)
type executor struct {
jobFunctions chan jobFunction
stopCh chan struct{}
stoppedCh chan struct{}
limitMode limitMode
jobFunctions chan jobFunction // the chan upon which the jobFunctions are passed in from the scheduler
ctx context.Context // used to tell the executor to stop
cancel context.CancelFunc // used to tell the executor to stop
wg *sync.WaitGroup // used by the scheduler to wait for the executor to stop
jobsWg *sync.WaitGroup // used by the executor to wait for all jobs to finish
singletonWgs *sync.Map // used by the executor to wait for the singleton runners to complete
limitMode limitMode // when SetMaxConcurrentJobs() is set upon the scheduler
maxRunningJobs *semaphore.Weighted
}
func newExecutor() executor {
return executor{
e := executor{
jobFunctions: make(chan jobFunction, 1),
stopCh: make(chan struct{}),
stoppedCh: make(chan struct{}),
singletonWgs: &sync.Map{},
wg: &sync.WaitGroup{},
}
e.wg.Add(1)
return e
}
func (e *executor) start() {
stopCtx, cancel := context.WithCancel(context.Background())
runningJobsWg := sync.WaitGroup{}
func runJob(f jobFunction) {
f.runStartCount.Add(1)
f.isRunning.Store(true)
callJobFunc(f.eventListeners.onBeforeJobExecution)
callJobFuncWithParams(f.function, f.parameters)
callJobFunc(f.eventListeners.onAfterJobExecution)
f.isRunning.Store(false)
f.runFinishCount.Add(1)
}
func (jf *jobFunction) singletonRunner() {
jf.singletonRunnerOn.Store(true)
jf.singletonWg.Add(1)
for {
select {
case <-jf.ctx.Done():
jf.singletonWg.Done()
jf.singletonRunnerOn.Store(false)
return
default:
if jf.singletonQueue.Load() != 0 {
runJob(*jf)
jf.singletonQueue.Add(-1)
}
}
}
}
func (e *executor) start() {
for {
select {
case f := <-e.jobFunctions:
runningJobsWg.Add(1)
e.jobsWg.Add(1)
go func() {
defer runningJobsWg.Done()
defer e.jobsWg.Done()
panicHandlerMutex.RLock()
defer panicHandlerMutex.RUnlock()
@ -70,7 +100,7 @@ func (e *executor) start() {
return
case WaitMode:
select {
case <-stopCtx.Done():
case <-e.ctx.Done():
return
case <-f.ctx.Done():
return
@ -79,7 +109,6 @@ func (e *executor) start() {
if err := e.maxRunningJobs.Acquire(f.ctx, 1); err != nil {
break
}
}
}
@ -87,41 +116,36 @@ func (e *executor) start() {
defer e.maxRunningJobs.Release(1)
}
runJob := func() {
f.incrementRunState()
callJobFunc(f.eventListeners.onBeforeJobExecution)
callJobFuncWithParams(f.function, f.parameters)
callJobFunc(f.eventListeners.onAfterJobExecution)
f.decrementRunState()
}
switch f.runConfig.mode {
case defaultMode:
runJob()
runJob(f)
case singletonMode:
_, _, _ = f.limiter.Do("main", func() (any, error) {
select {
case <-stopCtx.Done():
return nil, nil
case <-f.ctx.Done():
return nil, nil
default:
}
runJob()
return nil, nil
})
e.singletonWgs.Store(f.singletonWg, struct{}{})
if !f.singletonRunnerOn.Load() {
go f.singletonRunner()
}
f.singletonQueue.Add(1)
}
}()
case <-e.stopCh:
cancel()
runningJobsWg.Wait()
close(e.stoppedCh)
case <-e.ctx.Done():
e.jobsWg.Wait()
e.wg.Done()
return
}
}
}
func (e *executor) stop() {
close(e.stopCh)
<-e.stoppedCh
e.cancel()
e.wg.Wait()
if e.singletonWgs != nil {
e.singletonWgs.Range(func(key, value any) bool {
if wg, ok := key.(*sync.WaitGroup); ok {
wg.Wait()
}
return true
})
}
}
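
From the caller's side, the reworked singleton mode looks like this; interval and sleep durations are arbitrary, and FinishedRunCount() is the counter added in this release:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// Triggers that land while the job is still running are queued on the
	// atomic counter and drained sequentially by the singleton runner.
	job, _ := s.Every(1).Seconds().SingletonMode().Do(func() {
		fmt.Println("run at", time.Now().Format("15:04:05"))
		time.Sleep(3 * time.Second) // deliberately longer than the interval
	})

	s.StartAsync()
	time.Sleep(10 * time.Second)
	s.Stop()

	fmt.Println("started:", job.RunCount(), "finished:", job.FinishedRunCount())
}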

@ -10,7 +10,6 @@ import (
"time"
"github.com/robfig/cron/v3"
"golang.org/x/sync/singleflight"
)
// Job struct stores the information necessary to run a Job
@ -30,7 +29,6 @@ type Job struct {
scheduledWeekdays []time.Weekday // Specific days of the week to start on
daysOfTheMonth []int // Specific days of the month to run the job
tags []string // allow the user to tag Jobs with certain labels
runCount int // number of times the job ran
timer *time.Timer // handles running tasks at specific time
cronSchedule cron.Schedule // stores the schedule when a task uses cron
runWithDetails bool // when true the job is passed as the last arg of the jobFunc
@ -43,16 +41,20 @@ type random struct {
}
type jobFunction struct {
eventListeners // additional functions run before and after the job executes
function any // task's function
parameters []any // task's function parameters
parametersLen int // length of the passed parameters
name string // nolint the function name to run
runConfig runConfig // configuration for how many times to run the job
limiter *singleflight.Group // limits inflight runs of job to one
ctx context.Context // for cancellation
cancel context.CancelFunc // for cancellation
runState *int64 // will be non-zero when jobs are running
eventListeners // additional functions run before and after the job executes
function any // task's function
parameters []any // task's function parameters
parametersLen int // length of the passed parameters
name string // nolint the function name to run
runConfig runConfig // configuration for how many times to run the job
singletonQueue *atomic.Int64 // limits inflight runs of a job to one
singletonRunnerOn *atomic.Bool // whether the runner function for singleton is running
ctx context.Context // for cancellation
cancel context.CancelFunc // for cancellation
isRunning *atomic.Bool // whether the job func is currently being run
runStartCount *atomic.Int64 // number of times the job was started
runFinishCount *atomic.Int64 // number of times the job was finished
singletonWg *sync.WaitGroup // used by singleton runner
}
type eventListeners struct {
@ -64,30 +66,22 @@ type jobMutex struct {
sync.RWMutex
}
func (jf *jobFunction) incrementRunState() {
if jf.runState != nil {
atomic.AddInt64(jf.runState, 1)
}
}
func (jf *jobFunction) decrementRunState() {
if jf.runState != nil {
atomic.AddInt64(jf.runState, -1)
}
}
func (jf *jobFunction) copy() jobFunction {
cp := jobFunction{
eventListeners: jf.eventListeners,
function: jf.function,
parameters: nil,
parametersLen: jf.parametersLen,
name: jf.name,
runConfig: jf.runConfig,
limiter: jf.limiter,
ctx: jf.ctx,
cancel: jf.cancel,
runState: jf.runState,
eventListeners: jf.eventListeners,
function: jf.function,
parameters: nil,
parametersLen: jf.parametersLen,
name: jf.name,
runConfig: jf.runConfig,
singletonQueue: jf.singletonQueue,
ctx: jf.ctx,
cancel: jf.cancel,
isRunning: jf.isRunning,
runStartCount: jf.runStartCount,
runFinishCount: jf.runFinishCount,
singletonWg: jf.singletonWg,
singletonRunnerOn: jf.singletonRunnerOn,
}
cp.parameters = append(cp.parameters, jf.parameters...)
return cp
@ -113,7 +107,6 @@ const (
// newJob creates a new Job with the provided interval
func newJob(interval int, startImmediately bool, singletonMode bool) *Job {
ctx, cancel := context.WithCancel(context.Background())
var zero int64
job := &Job{
mu: &jobMutex{},
interval: interval,
@ -121,9 +114,12 @@ func newJob(interval int, startImmediately bool, singletonMode bool) *Job {
lastRun: time.Time{},
nextRun: time.Time{},
jobFunction: jobFunction{
ctx: ctx,
cancel: cancel,
runState: &zero,
ctx: ctx,
cancel: cancel,
isRunning: &atomic.Bool{},
runStartCount: &atomic.Int64{},
runFinishCount: &atomic.Int64{},
singletonRunnerOn: &atomic.Bool{},
},
tags: []string{},
startsImmediately: startImmediately,
@ -397,7 +393,8 @@ func (j *Job) SingletonMode() {
j.mu.Lock()
defer j.mu.Unlock()
j.runConfig.mode = singletonMode
j.jobFunction.limiter = &singleflight.Group{}
j.jobFunction.singletonQueue = &atomic.Int64{}
j.jobFunction.singletonWg = &sync.WaitGroup{}
}
// shouldRun evaluates if this job should run again
@ -405,7 +402,7 @@ func (j *Job) SingletonMode() {
func (j *Job) shouldRun() bool {
j.mu.RLock()
defer j.mu.RUnlock()
return !j.runConfig.finiteRuns || j.runCount < j.runConfig.maxRuns
return !j.runConfig.finiteRuns || j.runStartCount.Load() < int64(j.runConfig.maxRuns)
}
// LastRun returns the time the job was run last
@ -432,11 +429,18 @@ func (j *Job) setNextRun(t time.Time) {
j.nextRun = t
}
// RunCount returns the number of time the job ran so far
// RunCount returns the number of times the job has been started
func (j *Job) RunCount() int {
j.mu.Lock()
defer j.mu.Unlock()
return j.runCount
return int(j.runStartCount.Load())
}
// FinishedRunCount returns the number of times the job has finished running
func (j *Job) FinishedRunCount() int {
j.mu.Lock()
defer j.mu.Unlock()
return int(j.runFinishCount.Load())
}
func (j *Job) stop() {
@ -452,7 +456,7 @@ func (j *Job) stop() {
// IsRunning reports whether any instances of the job function are currently running
func (j *Job) IsRunning() bool {
return atomic.LoadInt64(j.runState) != 0
return j.isRunning.Load()
}
// you must lock the job before calling copy
@ -472,7 +476,6 @@ func (j *Job) copy() Job {
scheduledWeekdays: j.scheduledWeekdays,
daysOfTheMonth: j.daysOfTheMonth,
tags: j.tags,
runCount: j.runCount,
timer: j.timer,
cronSchedule: j.cronSchedule,
runWithDetails: j.runWithDetails,

@ -36,10 +36,17 @@ type Scheduler struct {
updateJob bool // so the scheduler knows to create a new job or update the current
waitForInterval bool // defaults jobs to waiting for first interval to start
singletonMode bool // defaults all jobs to use SingletonMode()
jobCreated bool // so the scheduler knows a job was created prior to calling Every or Cron
startBlockingStopChanMutex sync.Mutex
startBlockingStopChan chan struct{} // stops the scheduler
// tracks whether we're in a chain of scheduling methods for a job.
// A chain is started with any of the scheduler methods that operate
// upon a job and is ended with one of [ Do(), Update() ] - note that
// Update() calls Do(), so really they all end with Do().
// This allows the caller to begin the chain with any job-related scheduler method,
// not only with one of [ Every(), EveryRandom(), Cron(), CronWithSeconds(), MonthFirstWeekday() ]
inScheduleChain bool
}
// days in a week
@ -86,6 +93,11 @@ func (s *Scheduler) StartAsync() {
// start starts the scheduler, scheduling and running jobs
func (s *Scheduler) start() {
stopCtx, cancel := context.WithCancel(context.Background())
s.executor.ctx = stopCtx
s.executor.cancel = cancel
s.executor.jobsWg = &sync.WaitGroup{}
go s.executor.start()
s.setRunning(true)
s.runJobs(s.Jobs())
@ -93,6 +105,11 @@ func (s *Scheduler) start() {
func (s *Scheduler) runJobs(jobs []*Job) {
for _, job := range jobs {
ctx, cancel := context.WithCancel(context.Background())
job.mu.Lock()
job.ctx = ctx
job.cancel = cancel
job.mu.Unlock()
s.runContinuous(job)
}
}
@ -188,7 +205,7 @@ func (s *Scheduler) scheduleNextRun(job *Job) (bool, nextRun) {
}
}
} else {
lastRun = job.LastRun()
lastRun = job.NextRun()
}
if !job.shouldRun() {
@ -198,7 +215,13 @@ func (s *Scheduler) scheduleNextRun(job *Job) (bool, nextRun) {
next := s.durationToNextRun(lastRun, job)
job.setLastRun(job.NextRun())
jobNextRun := job.NextRun()
if jobNextRun.After(now) {
job.setLastRun(now)
} else {
job.setLastRun(jobNextRun)
}
if next.dateTime.IsZero() {
next.dateTime = lastRun.Add(next.duration)
job.setNextRun(next.dateTime)
@ -211,7 +234,13 @@ func (s *Scheduler) scheduleNextRun(job *Job) (bool, nextRun) {
// durationToNextRun calculates how much time remains until the next run, depending on the unit
func (s *Scheduler) durationToNextRun(lastRun time.Time, job *Job) nextRun {
// job can be scheduled with .StartAt()
if job.getStartAtTime().After(lastRun) {
if job.getFirstAtTime() == 0 && job.getStartAtTime().After(lastRun) {
sa := job.getStartAtTime()
job.addAtTime(
time.Duration(sa.Hour())*time.Hour +
time.Duration(sa.Minute())*time.Minute +
time.Duration(sa.Second())*time.Second,
)
return nextRun{duration: job.getStartAtTime().Sub(s.now()), dateTime: job.getStartAtTime()}
}
@ -227,6 +256,9 @@ func (s *Scheduler) durationToNextRun(lastRun time.Time, job *Job) nextRun {
} else {
next = s.calculateWeeks(job, lastRun)
}
if next.dateTime.Before(job.getStartAtTime()) {
return s.durationToNextRun(job.getStartAtTime(), job)
}
case months:
next = s.calculateMonths(job, lastRun)
case duration:
@ -327,7 +359,22 @@ func (s *Scheduler) calculateWeekday(job *Job, lastRun time.Time) nextRun {
func (s *Scheduler) calculateWeeks(job *Job, lastRun time.Time) nextRun {
totalDaysDifference := int(job.getInterval()) * 7
next := s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, totalDaysDifference)
var next time.Time
atTimes := job.atTimes
for _, at := range atTimes {
n := s.roundToMidnightAndAddDSTAware(lastRun, at)
if n.After(s.now()) {
next = n
break
}
}
if next.IsZero() {
next = s.roundToMidnightAndAddDSTAware(lastRun, job.getFirstAtTime()).AddDate(0, 0, totalDaysDifference)
}
return nextRun{duration: until(lastRun, next), dateTime: next}
}
@ -467,22 +514,9 @@ func (s *Scheduler) NextRun() (*Job, time.Time) {
// The default unit is Seconds(). Call a different unit in the chain
// if you would like to change that. For example, Minutes(), Hours(), etc.
func (s *Scheduler) EveryRandom(lower, upper int) *Scheduler {
job := s.newJob(0)
if s.updateJob || s.jobCreated {
job = s.getCurrentJob()
}
job := s.getCurrentJob()
job.setRandomInterval(lower, upper)
if s.updateJob || s.jobCreated {
s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job))
if s.jobCreated {
s.jobCreated = false
}
} else {
s.setJobs(append(s.Jobs(), job))
}
return s
}
@ -491,10 +525,7 @@ func (s *Scheduler) EveryRandom(lower, upper int) *Scheduler {
// parses with time.ParseDuration().
// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
func (s *Scheduler) Every(interval any) *Scheduler {
job := s.newJob(0)
if s.updateJob || s.jobCreated {
job = s.getCurrentJob()
}
job := s.getCurrentJob()
switch interval := interval.(type) {
case int:
@ -517,15 +548,6 @@ func (s *Scheduler) Every(interval any) *Scheduler {
job.error = wrapOrError(job.error, ErrInvalidIntervalType)
}
if s.updateJob || s.jobCreated {
s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job))
if s.jobCreated {
s.jobCreated = false
}
} else {
s.setJobs(append(s.Jobs(), job))
}
return s
}
@ -558,7 +580,6 @@ func (s *Scheduler) run(job *Job) {
}
s.executor.jobFunctions <- job.jobFunction.copy()
job.runCount++
}
func (s *Scheduler) runContinuous(job *Job) {
@ -572,18 +593,17 @@ func (s *Scheduler) runContinuous(job *Job) {
} else {
s.run(job)
}
nextRun := next.dateTime.Sub(s.now())
if nextRun < 0 {
time.Sleep(absDuration(nextRun))
nr := next.dateTime.Sub(s.now())
if nr < 0 {
time.Sleep(absDuration(nr))
shouldRun, next := s.scheduleNextRun(job)
if !shouldRun {
return
}
nextRun = next.dateTime.Sub(s.now())
nr = next.dateTime.Sub(s.now())
}
job.setTimer(s.timer(nextRun, func() {
job.setTimer(s.timer(nr, func() {
if !next.dateTime.IsZero() {
for {
n := s.now().UnixNano() - next.dateTime.UnixNano()
@ -843,6 +863,7 @@ func (s *Scheduler) stopJobs(jobs []*Job) {
func (s *Scheduler) doCommon(jobFun any, params ...any) (*Job, error) {
job := s.getCurrentJob()
s.inScheduleChain = false
jobUnit := job.getUnit()
jobLastRun := job.LastRun()
@ -966,6 +987,15 @@ func (s *Scheduler) Tag(t ...string) *Scheduler {
return s
}
// GetAllTags returns all tags.
func (s *Scheduler) GetAllTags() []string {
var tags []string
for _, job := range s.Jobs() {
tags = append(tags, job.Tags()...)
}
return tags
}
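
A minimal sketch of the new GetAllTags helper; tag names and intervals are arbitrary:

package main

import (
	"fmt"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)
	_, _ = s.Every(1).Hour().Tag("cleanup").Do(func() {})
	_, _ = s.Every(5).Minutes().Tag("metrics", "fast").Do(func() {})

	// Collects the tags of every scheduled job, in job order.
	fmt.Println(s.GetAllTags()) // [cleanup metrics fast]
}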
// StartAt schedules the next run of the Job. If this time is in the past, the configured interval will be used
// to calculate the next future time
func (s *Scheduler) StartAt(t time.Time) *Scheduler {
@ -1170,17 +1200,17 @@ func (s *Scheduler) Sunday() *Scheduler {
}
func (s *Scheduler) getCurrentJob() *Job {
if len(s.Jobs()) == 0 {
s.setJobs([]*Job{s.newJob(0)})
s.jobCreated = true
if !s.inScheduleChain {
s.jobsMutex.Lock()
s.jobs = append(s.jobs, s.newJob(0))
s.jobsMutex.Unlock()
s.inScheduleChain = true
}
s.jobsMutex.RLock()
defer s.jobsMutex.RUnlock()
return s.jobs[len(s.jobs)-1]
}
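
With getCurrentJob creating the job lazily, a scheduling chain may now open with any job-related method (for example Tag) rather than only Every/EveryRandom/Cron/CronWithSeconds/MonthFirstWeekday; Do() still closes the chain. A sketch with arbitrary schedule values:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/go-co-op/gocron"
)

func main() {
	s := gocron.NewScheduler(time.UTC)

	// Tag() opens the chain; Do() resets inScheduleChain and registers the job.
	job, err := s.Tag("nightly").Every(1).Day().At("03:00").Do(func() {
		fmt.Println("nightly task")
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(job.Tags()) // [nightly]

	s.StartAsync()
}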
func (s *Scheduler) now() time.Time {
@ -1206,6 +1236,7 @@ func (s *Scheduler) Job(j *Job) *Scheduler {
s.Swap(len(jobs)-1, index)
}
}
s.inScheduleChain = true
s.updateJob = true
return s
}
@ -1228,6 +1259,10 @@ func (s *Scheduler) Update() (*Job, error) {
return s.DoWithJobDetails(job.function, job.parameters...)
}
if job.runConfig.mode == singletonMode {
job.SingletonMode()
}
return s.Do(job.function, job.parameters...)
}
@ -1240,10 +1275,7 @@ func (s *Scheduler) CronWithSeconds(cronExpression string) *Scheduler {
}
func (s *Scheduler) cron(cronExpression string, withSeconds bool) *Scheduler {
job := s.newJob(0)
if s.updateJob || s.jobCreated {
job = s.getCurrentJob()
}
job := s.getCurrentJob()
var withLocation string
if strings.HasPrefix(cronExpression, "TZ=") || strings.HasPrefix(cronExpression, "CRON_TZ=") {
@ -1272,12 +1304,6 @@ func (s *Scheduler) cron(cronExpression string, withSeconds bool) *Scheduler {
job.setUnit(crontab)
job.startsImmediately = false
if s.updateJob || s.jobCreated {
s.setJobs(append(s.Jobs()[:len(s.Jobs())-1], job))
s.jobCreated = false
} else {
s.setJobs(append(s.Jobs(), job))
}
return s
}

@ -16,6 +16,15 @@ This package provides various compression algorithms.
# changelog
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
@ -615,6 +624,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
# license

@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
return b.encodeLits(b.literals, rawAllLits)
}
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 5)
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
if org == nil {
return errIncompressible
@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
}
b.output = wr.out
// Maybe even add a bigger margin.
if len(b.output)-3-bhOffset >= b.size {
// Maybe even add a bigger margin.
// Discard and encode as raw block.
b.output = b.encodeRawTo(b.output[:bhOffset], org)
b.popOffsets()
b.litEnc.Reuse = huff0.ReusePolicyNone
return errIncompressible
return nil
}
// Size is output minus block header.

@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
}
func (r *readerWrapper) readByte() (byte, error) {
n2, err := r.r.Read(r.tmp[:1])
n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF

@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
d.current.err = io.ErrShortWrite
}
}
d.current.crc.Write(next.b)
}
if next.err == nil && next.d != nil && next.d.hasCRC {
got := uint32(d.current.crc.Sum64())

@ -34,7 +34,7 @@ type match struct {
est int32
}
const highScore = 25000
const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
@ -159,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@ -173,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
_ = addLiterals
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
@ -188,7 +186,9 @@ encodeLoop:
panic("offset0 was 0")
}
const goodEnough = 100
const goodEnough = 250
cv := load6432(src, s)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
@ -201,11 +201,45 @@ encodeLoop:
return
}
if debugAsserts {
if offset <= 0 {
panic(offset)
}
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
// Try to quick reject if we already have a long match.
if m.length > 16 {
left := len(src) - int(m.s+m.length)
// If we are too close to the end, keep as is.
if left <= 0 {
return
}
checkLen := m.length - (s - m.s) - 8
if left > 2 && checkLen > 4 {
// Check 4 bytes, 4 bytes from the end of the current match.
a := load3232(src, offset+checkLen)
b := load3232(src, s+checkLen)
if a != b {
return
}
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
if rep < 0 {
// Extend candidate match backwards as far as possible.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
s--
offset--
l++
}
}
cand := match{offset: offset, s: s, length: l, rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
*m = cand
@ -219,17 +253,29 @@ encodeLoop:
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8)
spp := s + 1
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.length > 0 {
cv32 = uint32(cv >> 24)
spp += 2
if s == nextEmit {
// Check repeats straight after a match.
improve(&best, s-offset2, s, uint32(cv), 1|4)
improve(&best, s-offset3, s, uint32(cv), 2|4)
if offset1 > 1 {
improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
}
}
// If either no match or a non-repeat match, check at + 1
if best.rep <= 0 {
cv32 := uint32(cv >> 8)
spp := s + 1
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.rep < 0 {
cv32 = uint32(cv >> 24)
spp += 2
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
}
}
}
// Load next and check...
@ -244,41 +290,44 @@ encodeLoop:
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
continue
}
s++
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s)
cv2 := load6432(src, s+1)
cv = load6432(src, s+1)
cv2 := load6432(src, s+2)
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
// Long at s+1, s+2
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
}
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
const skipBeginning = 2
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
// We cannot do this if we have already indexed this position.
const skipBeginning = 2
if best.s > s-skipBeginning {
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
}
}
}
}
@ -292,51 +341,34 @@ encodeLoop:
// We have a match, we can store the forward value
if best.rep > 0 {
s = best.s
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
start := best.s
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
if debugAsserts && s <= nextEmit {
panic("s <= nextEmit")
}
repIndex := best.offset
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
addLiterals(&seq, best.s)
// rep 0
seq.offset = uint32(best.rep)
// Repeat. If bit 4 is set, this is a non-lit repeat.
seq.offset = uint32(best.rep & 3)
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s
// Index old s + 1 -> s - 1
index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
// Index skipped...
off := index0 + e.cur
for index0 < s-1 {
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -346,17 +378,19 @@ encodeLoop:
index0++
}
switch best.rep {
case 2:
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
case 3:
case 3, 4 | 2:
offset1, offset2, offset3 = offset3, offset1, offset2
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
cv = load6432(src, s)
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@ -369,22 +403,9 @@ encodeLoop:
panic("invalid offset")
}
// Extend the n-byte match as long as possible.
l := best.length
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}
// Write our sequence
var seq seq
l := best.length
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
@ -401,10 +422,8 @@ encodeLoop:
break encodeLoop
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
// every entry
for index0 < s-1 {
// Index old s + 1 -> s - 1
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -413,50 +432,6 @@ encodeLoop:
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
}
cv = load6432(src, s)
if !canRepeat {
continue
}
// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}
// Store this, since we have it.
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}
if int(nextEmit) < len(src) {

@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.eofWritten = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.err = err
return err
s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.err != nil {
return s.err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error {
}
s.wWg.Done()
}()
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.writeErr = err
s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.writeErr != nil {
return
}
_, s.writeErr = s.w.Write(blk.output)
@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
err := errIncompressible
oldout := blk.output
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
// Output directly to dst
blk.output = dst
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
// Output directly to dst
blk.output = dst
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, src)
case nil:
dst = blk.output
default:
err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = blk.output
blk.output = oldout
} else {
enc.Reset(e.o.dict, false)
@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
blk.last = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, todo)
blk.popOffsets()
case nil:
dst = append(dst, blk.output...)
default:
err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = append(dst, blk.output...)
blk.reset(nil)
}
}

@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
allLitEntropy: false,
lowMem: false,
}
}
@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
}
}
if !o.customALEntropy {
o.allLitEntropy = l > SpeedFastest
o.allLitEntropy = l > SpeedDefault
}
return nil
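
Callers that depended on the previous default can turn literal entropy compression back on explicitly; per the changelog above, WithAllLitEntropyCompression is now respected. A minimal sketch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// allLitEntropy now defaults to off at SpeedDefault and below;
	// force it on regardless of level.
	enc, err := zstd.NewWriter(nil,
		zstd.WithEncoderLevel(zstd.SpeedDefault),
		zstd.WithAllLitEntropyCompression(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()

	src := bytes.Repeat([]byte("hello gopher "), 1024)
	dst := enc.EncodeAll(src, nil)
	fmt.Printf("compressed %d -> %d bytes\n", len(src), len(dst))
}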

@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error {
return nil
}
// checkCRC will check the checksum if the frame has one.
// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
// We can overwrite upper tmp now
buf, err := d.rawInput.readSmall(4)
if err != nil {
@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error {
return err
}
if d.o.ignoreChecksum {
return nil
}
want := binary.LittleEndian.Uint32(buf[:4])
got := uint32(d.crc.Sum64())
@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
// consumeCRC skips over the checksum, assuming the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
}
return nil
return err
}
// runDecoder will run the decoder for the remainder of the frame.
@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
d.crc.Write(dst[crcStart:])
err = d.checkCRC()
}
}
}
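`checkCRC` now assumes the caller checked `HasCheckSum`, and the `ignoreChecksum` decision moved up into `runDecoder`, which either consumes or verifies the checksum. From the outside this is driven by the decoder option; a sketch, assuming the `IgnoreChecksum` option present in this release:

```go
package main

import (
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// With IgnoreChecksum(true), runDecoder calls consumeCRC to skip
	// the 4 checksum bytes instead of verifying them via checkCRC.
	dec, err := zstd.NewReader(nil, zstd.IgnoreChecksum(true))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()
}
```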

@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
maxBlockSize = s.windowSize
}
if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
}
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
printf("reading sequence %d, exceeded available data\n", seqs-i)
printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
return io.ErrUnexpectedEOF
}
var ll, mo, ml int

@ -5,6 +5,7 @@ package zstd
import (
"fmt"
"io"
"github.com/klauspost/compress/internal/cpuinfo"
)
@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d are available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorOverread:
return true, io.ErrUnexpectedEOF
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
@ -202,6 +206,9 @@ const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// error reported when bits are overread.
const errorOverread = 6
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
@ -247,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
litRemain: len(s.literals),
}
if debugDecoder {
println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
@ -277,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
case errorOverread:
return io.ErrUnexpectedEOF
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
@ -291,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
if s.seqSize > maxBlockSize {
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if debugDecoder {
println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)

@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:
sequenceDecs_decode_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_end
JLE sequenceDecs_decode_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_end
SHLQ $0x08, DX
@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
sequenceDecs_decode_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero:
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_2_end
JLE sequenceDecs_decode_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_2_end
SHLQ $0x08, DX
@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
sequenceDecs_decode_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -320,6 +328,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop:
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_56_amd64_fill_end
JLE sequenceDecs_decode_56_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_56_amd64_fill_end
SHLQ $0x08, DX
@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
sequenceDecs_decode_56_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_56_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -613,6 +630,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop:
sequenceDecs_decode_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_end
JLE sequenceDecs_decode_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_end
SHLQ $0x08, AX
@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
sequenceDecs_decode_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end:
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_2_end
JLE sequenceDecs_decode_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_2_end
SHLQ $0x08, AX
@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
sequenceDecs_decode_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -889,6 +919,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop:
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_56_bmi2_fill_end
JLE sequenceDecs_decode_56_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_56_bmi2_fill_end
SHLQ $0x08, AX
@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
sequenceDecs_decode_56_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_56_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -1140,6 +1179,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop:
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_end
JLE sequenceDecs_decodeSync_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_end
SHLQ $0x08, DX
@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero:
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_2_end
JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_2_end
SHLQ $0x08, DX
@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -2291,6 +2343,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop:
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_end
JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_end
SHLQ $0x08, AX
@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end:
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
SHLQ $0x08, AX
@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -2801,6 +2866,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop:
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
SHLQ $0x08, DX
@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
SHLQ $0x08, DX
@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -3455,6 +3533,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop:
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
SHLQ $0x08, AX
@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end:
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
SHLQ $0x08, AX
@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -4067,6 +4158,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX

@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) {
}
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}
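Both loaders now go through a full slice expression, `b[:len(b):len(b)]`, which re-slices `b` with capacity clamped to its length before indexing; the spare capacity beyond `len(b)` becomes unreachable through the new header. The semantics in isolation:

```go
package main

import "fmt"

func main() {
	b := make([]byte, 4, 16)
	c := b[:len(b):len(b)] // full slice expression: same elements, cap(c) == len(b)
	fmt.Println(len(b), cap(b)) // 4 16
	fmt.Println(len(c), cap(c)) // 4 4
	// Appending to c must allocate; it can never write into b's spare capacity.
	d := append(c, 0xff)
	fmt.Println(cap(d) > cap(c)) // true: new backing array
}
```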
type byter interface {

@ -17,7 +17,7 @@ images: docs/urn.png
.PHONY: removecomments
removecomments:
@go build ./tools/removecomments
@cd ./tools/removecomments; go build -o ../../removecomments ./main.go
machine.go: machine.go.rl

vendor/github.com/lib/pq/conn.go generated vendored

@ -2,6 +2,7 @@ package pq
import (
"bufio"
"bytes"
"context"
"crypto/md5"
"crypto/sha256"
@ -260,46 +261,52 @@ func (cn *conn) handlePgpass(o values) {
}
defer file.Close()
scanner := bufio.NewScanner(io.Reader(file))
// From: https://github.com/tg/pgpass/blob/master/reader.go
for scanner.Scan() {
scanText(scanner.Text(), o)
}
}
// getFields is a helper function for scanText.
func getFields(s string) []string {
fs := make([]string, 0, 5)
f := make([]rune, 0, len(s))
var esc bool
for _, c := range s {
switch {
case esc:
f = append(f, c)
esc = false
case c == '\\':
esc = true
case c == ':':
fs = append(fs, string(f))
f = f[:0]
default:
f = append(f, c)
}
}
return append(fs, string(f))
}
// scanText assists handlePgpass in its objective.
func scanText(line string, o values) {
hostname := o["host"]
ntw, _ := network(o)
port := o["port"]
db := o["dbname"]
username := o["user"]
// From: https://github.com/tg/pgpass/blob/master/reader.go
getFields := func(s string) []string {
fs := make([]string, 0, 5)
f := make([]rune, 0, len(s))
var esc bool
for _, c := range s {
switch {
case esc:
f = append(f, c)
esc = false
case c == '\\':
esc = true
case c == ':':
fs = append(fs, string(f))
f = f[:0]
default:
f = append(f, c)
}
}
return append(fs, string(f))
if len(line) == 0 || line[0] == '#' {
return
}
for scanner.Scan() {
line := scanner.Text()
if len(line) == 0 || line[0] == '#' {
continue
}
split := getFields(line)
if len(split) != 5 {
continue
}
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
o["password"] = split[4]
return
}
split := getFields(line)
if len(split) != 5 {
return
}
if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) {
o["password"] = split[4]
return
}
}
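With `getFields` promoted to package scope, the `.pgpass` parsing is easy to exercise in isolation: each line is `host:port:database:user:password`, split on unescaped colons. A self-contained sketch reusing the function exactly as defined above:

```go
package main

import "fmt"

// getFields as defined above: splits a .pgpass line on ':',
// honouring backslash escapes.
func getFields(s string) []string {
	fs := make([]string, 0, 5)
	f := make([]rune, 0, len(s))
	var esc bool
	for _, c := range s {
		switch {
		case esc:
			f = append(f, c)
			esc = false
		case c == '\\':
			esc = true
		case c == ':':
			fs = append(fs, string(f))
			f = f[:0]
		default:
			f = append(f, c)
		}
	}
	return append(fs, string(f))
}

func main() {
	// host:port:database:user:password, with an escaped colon in the password
	fmt.Println(getFields(`localhost:5432:mydb:alice:p\:ss`))
	// [localhost 5432 mydb alice p:ss]
}
```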
@ -1631,10 +1638,10 @@ func (rs *rows) NextResultSet() error {
// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be
// used as part of an SQL statement. For example:
//
// tblname := "my_table"
// data := "my_data"
// quoted := pq.QuoteIdentifier(tblname)
// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data)
//
// Any double quotes in name will be escaped. The quoted identifier will be
// case sensitive when used in a query. If the input string contains a zero
@ -1647,12 +1654,24 @@ func QuoteIdentifier(name string) string {
return `"` + strings.Replace(name, `"`, `""`, -1) + `"`
}
// BufferQuoteIdentifier satisfies the same purpose as QuoteIdentifier, but backed by a
// byte buffer.
func BufferQuoteIdentifier(name string, buffer *bytes.Buffer) {
end := strings.IndexRune(name, 0)
if end > -1 {
name = name[:end]
}
buffer.WriteRune('"')
buffer.WriteString(strings.Replace(name, `"`, `""`, -1))
buffer.WriteRune('"')
}
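`BufferQuoteIdentifier` produces the same quoting as `QuoteIdentifier` but writes into a caller-supplied `bytes.Buffer`, which is what the reworked COPY helpers below build on. A quick sketch of the doubled inner quotes:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/lib/pq"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("SELECT * FROM ")
	pq.BufferQuoteIdentifier(`weird"table`, &buf)
	fmt.Println(buf.String()) // SELECT * FROM "weird""table"
}
```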
// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal
// to DDL and other statements that do not accept parameters) to be used as part
// of an SQL statement. For example:
//
// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z")
// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date))
//
// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be
// replaced by two backslashes (i.e. "\\") and the C-style escape identifier
@ -2062,3 +2081,20 @@ func alnumLowerASCII(ch rune) rune {
}
return -1 // discard
}
// The database/sql/driver package says:
// All Conn implementations should implement the following interfaces: Pinger, SessionResetter, and Validator.
var _ driver.Pinger = &conn{}
var _ driver.SessionResetter = &conn{}
var _ driver.Validator = &conn{}
func (cn *conn) ResetSession(ctx context.Context) error {
// Ensure bad connections are reported: From database/sql/driver:
// If a connection is never returned to the connection pool but immediately reused, then
// ResetSession is called prior to reuse but IsValid is not called.
return cn.err.get()
}
func (cn *conn) IsValid() bool {
return cn.err.get() == nil
}
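With these interface assertions, database/sql's pool can detect sticky connection errors itself: `ResetSession` runs before a pooled connection is reused and `IsValid` before it is returned to the pool. Callers just use the pool as usual (the DSN below is a placeholder):

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Ping goes through driver.Pinger; connections whose stored error is
	// non-nil are discarded by the pool rather than handed back out.
	if err := db.PingContext(context.Background()); err != nil {
		log.Println("ping:", err)
	}
}
```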

vendor/github.com/lib/pq/copy.go generated vendored

@ -1,6 +1,7 @@
package pq
import (
"bytes"
"context"
"database/sql/driver"
"encoding/binary"
@ -20,29 +21,35 @@ var (
// CopyIn creates a COPY FROM statement which can be prepared with
// Tx.Prepare(). The target table should be visible in search_path.
func CopyIn(table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(table) + " ("
buffer := bytes.NewBufferString("COPY ")
BufferQuoteIdentifier(table, buffer)
buffer.WriteString(" (")
makeStmt(buffer, columns...)
return buffer.String()
}
// makeStmt makes the stmt string for CopyIn and CopyInSchema.
func makeStmt(buffer *bytes.Buffer, columns ...string) {
for i, col := range columns {
if i != 0 {
stmt += ", "
buffer.WriteString(", ")
}
stmt += QuoteIdentifier(col)
BufferQuoteIdentifier(col, buffer)
}
stmt += ") FROM STDIN"
return stmt
buffer.WriteString(") FROM STDIN")
}
// CopyInSchema creates a COPY FROM statement which can be prepared with
// Tx.Prepare().
func CopyInSchema(schema, table string, columns ...string) string {
stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " ("
for i, col := range columns {
if i != 0 {
stmt += ", "
}
stmt += QuoteIdentifier(col)
}
stmt += ") FROM STDIN"
return stmt
buffer := bytes.NewBufferString("COPY ")
BufferQuoteIdentifier(schema, buffer)
buffer.WriteRune('.')
BufferQuoteIdentifier(table, buffer)
buffer.WriteString(" (")
makeStmt(buffer, columns...)
return buffer.String()
}
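`CopyIn` and `CopyInSchema` now share `makeStmt` and assemble the statement in a `bytes.Buffer` rather than by string concatenation; the returned statement is used exactly as before in the documented bulk-import pattern (table, columns, and DSN below are placeholders):

```go
package main

import (
	"database/sql"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/app?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	txn, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	// COPY "users" ("name", "age") FROM STDIN
	stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age"))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := stmt.Exec("alice", 30); err != nil {
		log.Fatal(err)
	}
	if _, err := stmt.Exec(); err != nil { // empty Exec flushes the COPY
		log.Fatal(err)
	}
	stmt.Close()
	txn.Commit()
}
```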
type copyin struct {

@ -5,71 +5,18 @@
// Package curve25519 provides an implementation of the X25519 function, which
// performs scalar multiplication on the elliptic curve known as Curve25519.
// See RFC 7748.
//
// Starting in Go 1.20, this package is a wrapper for the X25519 implementation
// in the crypto/ecdh package.
package curve25519 // import "golang.org/x/crypto/curve25519"
import (
"crypto/subtle"
"errors"
"strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
// ScalarMult sets dst to the product scalar * point.
//
// Deprecated: when provided a low-order point, ScalarMult will set dst to all
// zeroes, irrespective of the scalar. Instead, use the X25519 function, which
// will return an error.
func ScalarMult(dst, scalar, point *[32]byte) {
var e [32]byte
copy(e[:], scalar[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
x1.SetBytes(point[:])
x2.One()
x3.Set(&x1)
z3.One()
swap := 0
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int(b)
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
swap = int(b)
tmp0.Subtract(&x3, &z3)
tmp1.Subtract(&x2, &z2)
x2.Add(&x2, &z2)
z2.Add(&x3, &z3)
z3.Multiply(&tmp0, &x2)
z2.Multiply(&z2, &tmp1)
tmp0.Square(&tmp1)
tmp1.Square(&x2)
x3.Add(&z3, &z2)
z2.Subtract(&z3, &z2)
x2.Multiply(&tmp1, &tmp0)
tmp1.Subtract(&tmp1, &tmp0)
z2.Square(&z2)
z3.Mult32(&tmp1, 121666)
x3.Square(&x3)
tmp0.Add(&tmp0, &z3)
z3.Multiply(&x1, &z2)
z2.Multiply(&tmp1, &tmp0)
}
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
z2.Invert(&z2)
x2.Multiply(&x2, &z2)
copy(dst[:], x2.Bytes())
scalarMult(dst, scalar, point)
}
// ScalarBaseMult sets dst to the product scalar * base where base is the
@ -78,7 +25,7 @@ func ScalarMult(dst, scalar, point *[32]byte) {
// It is recommended to use the X25519 function with Basepoint instead, as
// copying into fixed size arrays can lead to unexpected bugs.
func ScalarBaseMult(dst, scalar *[32]byte) {
ScalarMult(dst, scalar, &basePoint)
scalarBaseMult(dst, scalar)
}
const (
@ -91,21 +38,10 @@ const (
// Basepoint is the canonical Curve25519 generator.
var Basepoint []byte
var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
var basePoint = [32]byte{9}
func init() { Basepoint = basePoint[:] }
func checkBasepoint() {
if subtle.ConstantTimeCompare(Basepoint, []byte{
0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}) != 1 {
panic("curve25519: global Basepoint value was modified")
}
}
// X25519 returns the result of the scalar multiplication (scalar * point),
// according to RFC 7748, Section 5. scalar, point and the return value are
// slices of 32 bytes.
@ -121,26 +57,3 @@ func X25519(scalar, point []byte) ([]byte, error) {
var dst [32]byte
return x25519(&dst, scalar, point)
}
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
checkBasepoint()
ScalarBaseMult(dst, &in)
} else {
var base, zero [32]byte
copy(base[:], point)
ScalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
}

@ -0,0 +1,105 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
package curve25519
import (
"crypto/subtle"
"errors"
"strconv"
"golang.org/x/crypto/curve25519/internal/field"
)
func scalarMult(dst, scalar, point *[32]byte) {
var e [32]byte
copy(e[:], scalar[:])
e[0] &= 248
e[31] &= 127
e[31] |= 64
var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
x1.SetBytes(point[:])
x2.One()
x3.Set(&x1)
z3.One()
swap := 0
for pos := 254; pos >= 0; pos-- {
b := e[pos/8] >> uint(pos&7)
b &= 1
swap ^= int(b)
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
swap = int(b)
tmp0.Subtract(&x3, &z3)
tmp1.Subtract(&x2, &z2)
x2.Add(&x2, &z2)
z2.Add(&x3, &z3)
z3.Multiply(&tmp0, &x2)
z2.Multiply(&z2, &tmp1)
tmp0.Square(&tmp1)
tmp1.Square(&x2)
x3.Add(&z3, &z2)
z2.Subtract(&z3, &z2)
x2.Multiply(&tmp1, &tmp0)
tmp1.Subtract(&tmp1, &tmp0)
z2.Square(&z2)
z3.Mult32(&tmp1, 121666)
x3.Square(&x3)
tmp0.Add(&tmp0, &z3)
z3.Multiply(&x1, &z2)
z2.Multiply(&tmp1, &tmp0)
}
x2.Swap(&x3, swap)
z2.Swap(&z3, swap)
z2.Invert(&z2)
x2.Multiply(&x2, &z2)
copy(dst[:], x2.Bytes())
}
func scalarBaseMult(dst, scalar *[32]byte) {
checkBasepoint()
scalarMult(dst, scalar, &basePoint)
}
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
var in [32]byte
if l := len(scalar); l != 32 {
return nil, errors.New("bad scalar length: " + strconv.Itoa(l) + ", expected 32")
}
if l := len(point); l != 32 {
return nil, errors.New("bad point length: " + strconv.Itoa(l) + ", expected 32")
}
copy(in[:], scalar)
if &point[0] == &Basepoint[0] {
scalarBaseMult(dst, &in)
} else {
var base, zero [32]byte
copy(base[:], point)
scalarMult(dst, &in, &base)
if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
return nil, errors.New("bad input point: low order point")
}
}
return dst[:], nil
}
func checkBasepoint() {
if subtle.ConstantTimeCompare(Basepoint, []byte{
0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}) != 1 {
panic("curve25519: global Basepoint value was modified")
}
}

@ -0,0 +1,46 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
package curve25519
import "crypto/ecdh"
func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) {
curve := ecdh.X25519()
pub, err := curve.NewPublicKey(point)
if err != nil {
return nil, err
}
priv, err := curve.NewPrivateKey(scalar)
if err != nil {
return nil, err
}
out, err := priv.ECDH(pub)
if err != nil {
return nil, err
}
copy(dst[:], out)
return dst[:], nil
}
func scalarMult(dst, scalar, point *[32]byte) {
if _, err := x25519(dst, scalar[:], point[:]); err != nil {
// The only error condition for x25519 when the inputs are 32 bytes long
// is if the output would have been the all-zero value.
for i := range dst {
dst[i] = 0
}
}
}
func scalarBaseMult(dst, scalar *[32]byte) {
curve := ecdh.X25519()
priv, err := curve.NewPrivateKey(scalar[:])
if err != nil {
panic("curve25519: internal error: scalarBaseMult was not 32 bytes")
}
copy(dst[:], priv.PublicKey().Bytes())
}
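On Go 1.20+ the whole scalar-multiplication path is delegated to crypto/ecdh, with `scalarMult` zeroing `dst` on low-order inputs to preserve the deprecated `ScalarMult` contract. Either backend serves the same `X25519` API; a Diffie-Hellman sketch:

```go
package main

import (
	"bytes"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/curve25519"
)

func main() {
	var a, b [32]byte
	if _, err := rand.Read(a[:]); err != nil {
		log.Fatal(err)
	}
	if _, err := rand.Read(b[:]); err != nil {
		log.Fatal(err)
	}
	pubA, err := curve25519.X25519(a[:], curve25519.Basepoint)
	if err != nil {
		log.Fatal(err)
	}
	pubB, err := curve25519.X25519(b[:], curve25519.Basepoint)
	if err != nil {
		log.Fatal(err)
	}
	sharedA, _ := curve25519.X25519(a[:], pubB)
	sharedB, _ := curve25519.X25519(b[:], pubA)
	log.Println("shared secrets equal:", bytes.Equal(sharedA, sharedB)) // true
}
```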

@ -97,7 +97,7 @@ func (c *connection) Close() error {
return c.sshConn.conn.Close()
}
// sshconn provides net.Conn metadata, but disallows direct reads and
// sshConn provides net.Conn metadata, but disallows direct reads and
// writes.
type sshConn struct {
conn net.Conn

@ -1087,9 +1087,9 @@ func (*PassphraseMissingError) Error() string {
return "ssh: this private key is passphrase protected"
}
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the
// private key is encrypted, it will return a PassphraseMissingError.
// ParseRawPrivateKey returns a private key from a PEM encoded private key. It supports
// RSA, DSA, ECDSA, and Ed25519 private keys in PKCS#1, PKCS#8, OpenSSL, and OpenSSH
// formats. If the private key is encrypted, it will return a PassphraseMissingError.
func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {

@ -106,7 +106,7 @@ and as such does not resolve issues that may exist in the processed HTML,
producing a literal interpretation of the input.
If your use case requires semantically well-formed HTML, as defined by the
WHATWG specifiction, the parser should be used rather than the tokenizer.
WHATWG specification, the parser should be used rather than the tokenizer.
*/
package html // import "golang.org/x/net/html"

@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}

@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't

@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@ -2555,6 +2556,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@ -2573,9 +2577,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():

@ -44,12 +44,12 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
return out.Bytes(), err
}
// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow
// in the same executable. This function cannot import data from
// IImportShallow decodes "shallow" types.Package data encoded by
// IExportShallow in the same executable. This function cannot import data from
// cmd/compile or gcexportdata.Write.
func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) {
func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) {
const bundle = false
pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert)
pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert)
if err != nil {
return nil, err
}

@ -85,7 +85,7 @@ const (
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) {
pkgs, err := iimportCommon(fset, imports, data, false, path, nil)
pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil)
if err != nil {
return 0, nil, err
}
@ -94,10 +94,33 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []
// IImportBundle imports a set of packages from the serialized package bundle.
func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) {
return iimportCommon(fset, imports, data, true, "", nil)
return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil)
}
func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
// A GetPackageFunc is a function that gets the package with the given path
// from the importer state, creating it (with the specified name) if necessary.
// It is an abstraction of the map historically used to memoize package creation.
//
// Two calls with the same path must return the same package.
//
// If the given getPackage func returns nil, the import will fail.
type GetPackageFunc = func(path, name string) *types.Package
// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the
// given map of package path -> package.
//
// The resulting func may mutate m: if a requested package is not found, a new
// package will be inserted into m.
func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc {
return func(path, name string) *types.Package {
if _, ok := m[path]; !ok {
m[path] = types.NewPackage(path, name)
}
return m[path]
}
}
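`GetPackageFunc` abstracts the old `map[string]*types.Package` memoization, and `GetPackageFromMap` adapts existing call sites. The contract (same path, same package) in a runnable sketch; the two declarations are copied, since this package is internal to golang.org/x/tools:

```go
package main

import (
	"fmt"
	"go/types"
)

type GetPackageFunc = func(path, name string) *types.Package

func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc {
	return func(path, name string) *types.Package {
		if _, ok := m[path]; !ok {
			m[path] = types.NewPackage(path, name)
		}
		return m[path]
	}
}

func main() {
	m := map[string]*types.Package{}
	get := GetPackageFromMap(m)
	p1 := get("example.com/p", "p")
	p2 := get("example.com/p", "p")
	fmt.Println(p1 == p2, m["example.com/p"] == p1) // true true
}
```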
func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) {
const currentVersion = iexportVersionCurrent
version := int64(-1)
if !debug {
@ -195,10 +218,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
pkg := getPackage(pkgPath, pkgName)
if pkg == nil {
pkg = types.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
errorf("internal error: getPackage returned nil package for %s", pkgPath)
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}

@ -7,7 +7,9 @@
package tokeninternal
import (
"fmt"
"go/token"
"sort"
"sync"
"unsafe"
)
@ -57,3 +59,93 @@ func GetLines(file *token.File) []int {
panic("unexpected token.File size")
}
}
// AddExistingFiles adds the specified files to the FileSet if they
// are not already present. It panics if any pair of files in the
// resulting FileSet would overlap.
func AddExistingFiles(fset *token.FileSet, files []*token.File) {
// Punch through the FileSet encapsulation.
type tokenFileSet struct {
// This type remained essentially consistent from go1.16 to go1.21.
mutex sync.RWMutex
base int
files []*token.File
_ *token.File // changed to atomic.Pointer[token.File] in go1.19
}
// If the size of token.FileSet changes, this will fail to compile.
const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{}))
var _ [-delta * delta]int
type uP = unsafe.Pointer
var ptr *tokenFileSet
*(*uP)(uP(&ptr)) = uP(fset)
ptr.mutex.Lock()
defer ptr.mutex.Unlock()
// Merge and sort.
newFiles := append(ptr.files, files...)
sort.Slice(newFiles, func(i, j int) bool {
return newFiles[i].Base() < newFiles[j].Base()
})
// Reject overlapping files.
// Discard adjacent identical files.
out := newFiles[:0]
for i, file := range newFiles {
if i > 0 {
prev := newFiles[i-1]
if file == prev {
continue
}
if prev.Base()+prev.Size()+1 > file.Base() {
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
file.Name(), file.Base(), file.Base()+file.Size()))
}
}
out = append(out, file)
}
newFiles = out
ptr.files = newFiles
// Advance FileSet.Base().
if len(newFiles) > 0 {
last := newFiles[len(newFiles)-1]
newBase := last.Base() + last.Size() + 1
if ptr.base < newBase {
ptr.base = newBase
}
}
}
// FileSetFor returns a new FileSet containing a sequence of new Files with
// the same base, size, and line as the input files, for use in APIs that
// require a FileSet.
//
// Precondition: the input files must be non-overlapping, and sorted in order
// of their Base.
func FileSetFor(files ...*token.File) *token.FileSet {
fset := token.NewFileSet()
for _, f := range files {
f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
lines := GetLines(f)
f2.SetLines(lines)
}
return fset
}
// CloneFileSet creates a new FileSet holding all files in fset. It does not
// create copies of the token.Files in fset: they are added to the resulting
// FileSet unmodified.
func CloneFileSet(fset *token.FileSet) *token.FileSet {
var files []*token.File
fset.Iterate(func(f *token.File) bool {
files = append(files, f)
return true
})
newFileSet := token.NewFileSet()
AddExistingFiles(newFileSet, files)
return newFileSet
}
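`FileSetFor` rebuilds a FileSet from existing Files by copying name, base, size, and line table; the internal version reads the line table through unsafe (`GetLines`). A public-API approximation of the same idea, for illustration only:

```go
package main

import (
	"fmt"
	"go/token"
)

// fileSetFor mirrors tokeninternal.FileSetFor using only the public API:
// new Files with the same name, base and size, plus copied line offsets.
func fileSetFor(files ...*token.File) *token.FileSet {
	fset := token.NewFileSet()
	for _, f := range files {
		f2 := fset.AddFile(f.Name(), f.Base(), f.Size())
		lines := make([]int, 0, f.LineCount())
		for i := 1; i <= f.LineCount(); i++ {
			lines = append(lines, f.Offset(f.LineStart(i)))
		}
		f2.SetLines(lines)
	}
	return fset
}

func main() {
	src := token.NewFileSet()
	f := src.AddFile("a.go", src.Base(), 30)
	f.SetLines([]int{0, 10, 20}) // three lines

	copied := fileSetFor(f)
	fmt.Println(copied.Position(token.Pos(f.Base() + 12))) // a.go:2:3
}
```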

@ -179,7 +179,7 @@ type UserWithJSON struct {
}
var user = UserWithJSON{
Name: "hello"
Name: "hello",
Attributes: datatypes.JSONType[Attribute]{
Data: Attribute{
Age: 18,
@ -214,6 +214,48 @@ DB.Model(&user).Updates(jsonMap)
NOTE: it does not support json query
## JSONSlice[T]
sqlite, mysql, postgres supported
```go
import "gorm.io/datatypes"
type Tag struct {
Name string
Score float64
}
type UserWithJSON struct {
gorm.Model
Name string
Tags datatypes.JSONSlice[Tag]
}
var tags = []Tag{{Name: "tag1", Score: 0.1}, {Name: "tag2", Score: 0.2}}
var user = UserWithJSON{
Name: "hello",
Tags: datatypes.NewJSONSlice(tags),
}
// Create
DB.Create(&user)
// First
var result UserWithJSON
DB.First(&result, user.ID)
// Update
var tags2 = []Tag{{Name: "tag3", Score: 10.1}, {Name: "tag4", Score: 10.2}}
jsonMap := UserWithJSON{
Tags: datatypes.NewJSONSlice(tags2),
}
DB.Model(&user).Updates(jsonMap)
```
NOTE: it does not support json query or the `db.Pluck` method
## JSONArray
mysql supported

vendor/gorm.io/datatypes/json.go generated vendored

@ -152,7 +152,7 @@ func (jsonQuery *JSONQueryExpression) Build(builder clause.Builder) {
builder.WriteString("JSON_EXTRACT(")
builder.WriteQuoted(jsonQuery.column)
builder.WriteByte(',')
builder.AddVar(stmt, jsonQuery.path)
builder.AddVar(stmt, prefix+jsonQuery.path)
builder.WriteString(")")
case jsonQuery.hasKeys:
if len(jsonQuery.keys) > 0 {

@ -1,6 +1,7 @@
package datatypes
import (
"bytes"
"context"
"database/sql/driver"
"encoding/json"
@ -42,7 +43,10 @@ func (m *JSONMap) Scan(val interface{}) error {
return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", val))
}
t := map[string]interface{}{}
err := json.Unmarshal(ba, &t)
rd := bytes.NewReader(ba)
decoder := json.NewDecoder(rd)
decoder.UseNumber()
err := decoder.Decode(&t)
*m = t
return err
}

@ -16,12 +16,23 @@ import (
// JSONType give a generic data type for json encoded data.
type JSONType[T any] struct {
Data T
data T
}
func NewJSONType[T any](data T) JSONType[T] {
return JSONType[T]{
data: data,
}
}
// Data return data with generic Type T
func (j JSONType[T]) Data() T {
return j.data
}
// Value return json value, implement driver.Valuer interface
func (j JSONType[T]) Value() (driver.Value, error) {
return json.Marshal(j.Data)
return json.Marshal(j.data)
}
// Scan scan value into JSONType[T], implements sql.Scanner interface
@ -35,17 +46,17 @@ func (j *JSONType[T]) Scan(value interface{}) error {
default:
return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value))
}
return json.Unmarshal(bytes, &j.Data)
return json.Unmarshal(bytes, &j.data)
}
// MarshalJSON to output non base64 encoded []byte
func (j JSONType[T]) MarshalJSON() ([]byte, error) {
return json.Marshal(j.Data)
return json.Marshal(j.data)
}
// UnmarshalJSON to deserialize []byte
func (j *JSONType[T]) UnmarshalJSON(b []byte) error {
return json.Unmarshal(b, &j.Data)
return json.Unmarshal(b, &j.data)
}
// GormDataType gorm common data type
@ -78,3 +89,60 @@ func (js JSONType[T]) GormValue(ctx context.Context, db *gorm.DB) clause.Expr {
return gorm.Expr("?", string(data))
}
// JSONSlice give a generic data type for json encoded slice data.
type JSONSlice[T any] []T
func NewJSONSlice[T any](s []T) JSONSlice[T] {
return JSONSlice[T](s)
}
// Value return json value, implement driver.Valuer interface
func (j JSONSlice[T]) Value() (driver.Value, error) {
return json.Marshal(j)
}
// Scan scan value into JSONType[T], implements sql.Scanner interface
func (j *JSONSlice[T]) Scan(value interface{}) error {
var bytes []byte
switch v := value.(type) {
case []byte:
bytes = v
case string:
bytes = []byte(v)
default:
return errors.New(fmt.Sprint("Failed to unmarshal JSONB value:", value))
}
return json.Unmarshal(bytes, &j)
}
// GormDataType gorm common data type
func (JSONSlice[T]) GormDataType() string {
return "json"
}
// GormDBDataType gorm db data type
func (JSONSlice[T]) GormDBDataType(db *gorm.DB, field *schema.Field) string {
switch db.Dialector.Name() {
case "sqlite":
return "JSON"
case "mysql":
return "JSON"
case "postgres":
return "JSONB"
}
return ""
}
func (j JSONSlice[T]) GormValue(ctx context.Context, db *gorm.DB) clause.Expr {
data, _ := json.Marshal(j)
switch db.Dialector.Name() {
case "mysql":
if v, ok := db.Dialector.(*mysql.Dialector); ok && !strings.Contains(v.ServerVersion, "MariaDB") {
return gorm.Expr("CAST(? AS JSON)", string(data))
}
}
return gorm.Expr("?", string(data))
}
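Because `Data` is now an unexported field, values are constructed with `NewJSONType` and read back through the `Data()` accessor; `JSONSlice` remains a plain slice underneath. A sketch of the updated call sites (the `Attribute` type is a placeholder):

```go
package main

import (
	"fmt"

	"gorm.io/datatypes"
)

type Attribute struct {
	Age int
}

func main() {
	j := datatypes.NewJSONType(Attribute{Age: 18})
	fmt.Println(j.Data().Age) // 18; the field itself is no longer reachable

	tags := datatypes.NewJSONSlice([]string{"a", "b"})
	fmt.Println(len(tags)) // 2: JSONSlice[T] is just []T with SQL plumbing
}
```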

@ -0,0 +1,21 @@
package mysql
import (
"github.com/go-sql-driver/mysql"
"gorm.io/gorm"
)
var errCodes = map[string]uint16{
"uniqueConstraint": 1062,
}
func (dialector Dialector) Translate(err error) error {
if mysqlErr, ok := err.(*mysql.MySQLError); ok {
if mysqlErr.Number == errCodes["uniqueConstraint"] {
return gorm.ErrDuplicatedKey
}
}
return err
}
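This new file wires the MySQL dialector into GORM's error translation: duplicate-key errors (MySQL error number 1062) become `gorm.ErrDuplicatedKey`. A sketch, assuming the `TranslateError` flag in this GORM version (DSN and model are placeholders):

```go
package main

import (
	"errors"
	"log"

	"gorm.io/driver/mysql"
	"gorm.io/gorm"
)

type User struct {
	ID   uint
	Name string `gorm:"unique"`
}

func main() {
	dsn := "user:pass@tcp(127.0.0.1:3306)/app?parseTime=true"
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{TranslateError: true})
	if err != nil {
		log.Fatal(err)
	}
	db.AutoMigrate(&User{})
	db.Create(&User{Name: "alice"})
	err = db.Create(&User{Name: "alice"}).Error // MySQL error 1062
	if errors.Is(err, gorm.ErrDuplicatedKey) {
		log.Println("duplicate key, translated by the dialector")
	}
}
```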

@ -66,6 +66,12 @@ func Open(dsn string) gorm.Dialector {
}
func New(config Config) gorm.Dialector {
switch {
case config.DSN == "" && config.DSNConfig != nil:
config.DSN = config.DSNConfig.FormatDSN()
case config.DSN != "" && config.DSNConfig == nil:
config.DSNConfig, _ = mysql.ParseDSN(config.DSN)
}
return &Dialector{Config: &config}
}
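`New` now keeps the two configuration forms in sync: a raw DSN is parsed into `DSNConfig`, and a `DSNConfig` is rendered back into the DSN string. Either form should yield an equivalent dialector (the DSN is a placeholder; `DSNConfig` is assumed to be the new `*mysql.Config` field from go-sql-driver):

```go
package main

import (
	gomysql "github.com/go-sql-driver/mysql"
	"gorm.io/driver/mysql"
)

func main() {
	const dsn = "user:pass@tcp(127.0.0.1:3306)/app?parseTime=true"

	d1 := mysql.New(mysql.Config{DSN: dsn}) // DSNConfig filled via mysql.ParseDSN

	cfg, err := gomysql.ParseDSN(dsn)
	if err != nil {
		panic(err)
	}
	d2 := mysql.New(mysql.Config{DSNConfig: cfg}) // DSN filled via cfg.FormatDSN()

	_, _ = d1, d2
}
```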

4
vendor/gorm.io/gorm/README.md generated vendored

@ -35,9 +35,7 @@ The fantastic ORM library for Golang, aims to be developer friendly.
## Contributors
Thank you for contributing to the GORM framework!
[![Contributors](https://contrib.rocks/image?repo=go-gorm/gorm)](https://github.com/go-gorm/gorm/graphs/contributors)
[Thank you](https://github.com/go-gorm/gorm/graphs/contributors) for contributing to the GORM framework!
## License

6
vendor/gorm.io/gorm/callbacks.go generated vendored

@ -75,11 +75,7 @@ func (cs *callbacks) Raw() *processor {
func (p *processor) Execute(db *DB) *DB {
// call scopes
for len(db.Statement.scopes) > 0 {
scopes := db.Statement.scopes
db.Statement.scopes = nil
for _, scope := range scopes {
db = scope(db)
}
db = db.executeScopes()
}
var (

@ -51,25 +51,40 @@ func SaveBeforeAssociations(create bool) func(db *gorm.DB) {
}
elems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
distinctElems := reflect.MakeSlice(reflect.SliceOf(fieldType), 0, 10)
identityMap := map[string]bool{}
for i := 0; i < rValLen; i++ {
obj := db.Statement.ReflectValue.Index(i)
if reflect.Indirect(obj).Kind() != reflect.Struct {
break
}
if _, zero := rel.Field.ValueOf(db.Statement.Context, obj); !zero { // check belongs to relation value
rv := rel.Field.ReflectValueOf(db.Statement.Context, obj) // relation reflect value
if !isPtr {
rv = rv.Addr()
}
objs = append(objs, obj)
if isPtr {
elems = reflect.Append(elems, rv)
} else {
elems = reflect.Append(elems, rv.Addr())
elems = reflect.Append(elems, rv)
relPrimaryValues := make([]interface{}, 0, len(rel.FieldSchema.PrimaryFields))
for _, pf := range rel.FieldSchema.PrimaryFields {
if pfv, ok := pf.ValueOf(db.Statement.Context, rv); !ok {
relPrimaryValues = append(relPrimaryValues, pfv)
}
}
cacheKey := utils.ToStringKey(relPrimaryValues...)
if len(relPrimaryValues) != len(rel.FieldSchema.PrimaryFields) || !identityMap[cacheKey] {
if cacheKey != "" { // has primary fields
identityMap[cacheKey] = true
}
distinctElems = reflect.Append(distinctElems, rv)
}
}
}
if elems.Len() > 0 {
if saveAssociations(db, rel, elems, selectColumns, restricted, nil) == nil {
if saveAssociations(db, rel, distinctElems, selectColumns, restricted, nil) == nil {
for i := 0; i < elems.Len(); i++ {
setupReferences(objs[i], elems.Index(i))
}

@ -3,6 +3,7 @@ package callbacks
import (
"fmt"
"reflect"
"strings"
"gorm.io/gorm"
"gorm.io/gorm/clause"
@ -302,7 +303,8 @@ func ConvertToCreateValues(stmt *gorm.Statement) (values clause.Values) {
for _, column := range values.Columns {
if field := stmt.Schema.LookUpField(column.Name); field != nil {
if v, ok := selectColumns[field.DBName]; (ok && v) || (!ok && !restricted) {
if !field.PrimaryKey && (!field.HasDefaultValue || field.DefaultValueInterface != nil) && field.AutoCreateTime == 0 {
if !field.PrimaryKey && (!field.HasDefaultValue || field.DefaultValueInterface != nil ||
strings.EqualFold(field.DefaultValue, "NULL")) && field.AutoCreateTime == 0 {
if field.AutoUpdateTime > 0 {
assignment := clause.Assignment{Column: clause.Column{Name: field.DBName}, Value: curTime}
switch field.AutoUpdateTime {

@ -3,6 +3,7 @@ package callbacks
import (
"fmt"
"reflect"
"strings"
"gorm.io/gorm"
"gorm.io/gorm/clause"
@ -10,6 +11,98 @@ import (
"gorm.io/gorm/utils"
)
// parsePreloadMap extracts nested preloads. e.g.
//
// // schema has a "k0" relation and a "k7.k8" embedded relation
// parsePreloadMap(schema, map[string][]interface{}{
// clause.Associations: {"arg1"},
// "k1": {"arg2"},
// "k2.k3": {"arg3"},
// "k4.k5.k6": {"arg4"},
// })
// // preloadMap is
// map[string]map[string][]interface{}{
// "k0": {},
// "k7": {
// "k8": {},
// },
// "k1": {},
// "k2": {
// "k3": {"arg3"},
// },
// "k4": {
// "k5.k6": {"arg4"},
// },
// }
func parsePreloadMap(s *schema.Schema, preloads map[string][]interface{}) map[string]map[string][]interface{} {
preloadMap := map[string]map[string][]interface{}{}
setPreloadMap := func(name, value string, args []interface{}) {
if _, ok := preloadMap[name]; !ok {
preloadMap[name] = map[string][]interface{}{}
}
if value != "" {
preloadMap[name][value] = args
}
}
for name, args := range preloads {
preloadFields := strings.Split(name, ".")
value := strings.TrimPrefix(strings.TrimPrefix(name, preloadFields[0]), ".")
if preloadFields[0] == clause.Associations {
for _, relation := range s.Relationships.Relations {
if relation.Schema == s {
setPreloadMap(relation.Name, value, args)
}
}
for embedded, embeddedRelations := range s.Relationships.EmbeddedRelations {
for _, value := range embeddedValues(embeddedRelations) {
setPreloadMap(embedded, value, args)
}
}
} else {
setPreloadMap(preloadFields[0], value, args)
}
}
return preloadMap
}
func embeddedValues(embeddedRelations *schema.Relationships) []string {
if embeddedRelations == nil {
return nil
}
names := make([]string, 0, len(embeddedRelations.Relations)+len(embeddedRelations.EmbeddedRelations))
for _, relation := range embeddedRelations.Relations {
// skip first struct name
names = append(names, strings.Join(relation.Field.BindNames[1:], "."))
}
for _, relations := range embeddedRelations.EmbeddedRelations {
names = append(names, embeddedValues(relations)...)
}
return names
}
func preloadEmbedded(tx *gorm.DB, relationships *schema.Relationships, s *schema.Schema, preloads map[string][]interface{}, as []interface{}) error {
if relationships == nil {
return nil
}
preloadMap := parsePreloadMap(s, preloads)
for name := range preloadMap {
if embeddedRelations := relationships.EmbeddedRelations[name]; embeddedRelations != nil {
if err := preloadEmbedded(tx, embeddedRelations, s, preloadMap[name], as); err != nil {
return err
}
} else if rel := relationships.Relations[name]; rel != nil {
if err := preload(tx, rel, append(preloads[name], as), preloadMap[name]); err != nil {
return err
}
} else {
return fmt.Errorf("%s: %w (embedded) for schema %s", name, gorm.ErrUnsupportedRelation, s.Name)
}
}
return nil
}
func preload(tx *gorm.DB, rel *schema.Relationship, conds []interface{}, preloads map[string][]interface{}) error {
var (
reflectValue = tx.Statement.ReflectValue

@ -8,6 +8,8 @@ import (
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/schema"
"gorm.io/gorm/utils"
)
func Query(db *gorm.DB) {
@ -109,86 +111,136 @@ func BuildQuerySQL(db *gorm.DB) {
}
}
specifiedRelationsName := make(map[string]interface{})
for _, join := range db.Statement.Joins {
if db.Statement.Schema == nil {
fromClause.Joins = append(fromClause.Joins, clause.Join{
Expression: clause.NamedExpr{SQL: join.Name, Vars: join.Conds},
})
} else if relation, ok := db.Statement.Schema.Relationships.Relations[join.Name]; ok {
tableAliasName := relation.Name
columnStmt := gorm.Statement{
Table: tableAliasName, DB: db, Schema: relation.FieldSchema,
Selects: join.Selects, Omits: join.Omits,
}
if db.Statement.Schema != nil {
var isRelations bool // is relations or raw sql
var relations []*schema.Relationship
relation, ok := db.Statement.Schema.Relationships.Relations[join.Name]
if ok {
isRelations = true
relations = append(relations, relation)
} else {
// handle nested join like "Manager.Company"
nestedJoinNames := strings.Split(join.Name, ".")
if len(nestedJoinNames) > 1 {
isNestedJoin := true
guessNestedRelations := make([]*schema.Relationship, 0, len(nestedJoinNames))
currentRelations := db.Statement.Schema.Relationships.Relations
for _, relname := range nestedJoinNames {
// incomplete match, only treated as raw sql
if relation, ok = currentRelations[relname]; ok {
guessNestedRelations = append(guessNestedRelations, relation)
currentRelations = relation.FieldSchema.Relationships.Relations
} else {
isNestedJoin = false
break
}
}
selectColumns, restricted := columnStmt.SelectAndOmitColumns(false, false)
for _, s := range relation.FieldSchema.DBNames {
if v, ok := selectColumns[s]; (ok && v) || (!ok && !restricted) {
clauseSelect.Columns = append(clauseSelect.Columns, clause.Column{
Table: tableAliasName,
Name: s,
Alias: tableAliasName + "__" + s,
})
if isNestedJoin {
isRelations = true
relations = guessNestedRelations
}
}
}
exprs := make([]clause.Expression, len(relation.References))
for idx, ref := range relation.References {
if ref.OwnPrimaryKey {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: clause.CurrentTable, Name: ref.PrimaryKey.DBName},
Value: clause.Column{Table: tableAliasName, Name: ref.ForeignKey.DBName},
if isRelations {
genJoinClause := func(joinType clause.JoinType, parentTableName string, relation *schema.Relationship) clause.Join {
tableAliasName := relation.Name
if parentTableName != clause.CurrentTable {
tableAliasName = utils.NestedRelationName(parentTableName, tableAliasName)
}
columnStmt := gorm.Statement{
Table: tableAliasName, DB: db, Schema: relation.FieldSchema,
Selects: join.Selects, Omits: join.Omits,
}
} else {
if ref.PrimaryValue == "" {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: clause.CurrentTable, Name: ref.ForeignKey.DBName},
Value: clause.Column{Table: tableAliasName, Name: ref.PrimaryKey.DBName},
selectColumns, restricted := columnStmt.SelectAndOmitColumns(false, false)
for _, s := range relation.FieldSchema.DBNames {
if v, ok := selectColumns[s]; (ok && v) || (!ok && !restricted) {
clauseSelect.Columns = append(clauseSelect.Columns, clause.Column{
Table: tableAliasName,
Name: s,
Alias: utils.NestedRelationName(tableAliasName, s),
})
}
} else {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: tableAliasName, Name: ref.ForeignKey.DBName},
Value: ref.PrimaryValue,
}
exprs := make([]clause.Expression, len(relation.References))
for idx, ref := range relation.References {
if ref.OwnPrimaryKey {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: parentTableName, Name: ref.PrimaryKey.DBName},
Value: clause.Column{Table: tableAliasName, Name: ref.ForeignKey.DBName},
}
} else {
if ref.PrimaryValue == "" {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: parentTableName, Name: ref.ForeignKey.DBName},
Value: clause.Column{Table: tableAliasName, Name: ref.PrimaryKey.DBName},
}
} else {
exprs[idx] = clause.Eq{
Column: clause.Column{Table: tableAliasName, Name: ref.ForeignKey.DBName},
Value: ref.PrimaryValue,
}
}
}
}
}
}
{
onStmt := gorm.Statement{Table: tableAliasName, DB: db, Clauses: map[string]clause.Clause{}}
for _, c := range relation.FieldSchema.QueryClauses {
onStmt.AddClause(c)
}
{
onStmt := gorm.Statement{Table: tableAliasName, DB: db, Clauses: map[string]clause.Clause{}}
for _, c := range relation.FieldSchema.QueryClauses {
onStmt.AddClause(c)
}
if join.On != nil {
onStmt.AddClause(join.On)
}
if join.On != nil {
onStmt.AddClause(join.On)
}
if cs, ok := onStmt.Clauses["WHERE"]; ok {
if where, ok := cs.Expression.(clause.Where); ok {
where.Build(&onStmt)
if onSQL := onStmt.SQL.String(); onSQL != "" {
vars := onStmt.Vars
for idx, v := range vars {
bindvar := strings.Builder{}
onStmt.Vars = vars[0 : idx+1]
db.Dialector.BindVarTo(&bindvar, &onStmt, v)
onSQL = strings.Replace(onSQL, bindvar.String(), "?", 1)
if cs, ok := onStmt.Clauses["WHERE"]; ok {
if where, ok := cs.Expression.(clause.Where); ok {
where.Build(&onStmt)
if onSQL := onStmt.SQL.String(); onSQL != "" {
vars := onStmt.Vars
for idx, v := range vars {
bindvar := strings.Builder{}
onStmt.Vars = vars[0 : idx+1]
db.Dialector.BindVarTo(&bindvar, &onStmt, v)
onSQL = strings.Replace(onSQL, bindvar.String(), "?", 1)
}
exprs = append(exprs, clause.Expr{SQL: onSQL, Vars: vars})
}
}
exprs = append(exprs, clause.Expr{SQL: onSQL, Vars: vars})
}
}
return clause.Join{
Type: joinType,
Table: clause.Table{Name: relation.FieldSchema.Table, Alias: tableAliasName},
ON: clause.Where{Exprs: exprs},
}
}
}
fromClause.Joins = append(fromClause.Joins, clause.Join{
Type: join.JoinType,
Table: clause.Table{Name: relation.FieldSchema.Table, Alias: tableAliasName},
ON: clause.Where{Exprs: exprs},
})
parentTableName := clause.CurrentTable
for _, rel := range relations {
// joins table alias like "Manager, Company, Manager__Company"
nestedAlias := utils.NestedRelationName(parentTableName, rel.Name)
if _, ok := specifiedRelationsName[nestedAlias]; !ok {
fromClause.Joins = append(fromClause.Joins, genJoinClause(join.JoinType, parentTableName, rel))
specifiedRelationsName[nestedAlias] = nil
}
parentTableName = rel.Name
}
} else {
fromClause.Joins = append(fromClause.Joins, clause.Join{
Expression: clause.NamedExpr{SQL: join.Name, Vars: join.Conds},
})
}
} else {
fromClause.Joins = append(fromClause.Joins, clause.Join{
Expression: clause.NamedExpr{SQL: join.Name, Vars: join.Conds},
@ -215,32 +267,7 @@ func Preload(db *gorm.DB) {
return
}
preloadMap := map[string]map[string][]interface{}{}
for name := range db.Statement.Preloads {
preloadFields := strings.Split(name, ".")
if preloadFields[0] == clause.Associations {
for _, rel := range db.Statement.Schema.Relationships.Relations {
if rel.Schema == db.Statement.Schema {
if _, ok := preloadMap[rel.Name]; !ok {
preloadMap[rel.Name] = map[string][]interface{}{}
}
if value := strings.TrimPrefix(strings.TrimPrefix(name, preloadFields[0]), "."); value != "" {
preloadMap[rel.Name][value] = db.Statement.Preloads[name]
}
}
}
} else {
if _, ok := preloadMap[preloadFields[0]]; !ok {
preloadMap[preloadFields[0]] = map[string][]interface{}{}
}
if value := strings.TrimPrefix(strings.TrimPrefix(name, preloadFields[0]), "."); value != "" {
preloadMap[preloadFields[0]][value] = db.Statement.Preloads[name]
}
}
}
preloadMap := parsePreloadMap(db.Statement.Schema, db.Statement.Preloads)
preloadNames := make([]string, 0, len(preloadMap))
for key := range preloadMap {
preloadNames = append(preloadNames, key)
@ -260,7 +287,9 @@ func Preload(db *gorm.DB) {
preloadDB.Statement.Unscoped = db.Statement.Unscoped
for _, name := range preloadNames {
if rel := preloadDB.Statement.Schema.Relationships.Relations[name]; rel != nil {
if relations := preloadDB.Statement.Schema.Relationships.EmbeddedRelations[name]; relations != nil {
db.AddError(preloadEmbedded(preloadDB.Table("").Session(&gorm.Session{Context: db.Statement.Context, SkipHooks: db.Statement.SkipHooks}), relations, db.Statement.Schema, preloadMap[name], db.Statement.Preloads[clause.Associations]))
} else if rel := preloadDB.Statement.Schema.Relationships.Relations[name]; rel != nil {
db.AddError(preload(preloadDB.Table("").Session(&gorm.Session{Context: db.Statement.Context, SkipHooks: db.Statement.SkipHooks}), rel, append(db.Statement.Preloads[name], db.Statement.Preloads[clause.Associations]...), preloadMap[name]))
} else {
db.AddError(fmt.Errorf("%s: %w for schema %s", name, gorm.ErrUnsupportedRelation, db.Statement.Schema.Name))

@ -245,11 +245,13 @@ func ConvertToAssignments(stmt *gorm.Statement) (set clause.Set) {
}
default:
updatingSchema := stmt.Schema
var isDiffSchema bool
if !updatingValue.CanAddr() || stmt.Dest != stmt.Model {
// different schema
updatingStmt := &gorm.Statement{DB: stmt.DB}
if err := updatingStmt.Parse(stmt.Dest); err == nil {
updatingSchema = updatingStmt.Schema
isDiffSchema = true
}
}
@ -276,7 +278,13 @@ func ConvertToAssignments(stmt *gorm.Statement) (set clause.Set) {
if (ok || !isZero) && field.Updatable {
set = append(set, clause.Assignment{Column: clause.Column{Name: field.DBName}, Value: value})
assignValue(field, value)
assignField := field
if isDiffSchema {
if originField := stmt.Schema.LookUpField(dbName); originField != nil {
assignField = originField
}
}
assignValue(assignField, value)
}
}
} else {

@ -366,6 +366,36 @@ func (db *DB) Scopes(funcs ...func(*DB) *DB) (tx *DB) {
return tx
}
func (db *DB) executeScopes() (tx *DB) {
tx = db.getInstance()
scopes := db.Statement.scopes
if len(scopes) == 0 {
return tx
}
tx.Statement.scopes = nil
conditions := make([]clause.Interface, 0, 4)
if cs, ok := tx.Statement.Clauses["WHERE"]; ok && cs.Expression != nil {
conditions = append(conditions, cs.Expression.(clause.Interface))
cs.Expression = nil
tx.Statement.Clauses["WHERE"] = cs
}
for _, scope := range scopes {
tx = scope(tx)
if cs, ok := tx.Statement.Clauses["WHERE"]; ok && cs.Expression != nil {
conditions = append(conditions, cs.Expression.(clause.Interface))
cs.Expression = nil
tx.Statement.Clauses["WHERE"] = cs
}
}
for _, condition := range conditions {
tx.Statement.AddClause(condition)
}
return tx
}
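`executeScopes` collects the WHERE expression produced by each scope and re-adds the conditions once at the end. Typical usage is unchanged; a sketch with a hypothetical scope:

func ActiveUsers(db *gorm.DB) *gorm.DB {
	return db.Where("active = ?", true)
}

// Each scope runs once; the collected WHERE clauses are merged in order.
// db.Scopes(ActiveUsers).Where("age >= ?", 18).Find(&users)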
// Preload preloads associations with given conditions
//
// // get all users, and preload all non-cancelled orders

@ -33,7 +33,7 @@ func (limit Limit) MergeClause(clause *Clause) {
clause.Name = ""
if v, ok := clause.Expression.(Limit); ok {
if (limit.Limit == nil || *limit.Limit == 0) && (v.Limit != nil && *v.Limit != 0) {
if (limit.Limit == nil || *limit.Limit == 0) && v.Limit != nil {
limit.Limit = v.Limit
}
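With the relaxed check, an explicitly stored `LIMIT 0` now survives a later clause that only sets an offset. A sketch of the user-visible effect, assuming the stored clause is the earlier `Limit(0)`:

// Offset adds a Limit clause whose Limit value is nil; the merge now
// inherits the earlier explicit 0 instead of discarding it.
// db.Limit(0).Offset(10).Find(&users) // ... LIMIT 0 OFFSET 10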

@ -6,6 +6,8 @@ import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
"gorm.io/gorm/clause"
"gorm.io/gorm/logger"
@ -33,9 +35,10 @@ func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
var rowsAffected int64
tx = db.getInstance()
// compute the reflection length once to avoid repeated reflection calls
reflectLen := reflectValue.Len()
callFc := func(tx *DB) error {
// compute the reflection length once to avoid repeated reflection calls
reflectLen := reflectValue.Len()
for i := 0; i < reflectLen; i += batchSize {
ends := i + batchSize
if ends > reflectLen {
@ -53,7 +56,7 @@ func (db *DB) CreateInBatches(value interface{}, batchSize int) (tx *DB) {
return nil
}
if tx.SkipDefaultTransaction {
if tx.SkipDefaultTransaction || reflectLen <= batchSize {
tx.AddError(callFc(tx.Session(&Session{})))
} else {
tx.AddError(tx.Transaction(callFc))
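Hoisting `reflectLen` out of the callback also enables a fast path: a slice that fits in a single batch skips the wrapping transaction. Sketch:

// 50 <= 100, so this now runs as a single INSERT without opening an
// explicit surrounding transaction.
// users := make([]User, 50)
// db.CreateInBatches(users, 100)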
@ -104,7 +107,7 @@ func (db *DB) Save(value interface{}) (tx *DB) {
updateTx := tx.callbacks.Update().Execute(tx.Session(&Session{Initialized: true}))
if updateTx.Error == nil && updateTx.RowsAffected == 0 && !updateTx.DryRun && !selectedUpdate {
return tx.Create(value)
return tx.Clauses(clause.OnConflict{UpdateAll: true}).Create(value)
}
return updateTx
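When the update touches no rows, `Save` now falls back to an upsert rather than a plain create, so a record with a preset primary key is inserted or fully updated in one statement. Sketch:

// If id 42 exists, all columns are updated; otherwise the row is inserted.
// db.Save(&User{ID: 42, Name: "jinzhu"})
// the generated SQL is dialect-dependent, e.g. INSERT ... ON CONFLICT ... DO UPDATE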
@ -489,7 +492,7 @@ func (db *DB) Count(count *int64) (tx *DB) {
tx.Statement.Dest = count
tx = tx.callbacks.Query().Execute(tx)
if tx.RowsAffected != 1 {
if _, ok := db.Statement.Clauses["GROUP BY"]; ok || tx.RowsAffected != 1 {
*count = tx.RowsAffected
}
@ -608,6 +611,15 @@ func (db *DB) Connection(fc func(tx *DB) error) (err error) {
return fc(tx)
}
var (
savepointIdx int64
savepointNamePool = &sync.Pool{
New: func() interface{} {
return fmt.Sprintf("gorm_%d", atomic.AddInt64(&savepointIdx, 1))
},
}
)
// Transaction executes an arbitrary number of commands in fc within a transaction.
// On success the changes are committed; if an error occurs they are rolled back.
@ -617,7 +629,9 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
if committer, ok := db.Statement.ConnPool.(TxCommitter); ok && committer != nil {
// nested transaction
if !db.DisableNestedTransaction {
err = db.SavePoint(fmt.Sprintf("sp%p", fc)).Error
poolName := savepointNamePool.Get()
defer savepointNamePool.Put(poolName)
err = db.SavePoint(poolName.(string)).Error
if err != nil {
return
}
@ -625,7 +639,7 @@ func (db *DB) Transaction(fc func(tx *DB) error, opts ...*sql.TxOptions) (err er
defer func() {
// Make sure to rollback when panic, Block error or Commit error
if panicked || err != nil {
db.RollbackTo(fmt.Sprintf("sp%p", fc))
db.RollbackTo(poolName.(string))
}
}()
}
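Savepoint names now come from a pool (`gorm_1`, `gorm_2`, ...) instead of being formatted from the closure pointer, which could collide when the same closure ran in sibling nested transactions. Usage is unchanged; a sketch:

// db.Transaction(func(tx *gorm.DB) error {
// 	return tx.Transaction(func(tx2 *gorm.DB) error {
// 		// runs under SAVEPOINT gorm_N; rolled back to it on error
// 		return tx2.Create(&User{Name: "inner"}).Error
// 	})
// })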

vendor/gorm.io/gorm/gorm.go generated vendored

@ -47,6 +47,8 @@ type Config struct {
QueryFields bool
// CreateBatchSize default create batch size
CreateBatchSize int
// TranslateError enables dialect-specific error translation when set
TranslateError bool
// ClauseBuilders clause builder
ClauseBuilders map[string]clause.ClauseBuilder
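`TranslateError` makes dialect error translation opt-in (see the `AddError` hunk below). A sketch of enabling it, assuming the mysql driver package:

// db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{TranslateError: true})
// if err := db.Create(&user).Error; errors.Is(err, gorm.ErrDuplicatedKey) {
// 	// driver-specific duplicate-key errors become portable gorm errors
// }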
@ -347,14 +349,18 @@ func (db *DB) Callback() *callbacks {
// AddError add error to db
func (db *DB) AddError(err error) error {
if errTranslator, ok := db.Dialector.(ErrorTranslator); ok {
err = errTranslator.Translate(err)
}
if err != nil {
if db.Config.TranslateError {
if errTranslator, ok := db.Dialector.(ErrorTranslator); ok {
err = errTranslator.Translate(err)
}
}
if db.Error == nil {
db.Error = err
} else if err != nil {
db.Error = fmt.Errorf("%v; %w", db.Error, err)
if db.Error == nil {
db.Error = err
} else {
db.Error = fmt.Errorf("%v; %w", db.Error, err)
}
}
return db.Error
}

vendor/gorm.io/gorm/migrator.go generated vendored

@ -13,11 +13,7 @@ func (db *DB) Migrator() Migrator {
// apply scopes to migrator
for len(tx.Statement.scopes) > 0 {
scopes := tx.Statement.scopes
tx.Statement.scopes = nil
for _, scope := range scopes {
tx = scope(tx)
}
tx = tx.executeScopes()
}
return tx.Dialector.Migrator(tx.Session(&Session{}))

@ -17,12 +17,12 @@ func (idx Index) Table() string {
return idx.TableName
}
// Name return the name of the index.
func (idx Index) Name() string {
return idx.NameValue
}
// Columns return the columns fo the index
// Columns return the columns of the index
func (idx Index) Columns() []string {
return idx.ColumnList
}
@ -37,7 +37,7 @@ func (idx Index) Unique() (unique bool, ok bool) {
return idx.UniqueValue.Bool, idx.UniqueValue.Valid
}
// Option return the optional attribute fo the index
// Option return the optional attribute of the index
func (idx Index) Option() string {
return idx.OptionValue
}

@ -113,7 +113,7 @@ func (m Migrator) AutoMigrate(values ...interface{}) error {
return err
}
} else {
if err := m.RunWithValue(value, func(stmt *gorm.Statement) (errr error) {
if err := m.RunWithValue(value, func(stmt *gorm.Statement) error {
columnTypes, err := queryTx.Migrator().ColumnTypes(value)
if err != nil {
return err
@ -123,7 +123,6 @@ func (m Migrator) AutoMigrate(values ...interface{}) error {
parseCheckConstraints = stmt.Schema.ParseCheckConstraints()
)
for _, dbName := range stmt.Schema.DBNames {
field := stmt.Schema.FieldsByDBName[dbName]
var foundColumn gorm.ColumnType
for _, columnType := range columnTypes {
@ -135,12 +134,15 @@ func (m Migrator) AutoMigrate(values ...interface{}) error {
if foundColumn == nil {
// not found, add column
if err := execTx.Migrator().AddColumn(value, dbName); err != nil {
if err = execTx.Migrator().AddColumn(value, dbName); err != nil {
return err
}
} else {
// found, smartly migrate
field := stmt.Schema.FieldsByDBName[dbName]
if err = execTx.Migrator().MigrateColumn(value, field, foundColumn); err != nil {
return err
}
} else if err := execTx.Migrator().MigrateColumn(value, field, foundColumn); err != nil {
// found, smart migrate
return err
}
}
@ -195,7 +197,7 @@ func (m Migrator) GetTables() (tableList []string, err error) {
func (m Migrator) CreateTable(values ...interface{}) error {
for _, value := range m.ReorderModels(values, false) {
tx := m.DB.Session(&gorm.Session{})
if err := m.RunWithValue(value, func(stmt *gorm.Statement) (errr error) {
if err := m.RunWithValue(value, func(stmt *gorm.Statement) (err error) {
var (
createTableSQL = "CREATE TABLE ? ("
values = []interface{}{m.CurrentTable(stmt)}
@ -214,7 +216,7 @@ func (m Migrator) CreateTable(values ...interface{}) error {
if !hasPrimaryKeyInDataType && len(stmt.Schema.PrimaryFields) > 0 {
createTableSQL += "PRIMARY KEY ?,"
primaryKeys := []interface{}{}
primaryKeys := make([]interface{}, 0, len(stmt.Schema.PrimaryFields))
for _, field := range stmt.Schema.PrimaryFields {
primaryKeys = append(primaryKeys, clause.Column{Name: field.DBName})
}
@ -225,8 +227,8 @@ func (m Migrator) CreateTable(values ...interface{}) error {
for _, idx := range stmt.Schema.ParseIndexes() {
if m.CreateIndexAfterCreateTable {
defer func(value interface{}, name string) {
if errr == nil {
errr = tx.Migrator().CreateIndex(value, name)
if err == nil {
err = tx.Migrator().CreateIndex(value, name)
}
}(value, idx.Name)
} else {
@ -276,8 +278,8 @@ func (m Migrator) CreateTable(values ...interface{}) error {
createTableSQL += fmt.Sprint(tableOption)
}
errr = tx.Exec(createTableSQL, values...).Error
return errr
err = tx.Exec(createTableSQL, values...).Error
return err
}); err != nil {
return err
}
@ -498,7 +500,7 @@ func (m Migrator) MigrateColumn(value interface{}, field *schema.Field, columnTy
currentDefaultNotNull := field.HasDefaultValue && (field.DefaultValueInterface != nil || !strings.EqualFold(field.DefaultValue, "NULL"))
dv, dvNotNull := columnType.DefaultValue()
if dvNotNull && !currentDefaultNotNull {
// defalut value -> null
// default value -> null
alterColumn = true
} else if !dvNotNull && currentDefaultNotNull {
// null -> default value

vendor/gorm.io/gorm/scan.go generated vendored

@ -4,10 +4,10 @@ import (
"database/sql"
"database/sql/driver"
"reflect"
"strings"
"time"
"gorm.io/gorm/schema"
"gorm.io/gorm/utils"
)
// prepareValues prepares the values slice
@ -50,7 +50,7 @@ func scanIntoMap(mapValue map[string]interface{}, values []interface{}, columns
}
}
func (db *DB) scanIntoStruct(rows Rows, reflectValue reflect.Value, values []interface{}, fields []*schema.Field, joinFields [][2]*schema.Field) {
func (db *DB) scanIntoStruct(rows Rows, reflectValue reflect.Value, values []interface{}, fields []*schema.Field, joinFields [][]*schema.Field) {
for idx, field := range fields {
if field != nil {
values[idx] = field.NewValuePool.Get()
@ -65,28 +65,45 @@ func (db *DB) scanIntoStruct(rows Rows, reflectValue reflect.Value, values []int
db.RowsAffected++
db.AddError(rows.Scan(values...))
joinedSchemaMap := make(map[*schema.Field]interface{})
joinedNestedSchemaMap := make(map[string]interface{})
for idx, field := range fields {
if field == nil {
continue
}
if len(joinFields) == 0 || joinFields[idx][0] == nil {
if len(joinFields) == 0 || len(joinFields[idx]) == 0 {
db.AddError(field.Set(db.Statement.Context, reflectValue, values[idx]))
} else {
joinSchema := joinFields[idx][0]
relValue := joinSchema.ReflectValueOf(db.Statement.Context, reflectValue)
if relValue.Kind() == reflect.Ptr {
if _, ok := joinedSchemaMap[joinSchema]; !ok {
if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
continue
}
} else { // joinFields count is larger than 2 when using join
var isNilPtrValue bool
var relValue reflect.Value
// does not contain raw dbname
nestedJoinSchemas := joinFields[idx][:len(joinFields[idx])-1]
// current reflect value
currentReflectValue := reflectValue
fullRels := make([]string, 0, len(nestedJoinSchemas))
for _, joinSchema := range nestedJoinSchemas {
fullRels = append(fullRels, joinSchema.Name)
relValue = joinSchema.ReflectValueOf(db.Statement.Context, currentReflectValue)
if relValue.Kind() == reflect.Ptr {
fullRelsName := utils.JoinNestedRelationNames(fullRels)
// same nested structure
if _, ok := joinedNestedSchemaMap[fullRelsName]; !ok {
if value := reflect.ValueOf(values[idx]).Elem(); value.Kind() == reflect.Ptr && value.IsNil() {
isNilPtrValue = true
break
}
relValue.Set(reflect.New(relValue.Type().Elem()))
joinedSchemaMap[joinSchema] = nil
relValue.Set(reflect.New(relValue.Type().Elem()))
joinedNestedSchemaMap[fullRelsName] = nil
}
}
currentReflectValue = relValue
}
if !isNilPtrValue { // ignore if value is nil
f := joinFields[idx][len(joinFields[idx])-1]
db.AddError(f.Set(db.Statement.Context, relValue, values[idx]))
}
db.AddError(joinFields[idx][1].Set(db.Statement.Context, relValue, values[idx]))
}
// release data to pool
@ -163,7 +180,7 @@ func Scan(rows Rows, db *DB, mode ScanMode) {
default:
var (
fields = make([]*schema.Field, len(columns))
joinFields [][2]*schema.Field
joinFields [][]*schema.Field
sch = db.Statement.Schema
reflectValue = db.Statement.ReflectValue
)
@ -217,15 +234,26 @@ func Scan(rows Rows, db *DB, mode ScanMode) {
} else {
matchedFieldCount[column] = 1
}
} else if names := strings.Split(column, "__"); len(names) > 1 {
} else if names := utils.SplitNestedRelationName(column); len(names) > 1 { // has nested relation
if rel, ok := sch.Relationships.Relations[names[0]]; ok {
if field := rel.FieldSchema.LookUpField(strings.Join(names[1:], "__")); field != nil && field.Readable {
subNameCount := len(names)
// nested relation fields
relFields := make([]*schema.Field, 0, subNameCount-1)
relFields = append(relFields, rel.Field)
for _, name := range names[1 : subNameCount-1] {
rel = rel.FieldSchema.Relationships.Relations[name]
relFields = append(relFields, rel.Field)
}
// the last name is the raw db name
dbName := names[subNameCount-1]
if field := rel.FieldSchema.LookUpField(dbName); field != nil && field.Readable {
fields[idx] = field
if len(joinFields) == 0 {
joinFields = make([][2]*schema.Field, len(columns))
joinFields = make([][]*schema.Field, len(columns))
}
joinFields[idx] = [2]*schema.Field{rel.Field, field}
relFields = append(relFields, field)
joinFields[idx] = relFields
continue
}
}
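Join columns are now resolved through a relation chain of arbitrary depth instead of a fixed `[2]*schema.Field` pair, so nested join results land in the right nested struct, and a NULL left join leaves pointer relations nil. Sketch:

// Column `Manager__Company__name` resolves to the chain [Manager, Company]
// plus the raw column `name`.
// var users []User // User.Manager is a *Manager
// db.Joins("Manager").Joins("Manager.Company").Find(&users)
// users[i].Manager stays nil when the joined columns are all NULL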

@ -89,6 +89,10 @@ type Field struct {
NewValuePool FieldNewValuePool
}
func (field *Field) BindName() string {
return strings.Join(field.BindNames, ".")
}
// ParseField parses reflect.StructField to Field
func (schema *Schema) ParseField(fieldStruct reflect.StructField) *Field {
var (
@ -580,8 +584,6 @@ func (field *Field) setupValuerAndSetter() {
case **bool:
if data != nil && *data != nil {
field.ReflectValueOf(ctx, value).SetBool(**data)
} else {
field.ReflectValueOf(ctx, value).SetBool(false)
}
case bool:
field.ReflectValueOf(ctx, value).SetBool(data)
@ -601,8 +603,6 @@ func (field *Field) setupValuerAndSetter() {
case **int64:
if data != nil && *data != nil {
field.ReflectValueOf(ctx, value).SetInt(**data)
} else {
field.ReflectValueOf(ctx, value).SetInt(0)
}
case int64:
field.ReflectValueOf(ctx, value).SetInt(data)
@ -667,8 +667,6 @@ func (field *Field) setupValuerAndSetter() {
case **uint64:
if data != nil && *data != nil {
field.ReflectValueOf(ctx, value).SetUint(**data)
} else {
field.ReflectValueOf(ctx, value).SetUint(0)
}
case uint64:
field.ReflectValueOf(ctx, value).SetUint(data)
@ -721,8 +719,6 @@ func (field *Field) setupValuerAndSetter() {
case **float64:
if data != nil && *data != nil {
field.ReflectValueOf(ctx, value).SetFloat(**data)
} else {
field.ReflectValueOf(ctx, value).SetFloat(0)
}
case float64:
field.ReflectValueOf(ctx, value).SetFloat(data)
@ -767,8 +763,6 @@ func (field *Field) setupValuerAndSetter() {
case **string:
if data != nil && *data != nil {
field.ReflectValueOf(ctx, value).SetString(**data)
} else {
field.ReflectValueOf(ctx, value).SetString("")
}
case string:
field.ReflectValueOf(ctx, value).SetString(data)
@ -916,6 +910,8 @@ func (field *Field) setupValuerAndSetter() {
sameElemType = field.FieldType == reflect.ValueOf(field.Serializer).Type().Elem()
}
serializerValue := reflect.Indirect(reflect.ValueOf(field.Serializer))
serializerType := serializerValue.Type()
field.Set = func(ctx context.Context, value reflect.Value, v interface{}) (err error) {
if s, ok := v.(*serializer); ok {
if s.fieldValue != nil {
@ -923,11 +919,12 @@ func (field *Field) setupValuerAndSetter() {
} else if err = s.Serializer.Scan(ctx, field, value, s.value); err == nil {
if sameElemType {
field.ReflectValueOf(ctx, value).Set(reflect.ValueOf(s.Serializer).Elem())
s.Serializer = reflect.New(reflect.Indirect(reflect.ValueOf(field.Serializer)).Type()).Interface().(SerializerInterface)
} else if sameType {
field.ReflectValueOf(ctx, value).Set(reflect.ValueOf(s.Serializer))
s.Serializer = reflect.New(reflect.Indirect(reflect.ValueOf(field.Serializer)).Type()).Interface().(SerializerInterface)
}
si := reflect.New(serializerType)
si.Elem().Set(serializerValue)
s.Serializer = si.Interface().(SerializerInterface)
}
} else {
err = oldFieldSetter(ctx, value, v)
@ -939,11 +936,15 @@ func (field *Field) setupValuerAndSetter() {
func (field *Field) setupNewValuePool() {
if field.Serializer != nil {
serializerValue := reflect.Indirect(reflect.ValueOf(field.Serializer))
serializerType := serializerValue.Type()
field.NewValuePool = &sync.Pool{
New: func() interface{} {
si := reflect.New(serializerType)
si.Elem().Set(serializerValue)
return &serializer{
Field: field,
Serializer: field.Serializer,
Serializer: si.Interface().(SerializerInterface),
}
},
}
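Both the setter and the value pool now hand out a fresh copy of the parsed serializer instead of sharing one instance, so pooled scans cannot leak state into each other. Declaring a serialized field is unchanged; a sketch:

type Settings struct {
	ID    uint
	Attrs map[string]string `gorm:"serializer:json"`
}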

@ -27,6 +27,8 @@ type Relationships struct {
HasMany []*Relationship
Many2Many []*Relationship
Relations map[string]*Relationship
EmbeddedRelations map[string]*Relationships
}
type Relationship struct {
@ -106,7 +108,7 @@ func (schema *Schema) parseRelation(field *Field) *Relationship {
}
if schema.err == nil {
schema.Relationships.Relations[relation.Name] = relation
schema.setRelation(relation)
switch relation.Type {
case HasOne:
schema.Relationships.HasOne = append(schema.Relationships.HasOne, relation)
@ -122,6 +124,39 @@ func (schema *Schema) parseRelation(field *Field) *Relationship {
return relation
}
func (schema *Schema) setRelation(relation *Relationship) {
// set non-embedded relation
if rel := schema.Relationships.Relations[relation.Name]; rel != nil {
if len(rel.Field.BindNames) > 1 {
schema.Relationships.Relations[relation.Name] = relation
}
} else {
schema.Relationships.Relations[relation.Name] = relation
}
// set embedded relation
if len(relation.Field.BindNames) <= 1 {
return
}
relationships := &schema.Relationships
for i, name := range relation.Field.BindNames {
if i < len(relation.Field.BindNames)-1 {
if relationships.EmbeddedRelations == nil {
relationships.EmbeddedRelations = map[string]*Relationships{}
}
if r := relationships.EmbeddedRelations[name]; r == nil {
relationships.EmbeddedRelations[name] = &Relationships{}
}
relationships = relationships.EmbeddedRelations[name]
} else {
if relationships.Relations == nil {
relationships.Relations = map[string]*Relationship{}
}
relationships.Relations[relation.Name] = relation
}
}
}
// User has many Toys, its `Polymorphic` is `Owner`, Pet has one Toy, its `Polymorphic` is `Owner`
//
// type User struct {
@ -166,6 +201,11 @@ func (schema *Schema) buildPolymorphicRelation(relation *Relationship, field *Fi
}
}
if primaryKeyField == nil {
schema.err = fmt.Errorf("invalid polymorphic type %v for %v on field %s, missing primaryKey field", relation.FieldSchema, schema, field.Name)
return
}
// use same data type for foreign keys
if copyableDataType(primaryKeyField.DataType) {
relation.Polymorphic.PolymorphicID.DataType = primaryKeyField.DataType
@ -443,6 +483,7 @@ func (schema *Schema) guessRelation(relation *Relationship, field *Field, cgl gu
primaryFields = primarySchema.PrimaryFields
}
primaryFieldLoop:
for _, primaryField := range primaryFields {
lookUpName := primarySchemaName + primaryField.Name
if gl == guessBelongs {
@ -454,11 +495,18 @@ func (schema *Schema) guessRelation(relation *Relationship, field *Field, cgl gu
lookUpNames = append(lookUpNames, strings.TrimSuffix(lookUpName, primaryField.Name)+"ID", strings.TrimSuffix(lookUpName, primaryField.Name)+"Id", schema.namer.ColumnName(foreignSchema.Table, strings.TrimSuffix(lookUpName, primaryField.Name)+"ID"))
}
for _, name := range lookUpNames {
if f := foreignSchema.LookUpFieldByBindName(field.BindNames, name); f != nil {
foreignFields = append(foreignFields, f)
primaryFields = append(primaryFields, primaryField)
continue primaryFieldLoop
}
}
for _, name := range lookUpNames {
if f := foreignSchema.LookUpField(name); f != nil {
foreignFields = append(foreignFields, f)
primaryFields = append(primaryFields, primaryField)
break
continue primaryFieldLoop
}
}
}

@ -6,6 +6,7 @@ import (
"fmt"
"go/ast"
"reflect"
"strings"
"sync"
"gorm.io/gorm/clause"
@ -25,6 +26,7 @@ type Schema struct {
PrimaryFieldDBNames []string
Fields []*Field
FieldsByName map[string]*Field
FieldsByBindName map[string]*Field // embedded fields are keyed as 'Embed.Field'
FieldsByDBName map[string]*Field
FieldsWithDefaultDBValue []*Field // fields with default value assigned by database
Relationships Relationships
@ -67,6 +69,27 @@ func (schema Schema) LookUpField(name string) *Field {
return nil
}
// LookUpFieldByBindName looks for the closest field in the embedded struct.
//
// type Struct struct {
// Embedded struct {
// ID string // is selected by LookUpFieldByBindName([]string{"Embedded", "ID"}, "ID")
// }
// ID string // is selected by LookUpFieldByBindName([]string{"ID"}, "ID")
// }
func (schema Schema) LookUpFieldByBindName(bindNames []string, name string) *Field {
if len(bindNames) == 0 {
return nil
}
for i := len(bindNames) - 1; i >= 0; i-- {
find := strings.Join(bindNames[:i], ".") + "." + name
if field, ok := schema.FieldsByBindName[find]; ok {
return field
}
}
return nil
}
type Tabler interface {
TableName() string
}
@ -140,15 +163,16 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
}
schema := &Schema{
Name: modelType.Name(),
ModelType: modelType,
Table: tableName,
FieldsByName: map[string]*Field{},
FieldsByDBName: map[string]*Field{},
Relationships: Relationships{Relations: map[string]*Relationship{}},
cacheStore: cacheStore,
namer: namer,
initialized: make(chan struct{}),
Name: modelType.Name(),
ModelType: modelType,
Table: tableName,
FieldsByName: map[string]*Field{},
FieldsByBindName: map[string]*Field{},
FieldsByDBName: map[string]*Field{},
Relationships: Relationships{Relations: map[string]*Relationship{}},
cacheStore: cacheStore,
namer: namer,
initialized: make(chan struct{}),
}
// When the schema initialization is completed, the channel will be closed
defer close(schema.initialized)
@ -176,6 +200,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
field.DBName = namer.ColumnName(schema.Table, field.Name)
}
bindName := field.BindName()
if field.DBName != "" {
// prefer this field when none is registered yet, or when it is accessible and has a shorter bind path
if v, ok := schema.FieldsByDBName[field.DBName]; !ok || ((field.Creatable || field.Updatable || field.Readable) && len(field.BindNames) < len(v.BindNames)) {
@ -184,6 +209,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
}
schema.FieldsByDBName[field.DBName] = field
schema.FieldsByName[field.Name] = field
schema.FieldsByBindName[bindName] = field
if v != nil && v.PrimaryKey {
for idx, f := range schema.PrimaryFields {
@ -202,6 +228,9 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
if of, ok := schema.FieldsByName[field.Name]; !ok || of.TagSettings["-"] == "-" {
schema.FieldsByName[field.Name] = field
}
if of, ok := schema.FieldsByBindName[bindName]; !ok || of.TagSettings["-"] == "-" {
schema.FieldsByBindName[bindName] = field
}
field.setupValuerAndSetter()
}
@ -221,8 +250,18 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
}
}
if schema.PrioritizedPrimaryField == nil && len(schema.PrimaryFields) == 1 {
schema.PrioritizedPrimaryField = schema.PrimaryFields[0]
if schema.PrioritizedPrimaryField == nil {
if len(schema.PrimaryFields) == 1 {
schema.PrioritizedPrimaryField = schema.PrimaryFields[0]
} else if len(schema.PrimaryFields) > 1 {
// If there are multiple primary keys, the AUTOINCREMENT field is prioritized
for _, field := range schema.PrimaryFields {
if field.AutoIncrement {
schema.PrioritizedPrimaryField = field
break
}
}
}
}
for _, field := range schema.PrimaryFields {
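When a model has several primary fields, the auto-increment column is now picked as the prioritized primary field instead of leaving it unset. Sketch of a composite-key model:

type Product struct {
	Code string `gorm:"primaryKey"`
	ID   uint   `gorm:"primaryKey;autoIncrement"` // becomes PrioritizedPrimaryField
}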
@ -283,6 +322,7 @@ func ParseWithSpecialTableName(dest interface{}, cacheStore *sync.Map, namer Nam
return schema, schema.err
} else {
schema.FieldsByName[field.Name] = field
schema.FieldsByBindName[field.BindName()] = field
}
}

vendor/gorm.io/gorm/statement.go generated vendored

@ -324,11 +324,9 @@ func (stmt *Statement) BuildCondition(query interface{}, args ...interface{}) []
case clause.Expression:
conds = append(conds, v)
case *DB:
for _, scope := range v.Statement.scopes {
v = scope(v)
}
v.executeScopes()
if cs, ok := v.Statement.Clauses["WHERE"]; ok {
if cs, ok := v.Statement.Clauses["WHERE"]; ok && cs.Expression != nil {
if where, ok := cs.Expression.(clause.Where); ok {
if len(where.Exprs) == 1 {
if orConds, ok := where.Exprs[0].(clause.OrConditions); ok {
@ -336,9 +334,13 @@ func (stmt *Statement) BuildCondition(query interface{}, args ...interface{}) []
}
}
conds = append(conds, clause.And(where.Exprs...))
} else if cs.Expression != nil {
} else {
conds = append(conds, cs.Expression)
}
if v.Statement == stmt {
cs.Expression = nil
stmt.Statement.Clauses["WHERE"] = cs
}
}
case map[interface{}]interface{}:
for i, j := range v {
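`BuildCondition` now runs a sub-builder's pending scopes before lifting out its WHERE clause, and clears the clause when the sub-builder is the current statement so conditions are not applied twice. Group conditions keep their familiar shape; a sketch:

// db.Where(
// 	db.Where("pizza = ?", "pepperoni").Where("size = ?", "small"),
// ).Or(
// 	db.Where("pizza = ?", "hawaiian"),
// ).Find(&pizzas)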

@ -13,8 +13,14 @@ import (
func AssertObjEqual(t *testing.T, r, e interface{}, names ...string) {
for _, name := range names {
got := reflect.Indirect(reflect.ValueOf(r)).FieldByName(name).Interface()
expect := reflect.Indirect(reflect.ValueOf(e)).FieldByName(name).Interface()
rv := reflect.Indirect(reflect.ValueOf(r))
ev := reflect.Indirect(reflect.ValueOf(e))
if rv.IsValid() != ev.IsValid() {
t.Errorf("%v: expect: %+v, got %+v", utils.FileWithLineNum(), e, r)
return
}
got := rv.FieldByName(name).Interface()
expect := ev.FieldByName(name).Interface()
t.Run(name, func(t *testing.T) {
AssertEqual(t, got, expect)
})

@ -131,3 +131,20 @@ func ToString(value interface{}) string {
}
return ""
}
const nestedRelationSplit = "__"
// NestedRelationName nested relationships like `Manager__Company`
func NestedRelationName(prefix, name string) string {
return prefix + nestedRelationSplit + name
}
// SplitNestedRelationName splits a nested relationship name into `[]string{"Manager", "Company"}`
func SplitNestedRelationName(name string) []string {
return strings.Split(name, nestedRelationSplit)
}
// JoinNestedRelationNames joins relationship names into `Manager__Company`
func JoinNestedRelationNames(relationNames []string) string {
return strings.Join(relationNames, nestedRelationSplit)
}
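Round-tripping the helpers above (internal `gorm.io/gorm/utils` package):

// alias := utils.NestedRelationName("Manager", "Company") // "Manager__Company"
// parts := utils.SplitNestedRelationName(alias)           // []string{"Manager", "Company"}
// _ = utils.JoinNestedRelationNames(parts)                // "Manager__Company"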

vendor/modules.txt vendored

@ -8,7 +8,7 @@ github.com/aliyun/aliyun-oss-go-sdk/oss
## explicit; go 1.16
github.com/allegro/bigcache/v3
github.com/allegro/bigcache/v3/queue
# github.com/baidubce/bce-sdk-go v0.9.146
# github.com/baidubce/bce-sdk-go v0.9.148
## explicit; go 1.11
github.com/baidubce/bce-sdk-go/auth
github.com/baidubce/bce-sdk-go/bce
@ -75,7 +75,7 @@ github.com/gin-gonic/gin/binding
github.com/gin-gonic/gin/internal/bytesconv
github.com/gin-gonic/gin/internal/json
github.com/gin-gonic/gin/render
# github.com/go-co-op/gocron v1.19.0
# github.com/go-co-op/gocron v1.22.2
## explicit; go 1.19
github.com/go-co-op/gocron
# github.com/go-logr/logr v1.2.4
@ -202,7 +202,7 @@ github.com/jinzhu/now
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
# github.com/klauspost/compress v1.16.3
# github.com/klauspost/compress v1.16.5
## explicit; go 1.18
github.com/klauspost/compress
github.com/klauspost/compress/fse
@ -214,10 +214,10 @@ github.com/klauspost/compress/zstd/internal/xxhash
# github.com/klauspost/cpuid/v2 v2.2.4
## explicit; go 1.15
github.com/klauspost/cpuid/v2
# github.com/leodido/go-urn v1.2.2
# github.com/leodido/go-urn v1.2.3
## explicit; go 1.16
github.com/leodido/go-urn
# github.com/lib/pq v1.10.7
# github.com/lib/pq v1.10.8
## explicit; go 1.13
github.com/lib/pq
github.com/lib/pq/oid
@ -459,7 +459,7 @@ go.uber.org/zap/zapcore
# golang.org/x/arch v0.3.0
## explicit; go 1.17
golang.org/x/arch/x86/x86asm
# golang.org/x/crypto v0.7.0
# golang.org/x/crypto v0.8.0
## explicit; go 1.17
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
@ -479,7 +479,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
# golang.org/x/net v0.8.0
# golang.org/x/net v0.9.0
## explicit; go 1.17
golang.org/x/net/html
golang.org/x/net/html/atom
@ -502,7 +502,7 @@ golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
# golang.org/x/text v0.8.0
# golang.org/x/text v0.9.0
## explicit; go 1.17
golang.org/x/text/cases
golang.org/x/text/encoding
@ -531,7 +531,7 @@ golang.org/x/text/width
# golang.org/x/time v0.3.0
## explicit
golang.org/x/time/rate
# golang.org/x/tools v0.7.0
# golang.org/x/tools v0.8.0
## explicit; go 1.18
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
@ -581,10 +581,10 @@ gopkg.in/gomail.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
# gorm.io/datatypes v1.1.1
# gorm.io/datatypes v1.2.0
## explicit; go 1.18
gorm.io/datatypes
# gorm.io/driver/mysql v1.4.7
# gorm.io/driver/mysql v1.5.0
## explicit; go 1.14
gorm.io/driver/mysql
# gorm.io/driver/postgres v1.5.0
@ -600,7 +600,7 @@ gorm.io/gen/internal/model
gorm.io/gen/internal/parser
gorm.io/gen/internal/template
gorm.io/gen/internal/utils/pools
# gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11
# gorm.io/gorm v1.25.0
## explicit; go 1.16
gorm.io/gorm
gorm.io/gorm/callbacks
